-rw-r--r--  drivers/scsi/Kconfig           |    6
-rw-r--r--  drivers/scsi/Makefile          |    1
-rw-r--r--  drivers/scsi/libfc/Makefile    |   12
-rw-r--r--  drivers/scsi/libfc/fc_disc.c   |  845
-rw-r--r--  drivers/scsi/libfc/fc_elsct.c  |   71
-rw-r--r--  drivers/scsi/libfc/fc_exch.c   | 1970
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c    | 2131
-rw-r--r--  drivers/scsi/libfc/fc_frame.c  |   89
-rw-r--r--  drivers/scsi/libfc/fc_lport.c  | 1604
-rw-r--r--  drivers/scsi/libfc/fc_rport.c  | 1291
-rw-r--r--  include/scsi/fc_encode.h       |  309
-rw-r--r--  include/scsi/fc_frame.h        |  242
-rw-r--r--  include/scsi/libfc.h           |  938
13 files changed, 9509 insertions(+), 0 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 1badcec18f41..24d762aab7c5 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -603,6 +603,12 @@ config SCSI_FLASHPOINT
603	  substantial, so users of MultiMaster Host Adapters may not
604	  wish to include it.
605
606config LIBFC
607 tristate "LibFC module"
608 depends on SCSI && SCSI_FC_ATTRS
609 ---help---
610 Fibre Channel library module
611
612config SCSI_DMX3191D
613	tristate "DMX3191D SCSI support"
614	depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index b89aedfa9ed7..87355f573d60 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
36obj-$(CONFIG_SCSI_SRP_ATTRS)	+= scsi_transport_srp.o
37obj-$(CONFIG_SCSI_DH)	+= device_handler/
38
39obj-$(CONFIG_LIBFC) += libfc/
40obj-$(CONFIG_ISCSI_TCP)	+= libiscsi.o libiscsi_tcp.o iscsi_tcp.o
41obj-$(CONFIG_INFINIBAND_ISER)	+= libiscsi.o
42obj-$(CONFIG_SCSI_A4000T)	+= 53c700.o a4000t.o
diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile
new file mode 100644
index 000000000000..55f982de3a9a
--- /dev/null
+++ b/drivers/scsi/libfc/Makefile
@@ -0,0 +1,12 @@
1# $Id: Makefile
2
3obj-$(CONFIG_LIBFC) += libfc.o
4
5libfc-objs := \
6 fc_disc.o \
7 fc_exch.o \
8 fc_elsct.o \
9 fc_frame.o \
10 fc_lport.o \
11 fc_rport.o \
12 fc_fcp.o
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
new file mode 100644
index 000000000000..dd1564c9e04a
--- /dev/null
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -0,0 +1,845 @@
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * Target Discovery
22 *
23 * This block discovers all FC-4 remote ports, including FCP initiators. It
24 * also handles RSCN events and re-discovery if necessary.
25 */
26
27/*
28 * DISC LOCKING
29 *
30 * The disc mutex can be locked when acquiring rport locks, but may not
31 * be held when acquiring the lport lock. Refer to fc_lport.c for more
32 * details.
33 */
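/*
 * Illustrative sketch, not part of the patch: the ordering the note
 * above describes. The lport lock is taken first, the disc mutex nests
 * inside it, and rport locks nest innermost. Field names lp_mutex and
 * rp_mutex are assumptions from fc_lport.c/fc_rport.c.
 */
#if 0	/* example only */
	mutex_lock(&lport->lp_mutex);	/* outermost: lport lock */
	mutex_lock(&disc->disc_mutex);	/* disc mutex nests inside */
	mutex_lock(&rdata->rp_mutex);	/* rport lock nests innermost */
	/* ... update discovery state ... */
	mutex_unlock(&rdata->rp_mutex);
	mutex_unlock(&disc->disc_mutex);
	mutex_unlock(&lport->lp_mutex);
#endif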
34
35#include <linux/timer.h>
36#include <linux/err.h>
37#include <asm/unaligned.h>
38
39#include <scsi/fc/fc_gs.h>
40
41#include <scsi/libfc.h>
42
43#define FC_DISC_RETRY_LIMIT 3 /* max retries */
44#define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
45
46#define FC_DISC_DELAY 3
47
48static int fc_disc_debug;
49
50#define FC_DEBUG_DISC(fmt...) \
51 do { \
52 if (fc_disc_debug) \
53 FC_DBG(fmt); \
54 } while (0)
55
56static void fc_disc_gpn_ft_req(struct fc_disc *);
57static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
58static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
59 struct fc_rport_identifiers *);
60static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
61static void fc_disc_done(struct fc_disc *);
62static void fc_disc_timeout(struct work_struct *);
63static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
64static void fc_disc_restart(struct fc_disc *);
65
66/**
67 * fc_disc_lookup_rport - lookup a remote port by port_id
68 * @lport: Fibre Channel host port instance
69 * @port_id: remote port port_id to match
70 */
71struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
72 u32 port_id)
73{
74 const struct fc_disc *disc = &lport->disc;
75 struct fc_rport *rport, *found = NULL;
76 struct fc_rport_libfc_priv *rdata;
77 int disc_found = 0;
78
79 list_for_each_entry(rdata, &disc->rports, peers) {
80 rport = PRIV_TO_RPORT(rdata);
81 if (rport->port_id == port_id) {
82 disc_found = 1;
83 found = rport;
84 break;
85 }
86 }
87
88 if (!disc_found)
89 found = NULL;
90
91 return found;
92}
93
94/**
95 * fc_disc_stop_rports - delete all the remote ports associated with the lport
96 * @disc: The discovery job to stop rports on
97 *
98 * Locking Note: This function expects that the lport mutex is locked before
99 * calling it.
100 */
101void fc_disc_stop_rports(struct fc_disc *disc)
102{
103 struct fc_lport *lport;
104 struct fc_rport *rport;
105 struct fc_rport_libfc_priv *rdata, *next;
106
107 lport = disc->lport;
108
109 mutex_lock(&disc->disc_mutex);
110 list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
111 rport = PRIV_TO_RPORT(rdata);
112 list_del(&rdata->peers);
113 lport->tt.rport_logoff(rport);
114 }
115
116 mutex_unlock(&disc->disc_mutex);
117}
118
119/**
120 * fc_disc_rport_callback - Event handler for rport events
121 * @lport: The lport which is receiving the event
122 * @rport: The rport which the event has occurred on
123 * @event: The event that occurred
124 *
125 * Locking Note: The rport lock should not be held when calling
126 * this function.
127 */
128static void fc_disc_rport_callback(struct fc_lport *lport,
129 struct fc_rport *rport,
130 enum fc_rport_event event)
131{
132 struct fc_rport_libfc_priv *rdata = rport->dd_data;
133 struct fc_disc *disc = &lport->disc;
134 int found = 0;
135
136 FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event,
137 rport->port_id);
138
139 if (event == RPORT_EV_CREATED) {
140 if (disc) {
141 found = 1;
142 mutex_lock(&disc->disc_mutex);
143 list_add_tail(&rdata->peers, &disc->rports);
144 mutex_unlock(&disc->disc_mutex);
145 }
146 }
147
148 if (!found)
149 FC_DEBUG_DISC("The rport (%6x) is not maintained "
150 "by the discovery layer\n", rport->port_id);
151}
152
153/**
154 * fc_disc_recv_rscn_req - Handle Registered State Change Notification (RSCN)
155 * @sp: Current sequence of the RSCN exchange
156 * @fp: RSCN Frame
157 * @disc: FC discovery context
158 *
159 * Locking Note: This function expects that the disc_mutex is locked
160 * before it is called.
161 */
162static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
163 struct fc_disc *disc)
164{
165 struct fc_lport *lport;
166 struct fc_rport *rport;
167 struct fc_rport_libfc_priv *rdata;
168 struct fc_els_rscn *rp;
169 struct fc_els_rscn_page *pp;
170 struct fc_seq_els_data rjt_data;
171 unsigned int len;
172 int redisc = 0;
173 enum fc_els_rscn_ev_qual ev_qual;
174 enum fc_els_rscn_addr_fmt fmt;
175 LIST_HEAD(disc_ports);
176 struct fc_disc_port *dp, *next;
177
178 lport = disc->lport;
179
180 FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n",
181 fc_host_port_id(lport->host));
182
183 /* make sure the frame contains an RSCN message */
184 rp = fc_frame_payload_get(fp, sizeof(*rp));
185 if (!rp)
186 goto reject;
187 /* make sure the page length is as expected (4 bytes) */
188 if (rp->rscn_page_len != sizeof(*pp))
189 goto reject;
190 /* get the RSCN payload length */
191 len = ntohs(rp->rscn_plen);
192 if (len < sizeof(*rp))
193 goto reject;
194 /* make sure the frame contains the expected payload */
195 rp = fc_frame_payload_get(fp, len);
196 if (!rp)
197 goto reject;
198 /* payload must be a multiple of the RSCN page size */
199 len -= sizeof(*rp);
200 if (len % sizeof(*pp))
201 goto reject;
202
203 for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
204 ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
205 ev_qual &= ELS_RSCN_EV_QUAL_MASK;
206 fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
207 fmt &= ELS_RSCN_ADDR_FMT_MASK;
208 /*
209 * if we get an address format other than port
210 * (area, domain, fabric), then do a full discovery
211 */
212 switch (fmt) {
213 case ELS_ADDR_FMT_PORT:
214 FC_DEBUG_DISC("Port address format for port (%6x)\n",
215 ntoh24(pp->rscn_fid));
216 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
217 if (!dp) {
218 redisc = 1;
219 break;
220 }
221 dp->lp = lport;
222 dp->ids.port_id = ntoh24(pp->rscn_fid);
223 dp->ids.port_name = -1;
224 dp->ids.node_name = -1;
225 dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
226 list_add_tail(&dp->peers, &disc_ports);
227 break;
228 case ELS_ADDR_FMT_AREA:
229 case ELS_ADDR_FMT_DOM:
230 case ELS_ADDR_FMT_FAB:
231 default:
232 FC_DEBUG_DISC("Address format is (%d)\n", fmt);
233 redisc = 1;
234 break;
235 }
236 }
237 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
238 if (redisc) {
239 FC_DEBUG_DISC("RSCN received: rediscovering\n");
240 fc_disc_restart(disc);
241 } else {
242 FC_DEBUG_DISC("RSCN received: not rediscovering. "
243 "redisc %d state %d in_prog %d\n",
244 redisc, lport->state, disc->pending);
245 list_for_each_entry_safe(dp, next, &disc_ports, peers) {
246 list_del(&dp->peers);
247 rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
248 if (rport) {
249 rdata = RPORT_TO_PRIV(rport);
250 list_del(&rdata->peers);
251 lport->tt.rport_logoff(rport);
252 }
253 fc_disc_single(disc, dp);
254 }
255 }
256 fc_frame_free(fp);
257 return;
258reject:
259 FC_DEBUG_DISC("Received a bad RSCN frame\n");
260 rjt_data.fp = NULL;
261 rjt_data.reason = ELS_RJT_LOGIC;
262 rjt_data.explan = ELS_EXPL_NONE;
263 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
264 fc_frame_free(fp);
265}
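/*
 * Illustrative sketch, not part of the patch: the size relationships
 * the RSCN validation above enforces. The payload is a 4-byte header
 * (struct fc_els_rscn) followed by whole 4-byte pages
 * (struct fc_els_rscn_page), and rscn_plen must cover both.
 */
#if 0	/* example only */
static int fc_rscn_plen_ok(const struct fc_els_rscn *rp)
{
	unsigned int plen = ntohs(rp->rscn_plen);

	if (rp->rscn_page_len != sizeof(struct fc_els_rscn_page))
		return 0;	/* page length field must be 4 */
	if (plen < sizeof(*rp))
		return 0;	/* must at least cover the header */
	/* remainder must be whole pages */
	return (plen - sizeof(*rp)) % sizeof(struct fc_els_rscn_page) == 0;
}
#endif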
266
267/**
268 * fc_disc_recv_req - Handle incoming requests
269 * @sp: Current sequence of the request exchange
270 * @fp: The frame
271 * @lport: The FC local port
272 *
273 * Locking Note: This function is called from the EM and will lock
274 * the disc_mutex before calling the handler for the
275 * request.
276 */
277static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
278 struct fc_lport *lport)
279{
280 u8 op;
281 struct fc_disc *disc = &lport->disc;
282
283 op = fc_frame_payload_op(fp);
284 switch (op) {
285 case ELS_RSCN:
286 mutex_lock(&disc->disc_mutex);
287 fc_disc_recv_rscn_req(sp, fp, disc);
288 mutex_unlock(&disc->disc_mutex);
289 break;
290 default:
291 FC_DBG("Received an unsupported request. opcode (%x)\n", op);
292 break;
293 }
294}
295
296/**
297 * fc_disc_restart - Restart discovery
298 * @disc: FC discovery context
299 *
300 * Locking Note: This function expects that the disc mutex
301 * is already locked.
302 */
303static void fc_disc_restart(struct fc_disc *disc)
304{
305 struct fc_rport *rport;
306 struct fc_rport_libfc_priv *rdata, *next;
307 struct fc_lport *lport = disc->lport;
308
309 FC_DEBUG_DISC("Restarting discovery for port (%6x)\n",
310 fc_host_port_id(lport->host));
311
312 list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
313 rport = PRIV_TO_RPORT(rdata);
314 FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id);
315 list_del(&rdata->peers);
316 lport->tt.rport_logoff(rport);
317 }
318
319 disc->requested = 1;
320 if (!disc->pending)
321 fc_disc_gpn_ft_req(disc);
322}
323
324/**
325 * fc_disc_start - Fibre Channel Target discovery
326 * @disc_callback: Callback routine called when discovery completes
327 * @lport: FC local port
328 * Discovery runs asynchronously; completion is reported via @disc_callback.
329 */
330static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
331 enum fc_disc_event),
332 struct fc_lport *lport)
333{
334 struct fc_rport *rport;
335 struct fc_rport_identifiers ids;
336 struct fc_disc *disc = &lport->disc;
337
338 /*
339 * At this point we may have a new disc job or an existing
340 * one. Either way, let's lock when we make changes to it
341 * and send the GPN_FT request.
342 */
343 mutex_lock(&disc->disc_mutex);
344
345 disc->disc_callback = disc_callback;
346
347 /*
348 * If not ready, or already running discovery, just set request flag.
349 */
350 disc->requested = 1;
351
352 if (disc->pending) {
353 mutex_unlock(&disc->disc_mutex);
354 return;
355 }
356
357 /*
358 * Handle point-to-point mode as a simple discovery
359 * of the remote port. Yucky, yucky, yuck, yuck!
360 */
361 rport = disc->lport->ptp_rp;
362 if (rport) {
363 ids.port_id = rport->port_id;
364 ids.port_name = rport->port_name;
365 ids.node_name = rport->node_name;
366 ids.roles = FC_RPORT_ROLE_UNKNOWN;
367 get_device(&rport->dev);
368
369 if (!fc_disc_new_target(disc, rport, &ids)) {
370 disc->event = DISC_EV_SUCCESS;
371 fc_disc_done(disc);
372 }
373 put_device(&rport->dev);
374 } else {
375 fc_disc_gpn_ft_req(disc); /* get ports by FC-4 type */
376 }
377
378 mutex_unlock(&disc->disc_mutex);
379}
380
381static struct fc_rport_operations fc_disc_rport_ops = {
382 .event_callback = fc_disc_rport_callback,
383};
384
385/**
386 * fc_disc_new_target - Handle new target found by discovery
387 * @disc: FC discovery context
388 * @rport: The previous FC remote port (NULL if new remote port)
389 * @ids: Identifiers for the new FC remote port
390 *
391 * Locking Note: This function expects that the disc_mutex is locked
392 * before it is called.
393 */
394static int fc_disc_new_target(struct fc_disc *disc,
395 struct fc_rport *rport,
396 struct fc_rport_identifiers *ids)
397{
398 struct fc_lport *lport = disc->lport;
399 struct fc_rport_libfc_priv *rp;
400 int error = 0;
401
402 if (rport && ids->port_name) {
403 if (rport->port_name == -1) {
404 /*
405 * Set WWN and fall through to notify of create.
406 */
407 fc_rport_set_name(rport, ids->port_name,
408 rport->node_name);
409 } else if (rport->port_name != ids->port_name) {
410 /*
411 * This is a new port with the same FCID as
412 * a previously-discovered port. Presumably the old
413 * port logged out and a new port logged in and was
414 * assigned the same FCID. This should be rare.
415 * Delete the old one and fall thru to re-create.
416 */
417 fc_disc_del_target(disc, rport);
418 rport = NULL;
419 }
420 }
421 if (((ids->port_name != -1) || (ids->port_id != -1)) &&
422 ids->port_id != fc_host_port_id(lport->host) &&
423 ids->port_name != lport->wwpn) {
424 if (!rport) {
425 rport = lport->tt.rport_lookup(lport, ids->port_id);
426 if (!rport) {
427 struct fc_disc_port dp;
428 dp.lp = lport;
429 dp.ids.port_id = ids->port_id;
430 dp.ids.port_name = ids->port_name;
431 dp.ids.node_name = ids->node_name;
432 dp.ids.roles = ids->roles;
433 rport = fc_rport_rogue_create(&dp);
434 }
435 if (!rport)
436 error = -ENOMEM;
437 }
438 if (rport) {
439 rp = rport->dd_data;
440 rp->ops = &fc_disc_rport_ops;
441 rp->rp_state = RPORT_ST_INIT;
442 lport->tt.rport_login(rport);
443 }
444 }
445 return error;
446}
447
448/**
449 * fc_disc_del_target - Delete a target
450 * @disc: FC discovery context
451 * @rport: The remote port to be removed
452 */
453static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
454{
455 struct fc_lport *lport = disc->lport;
456 struct fc_rport_libfc_priv *rdata = RPORT_TO_PRIV(rport);
457 list_del(&rdata->peers);
458 lport->tt.rport_logoff(rport);
459}
460
461/**
462 * fc_disc_done - Discovery has been completed
463 * @disc: FC discovery context
464 */
465static void fc_disc_done(struct fc_disc *disc)
466{
467 struct fc_lport *lport = disc->lport;
468
469 FC_DEBUG_DISC("Discovery complete for port (%6x)\n",
470 fc_host_port_id(lport->host));
471
472 disc->disc_callback(lport, disc->event);
473 disc->event = DISC_EV_NONE;
474
475 if (disc->requested)
476 fc_disc_gpn_ft_req(disc);
477 else
478 disc->pending = 0;
479}
480
481/**
482 * fc_disc_error - Handle error on dNS request
483 * @disc: FC discovery context
484 * @fp: The frame pointer
485 */
486static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
487{
488 struct fc_lport *lport = disc->lport;
489 unsigned long delay = 0;
490 if (fc_disc_debug)
491 FC_DBG("Error %ld, retries %d/%d\n",
492 PTR_ERR(fp), disc->retry_count,
493 FC_DISC_RETRY_LIMIT);
494
495 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
496 /*
497 * Memory allocation failure, or the exchange timed out,
498 * retry after delay.
499 */
500 if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
501 /* go ahead and retry */
502 if (!fp)
503 delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
504 else {
505 delay = msecs_to_jiffies(lport->e_d_tov);
506
507 /* timeout faster first time */
508 if (!disc->retry_count)
509 delay /= 4;
510 }
511 disc->retry_count++;
512 schedule_delayed_work(&disc->disc_work, delay);
513 } else {
514 /* exceeded retries */
515 disc->event = DISC_EV_FAILED;
516 fc_disc_done(disc);
517 }
518 }
519}
520
521/**
522 * fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request
523 * @disc: FC discovery context
524 *
525 * Locking Note: This function expects that the disc_mutex is locked
526 * before it is called.
527 */
528static void fc_disc_gpn_ft_req(struct fc_disc *disc)
529{
530 struct fc_frame *fp;
531 struct fc_lport *lport = disc->lport;
532
533 WARN_ON(!fc_lport_test_ready(lport));
534
535 disc->pending = 1;
536 disc->requested = 0;
537
538 disc->buf_len = 0;
539 disc->seq_count = 0;
540 fp = fc_frame_alloc(lport,
541 sizeof(struct fc_ct_hdr) +
542 sizeof(struct fc_ns_gid_ft));
543 if (!fp)
544 goto err;
545
546 if (lport->tt.elsct_send(lport, NULL, fp,
547 FC_NS_GPN_FT,
548 fc_disc_gpn_ft_resp,
549 disc, lport->e_d_tov))
550 return;
551err:
552 fc_disc_error(disc, fp);
553}
554
555/**
556 * fc_disc_gpn_ft_parse - Parse the list of IDs and names resulting from a request
557 * @disc: FC discovery context
558 * @buf: GPN_FT response buffer
559 * @len: size of response buffer
560 */
561static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
562{
563 struct fc_lport *lport;
564 struct fc_gpn_ft_resp *np;
565 char *bp;
566 size_t plen;
567 size_t tlen;
568 int error = 0;
569 struct fc_disc_port dp;
570 struct fc_rport *rport;
571 struct fc_rport_libfc_priv *rdata;
572
573 lport = disc->lport;
574
575 /*
576 * Handle partial name record left over from previous call.
577 */
578 bp = buf;
579 plen = len;
580 np = (struct fc_gpn_ft_resp *)bp;
581 tlen = disc->buf_len;
582 if (tlen) {
583 WARN_ON(tlen >= sizeof(*np));
584 plen = sizeof(*np) - tlen;
585 WARN_ON(plen <= 0);
586 WARN_ON(plen >= sizeof(*np));
587 if (plen > len)
588 plen = len;
589 np = &disc->partial_buf;
590 memcpy((char *)np + tlen, bp, plen);
591
592 /*
593 * Set bp so that the loop below will advance it to the
594 * first valid full name element.
595 */
596 bp -= tlen;
597 len += tlen;
598 plen += tlen;
599 disc->buf_len = (unsigned char) plen;
600 if (plen == sizeof(*np))
601 disc->buf_len = 0;
602 }
603
604 /*
605 * Handle full name records, including the one filled from above.
606 * Normally, np == bp and plen == len, but from the partial case above,
607 * bp, len describe the overall buffer, and np, plen describe the
608 * partial buffer, which would usually be full now.
609 * After the first time through the loop, things return to "normal".
610 */
611 while (plen >= sizeof(*np)) {
612 dp.lp = lport;
613 dp.ids.port_id = ntoh24(np->fp_fid);
614 dp.ids.port_name = ntohll(np->fp_wwpn);
615 dp.ids.node_name = -1;
616 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
617
618 if ((dp.ids.port_id != fc_host_port_id(lport->host)) &&
619 (dp.ids.port_name != lport->wwpn)) {
620 rport = fc_rport_rogue_create(&dp);
621 if (rport) {
622 rdata = rport->dd_data;
623 rdata->ops = &fc_disc_rport_ops;
624 rdata->local_port = lport;
625 lport->tt.rport_login(rport);
626 } else
627 FC_DBG("Failed to allocate memory for "
628 "the newly discovered port (%6x)\n",
629 dp.ids.port_id);
630 }
631
632 if (np->fp_flags & FC_NS_FID_LAST) {
633 disc->event = DISC_EV_SUCCESS;
634 fc_disc_done(disc);
635 len = 0;
636 break;
637 }
638 len -= sizeof(*np);
639 bp += sizeof(*np);
640 np = (struct fc_gpn_ft_resp *)bp;
641 plen = len;
642 }
643
644 /*
645 * Save any partial record at the end of the buffer for next time.
646 */
647 if (error == 0 && len > 0 && len < sizeof(*np)) {
648 if (np != &disc->partial_buf) {
649 FC_DEBUG_DISC("Partial buffer remains "
650 "for discovery by (%6x)\n",
651 fc_host_port_id(lport->host));
652 memcpy(&disc->partial_buf, np, len);
653 }
654 disc->buf_len = (unsigned char) len;
655 } else {
656 disc->buf_len = 0;
657 }
658 return error;
659}
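/*
 * Illustrative sketch, not part of the patch: how the partial-record
 * logic above stitches a name record split across two response frames,
 * assuming 16-byte fc_gpn_ft_resp records. If a frame ends with 10
 * bytes of a record, they are saved and buf_len = 10; the next frame
 * supplies the remaining 6 bytes before normal parsing resumes.
 */
#if 0	/* example only */
	/* end of frame 1: save the 10 leftover bytes */
	memcpy(&disc->partial_buf, tail, 10);
	disc->buf_len = 10;

	/* start of frame 2: complete the record with 6 more bytes */
	memcpy((char *)&disc->partial_buf + 10, buf,
	       sizeof(struct fc_gpn_ft_resp) - 10);
	/* ... parse &disc->partial_buf as one full record, then buf_len = 0 ... */
#endif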
660
661/*
662 * Handle retry of memory allocation for remote ports.
663 */
664static void fc_disc_timeout(struct work_struct *work)
665{
666 struct fc_disc *disc = container_of(work,
667 struct fc_disc,
668 disc_work.work);
669 mutex_lock(&disc->disc_mutex);
670 if (disc->requested && !disc->pending)
671 fc_disc_gpn_ft_req(disc);
672 mutex_unlock(&disc->disc_mutex);
673}
674
675/**
676 * fc_disc_gpn_ft_resp - Handle a response frame from Get Port Names (GPN_FT)
677 * @sp: Current sequence of GPN_FT exchange
678 * @fp: response frame
679 * @disc_arg: FC discovery context
680 *
681 * Locking Note: This function expects that the disc_mutex is locked
682 * before it is called.
683 */
684static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
685 void *disc_arg)
686{
687 struct fc_disc *disc = disc_arg;
688 struct fc_ct_hdr *cp;
689 struct fc_frame_header *fh;
690 unsigned int seq_cnt;
691 void *buf = NULL;
692 unsigned int len;
693 int error;
694
695 FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n",
696 fc_host_port_id(disc->lport->host));
697
698 if (IS_ERR(fp)) {
699 fc_disc_error(disc, fp);
700 return;
701 }
702
703 WARN_ON(!fc_frame_is_linear(fp)); /* buffer must be contiguous */
704 fh = fc_frame_header_get(fp);
705 len = fr_len(fp) - sizeof(*fh);
706 seq_cnt = ntohs(fh->fh_seq_cnt);
707 if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
708 disc->seq_count == 0) {
709 cp = fc_frame_payload_get(fp, sizeof(*cp));
710 if (!cp) {
711 FC_DBG("GPN_FT response too short, len %d\n",
712 fr_len(fp));
713 } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
714
715 /*
716 * Accepted. Parse response.
717 */
718 buf = cp + 1;
719 len -= sizeof(*cp);
720 } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
721 FC_DBG("GPN_FT rejected reason %x exp %x "
722 "(check zoning)\n", cp->ct_reason,
723 cp->ct_explan);
724 disc->event = DISC_EV_FAILED;
725 fc_disc_done(disc);
726 } else {
727 FC_DBG("GPN_FT unexpected response code %x\n",
728 ntohs(cp->ct_cmd));
729 }
730 } else if (fr_sof(fp) == FC_SOF_N3 &&
731 seq_cnt == disc->seq_count) {
732 buf = fh + 1;
733 } else {
734 FC_DBG("GPN_FT unexpected frame - out of sequence? "
735 "seq_cnt %x expected %x sof %x eof %x\n",
736 seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
737 }
738 if (buf) {
739 error = fc_disc_gpn_ft_parse(disc, buf, len);
740 if (error)
741 fc_disc_error(disc, fp);
742 else
743 disc->seq_count++;
744 }
745 fc_frame_free(fp);
746}
747
748/**
749 * fc_disc_single - Discover the directory information for a single target
750 * @disc: FC discovery context
751 * @dp: The port to rediscover
752 *
753 * Locking Note: This function expects that the disc_mutex is locked
754 * before it is called.
755 */
756static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
757{
758 struct fc_lport *lport;
759 struct fc_rport *rport;
760 struct fc_rport *new_rport;
761 struct fc_rport_libfc_priv *rdata;
762
763 lport = disc->lport;
764
765 if (dp->ids.port_id == fc_host_port_id(lport->host))
766 goto out;
767
768 rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
769 if (rport)
770 fc_disc_del_target(disc, rport);
771
772 new_rport = fc_rport_rogue_create(dp);
773 if (new_rport) {
774 rdata = new_rport->dd_data;
775 rdata->ops = &fc_disc_rport_ops;
776 kfree(dp);
777 lport->tt.rport_login(new_rport);
778 }
779 return;
780out:
781 kfree(dp);
782}
783
784/**
785 * fc_disc_stop - Stop discovery for a given lport
786 * @lport: The lport that discovery should stop for
787 */
788void fc_disc_stop(struct fc_lport *lport)
789{
790 struct fc_disc *disc = &lport->disc;
791
792 if (disc) {
793 cancel_delayed_work_sync(&disc->disc_work);
794 fc_disc_stop_rports(disc);
795 }
796}
797
798/**
799 * fc_disc_stop_final - Stop discovery for a given lport
800 * @lport: The lport that discovery should stop for
801 *
802 * This function will block until discovery has been
803 * completely stopped and all rports have been deleted.
804 */
805void fc_disc_stop_final(struct fc_lport *lport)
806{
807 fc_disc_stop(lport);
808 lport->tt.rport_flush_queue();
809}
810
811/**
812 * fc_disc_init - Initialize the discovery block
813 * @lport: FC local port
814 */
815int fc_disc_init(struct fc_lport *lport)
816{
817 struct fc_disc *disc;
818
819 if (!lport->tt.disc_start)
820 lport->tt.disc_start = fc_disc_start;
821
822 if (!lport->tt.disc_stop)
823 lport->tt.disc_stop = fc_disc_stop;
824
825 if (!lport->tt.disc_stop_final)
826 lport->tt.disc_stop_final = fc_disc_stop_final;
827
828 if (!lport->tt.disc_recv_req)
829 lport->tt.disc_recv_req = fc_disc_recv_req;
830
831 if (!lport->tt.rport_lookup)
832 lport->tt.rport_lookup = fc_disc_lookup_rport;
833
834 disc = &lport->disc;
835 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
836 mutex_init(&disc->disc_mutex);
837 INIT_LIST_HEAD(&disc->rports);
838
839 disc->lport = lport;
840 disc->delay = FC_DISC_DELAY;
841 disc->event = DISC_EV_NONE;
842
843 return 0;
844}
845EXPORT_SYMBOL(fc_disc_init);
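A minimal sketch, not part of this patch, of how a hypothetical LLD would
use the template pattern above: fc_disc_init() fills in only the tt
entries the driver left NULL, so a driver can override individual handlers
before calling it. The names my_lport_config and my_disc_stop are assumed.

	static void my_disc_stop(struct fc_lport *lport);

	static int my_lport_config(struct fc_lport *lport)
	{
		/* optional driver-specific override, set before init */
		lport->tt.disc_stop = my_disc_stop;

		/* fills disc_start, disc_recv_req, etc. where still NULL */
		return fc_disc_init(lport);
	}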
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
new file mode 100644
index 000000000000..dd47fe619d1e
--- /dev/null
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright(c) 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * Provide interface to send ELS/CT FC frames
22 */
23
24#include <asm/unaligned.h>
25#include <scsi/fc/fc_gs.h>
26#include <scsi/fc/fc_ns.h>
27#include <scsi/fc/fc_els.h>
28#include <scsi/libfc.h>
29#include <scsi/fc_encode.h>
30
31/*
32 * fc_elsct_send - sends ELS/CT frame
33 */
34static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
35 struct fc_rport *rport,
36 struct fc_frame *fp,
37 unsigned int op,
38 void (*resp)(struct fc_seq *,
39 struct fc_frame *fp,
40 void *arg),
41 void *arg, u32 timer_msec)
42{
43 enum fc_rctl r_ctl;
44 u32 did;
45 enum fc_fh_type fh_type;
46 int rc;
47
48 /* ELS requests */
49 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS))
50 rc = fc_els_fill(lport, rport, fp, op, &r_ctl, &did, &fh_type);
51 else
52 /* CT requests */
53 rc = fc_ct_fill(lport, fp, op, &r_ctl, &did, &fh_type);
54
55 if (rc)
56 return NULL;
57
58 fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type,
59 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
60
61 return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
62}
63
64int fc_elsct_init(struct fc_lport *lport)
65{
66 if (!lport->tt.elsct_send)
67 lport->tt.elsct_send = fc_elsct_send;
68
69 return 0;
70}
71EXPORT_SYMBOL(fc_elsct_init);
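fc_elsct_send() classifies a request purely by opcode range: ELS opcodes
are one byte (ELS_LS_RJT 0x01 through ELS_AUTH_ELS 0x90 in fc_els.h),
while CT opcodes such as FC_NS_GPN_FT (0x0172 in fc_ns.h) fall outside
that range. A minimal sketch of the test; the helper name is hypothetical.

	static inline int fc_op_is_els(unsigned int op)
	{
		/* anything outside the one-byte ELS range goes out as CT */
		return op >= ELS_LS_RJT && op <= ELS_AUTH_ELS;
	}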
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
new file mode 100644
index 000000000000..66db08a5f27f
--- /dev/null
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -0,0 +1,1970 @@
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
4 * Copyright(c) 2008 Mike Christie
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Maintained at www.Open-FCoE.org
20 */
21
22/*
23 * Fibre Channel exchange and sequence handling.
24 */
25
26#include <linux/timer.h>
27#include <linux/gfp.h>
28#include <linux/err.h>
29
30#include <scsi/fc/fc_fc2.h>
31
32#include <scsi/libfc.h>
33#include <scsi/fc_encode.h>
34
35#define FC_DEF_R_A_TOV (10 * 1000) /* resource allocation timeout */
36
37/*
38 * fc_exch_debug can be set in debugger or at compile time to get more logs.
39 */
40static int fc_exch_debug;
41
42#define FC_DEBUG_EXCH(fmt...) \
43 do { \
44 if (fc_exch_debug) \
45 FC_DBG(fmt); \
46 } while (0)
47
48static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
49
50/*
51 * Structure and function definitions for managing Fibre Channel Exchanges
52 * and Sequences.
53 *
54 * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
55 *
56 * fc_exch_mgr holds the exchange state for an N port
57 *
58 * fc_exch holds state for one exchange and links to its active sequence.
59 *
60 * fc_seq holds the state for an individual sequence.
61 */
62
63/*
64 * Exchange manager.
65 *
66 * This structure is the center for creating exchanges and sequences.
67 * It manages the allocation of exchange IDs.
68 */
69struct fc_exch_mgr {
70 enum fc_class class; /* default class for sequences */
71 spinlock_t em_lock; /* exchange manager lock,
72 must be taken before ex_lock */
73 u16 last_xid; /* last allocated exchange ID */
74 u16 min_xid; /* min exchange ID */
75 u16 max_xid; /* max exchange ID */
76 u16 max_read; /* max exchange ID for read */
77 u16 last_read; /* last xid allocated for read */
78 u32 total_exches; /* total allocated exchanges */
79 struct list_head ex_list; /* allocated exchanges list */
80 struct fc_lport *lp; /* fc device instance */
81 mempool_t *ep_pool; /* reserve ep's */
82
83 /*
84 * Currently exchange mgr stats are updated but not used.
85 * Either expose the stats via sysfs or remove them
86 * altogether if not used. XXX
87 */
88 struct {
89 atomic_t no_free_exch;
90 atomic_t no_free_exch_xid;
91 atomic_t xid_not_found;
92 atomic_t xid_busy;
93 atomic_t seq_not_found;
94 atomic_t non_bls_resp;
95 } stats;
96 struct fc_exch **exches; /* for exch pointers indexed by xid */
97};
98#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
99
100static void fc_exch_rrq(struct fc_exch *);
101static void fc_seq_ls_acc(struct fc_seq *);
102static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
103 enum fc_els_rjt_explan);
104static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
105static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
106static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
107
108/*
109 * Internal implementation notes.
110 *
111 * There is one exchange manager by default in libfc, but an LLD may
112 * choose to have one per CPU. There is one sequence manager per
113 * exchange manager, and the two are currently never separated.
114 *
115 * Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field
116 * assigned by the Sequence Initiator that shall be unique for a specific
117 * D_ID and S_ID pair while the Sequence is open." Note that it isn't
118 * qualified by exchange ID, which one might think it would be.
119 * In practice this limits the number of open sequences and exchanges to 256
120 * per session. For most targets we could treat this limit as per exchange.
121 *
122 * The exchange and its sequence are freed when the last sequence is received.
123 * It's possible for the remote port to leave an exchange open without
124 * sending any sequences.
125 *
126 * Notes on reference counts:
127 *
128 * Exchanges are reference counted and exchange gets freed when the reference
129 * count becomes zero.
130 *
131 * Timeouts:
132 * Sequences are timed out for E_D_TOV and R_A_TOV.
133 *
134 * Sequence event handling:
135 *
136 * The following events may occur on initiator sequences:
137 *
138 * Send.
139 * For now, the whole thing is sent.
140 * Receive ACK
141 * This applies only to class F.
142 * The sequence is marked complete.
143 * ULP completion.
144 * The upper layer calls fc_exch_done() when done
145 * with exchange and sequence tuple.
146 * RX-inferred completion.
147 * When we receive the next sequence on the same exchange, we can
148 * retire the previous sequence ID. (XXX not implemented).
149 * Timeout.
150 * R_A_TOV frees the sequence ID. If we're waiting for ACK,
151 * E_D_TOV causes abort and calls upper layer response handler
152 * with FC_EX_TIMEOUT error.
153 * Receive RJT
154 * XXX defer.
155 * Send ABTS
156 * On timeout.
157 *
158 * The following events may occur on recipient sequences:
159 *
160 * Receive
161 * Allocate sequence for first frame received.
162 * Hold during receive handler.
163 * Release when final frame received.
164 * Keep status of last N of these for the ELS RES command. XXX TBD.
165 * Receive ABTS
166 * Deallocate sequence
167 * Send RJT
168 * Deallocate
169 *
170 * For now, we neglect conditions where only part of a sequence was
171 * received or transmitted, or where out-of-order receipt is detected.
172 */
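/*
 * Illustrative sketch, not part of the patch: the FC-FS-2 constraint
 * above in numbers. SEQ_ID is one byte and must be unique per
 * S_ID/D_ID pair while the sequence is open, so at most 256 sequences
 * can be open toward one remote port at a time, no matter how many
 * exchanges exist; the per-exchange seq_id counter is a u8 and wraps
 * naturally.
 */
#if 0	/* example only */
	u8 seq_id = ep->seq_id++;	/* 255 wraps to 0 */
#endif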
173
174/*
175 * Locking notes:
176 *
177 * The EM code runs in a per-CPU worker thread.
178 *
179 * To protect against concurrency between worker thread code and timers,
180 * sequence allocation and deallocation must be locked.
181 * - the exchange refcnt can be manipulated atomically without locks.
182 * - sequence allocation must be locked by exch lock.
183 * - If the em_lock and ex_lock must be taken at the same time, then the
184 * em_lock must be taken before the ex_lock.
185 */
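/*
 * Illustrative sketch, not part of the patch: the em_lock/ex_lock
 * ordering rule above, as fc_exch_alloc() follows it when it takes
 * em_lock and then the new exchange's ex_lock.
 */
#if 0	/* example only */
	spin_lock_bh(&mp->em_lock);	/* manager lock first */
	spin_lock_bh(&ep->ex_lock);	/* exchange lock nests inside */
	/* ... */
	spin_unlock_bh(&ep->ex_lock);
	spin_unlock_bh(&mp->em_lock);
#endif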
186
187/*
188 * opcode names for debugging.
189 */
190static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
191
192#define FC_TABLE_SIZE(x) (sizeof(x) / sizeof(x[0]))
193
194static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
195 unsigned int max_index)
196{
197 const char *name = NULL;
198
199 if (op < max_index)
200 name = table[op];
201 if (!name)
202 name = "unknown";
203 return name;
204}
205
206static const char *fc_exch_rctl_name(unsigned int op)
207{
208 return fc_exch_name_lookup(op, fc_exch_rctl_names,
209 FC_TABLE_SIZE(fc_exch_rctl_names));
210}
211
212/*
213 * Hold an exchange - keep it from being freed.
214 */
215static void fc_exch_hold(struct fc_exch *ep)
216{
217 atomic_inc(&ep->ex_refcnt);
218}
219
220/*
221 * Set up the FC header by initializing a few more header fields and sof/eof.
222 * Fields initialized by this function:
223 * - fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt
224 * - sof and eof
225 */
226static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
227 u32 f_ctl)
228{
229 struct fc_frame_header *fh = fc_frame_header_get(fp);
230 u16 fill;
231
232 fr_sof(fp) = ep->class;
233 if (ep->seq.cnt)
234 fr_sof(fp) = fc_sof_normal(ep->class);
235
236 if (f_ctl & FC_FC_END_SEQ) {
237 fr_eof(fp) = FC_EOF_T;
238 if (fc_sof_needs_ack(ep->class))
239 fr_eof(fp) = FC_EOF_N;
240 /*
241 * Form f_ctl.
242 * The number of fill bytes to make the length a 4-byte
243 * multiple is the low order 2-bits of the f_ctl.
244 * The fill itself will have been cleared by the frame
245 * allocation.
246 * After this, the length will be even, as expected by
247 * the transport.
248 */
249 fill = fr_len(fp) & 3;
250 if (fill) {
251 fill = 4 - fill;
252 /* TODO, this may be a problem with fragmented skb */
253 skb_put(fp_skb(fp), fill);
254 hton24(fh->fh_f_ctl, f_ctl | fill);
255 }
256 } else {
257 WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */
258 fr_eof(fp) = FC_EOF_N;
259 }
260
261 /*
262 * Initialize remaining fh fields
263 * from fc_fill_fc_hdr
264 */
265 fh->fh_ox_id = htons(ep->oxid);
266 fh->fh_rx_id = htons(ep->rxid);
267 fh->fh_seq_id = ep->seq.id;
268 fh->fh_seq_cnt = htons(ep->seq.cnt);
269}
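/*
 * Illustrative sketch, not part of the patch: the pad arithmetic above.
 * The pad count is carried in the low 2 bits of f_ctl so the receiver
 * can strip it again.
 */
#if 0	/* example only */
	/* fr_len == 13: fill = 4 - (13 & 3) = 3, frame padded to 16 */
	/* fr_len == 16: (16 & 3) == 0, no pad, f_ctl low bits stay 0 */
#endif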
270
271
272/*
273 * Release a reference to an exchange.
274 * If the refcnt goes to zero and the exchange is complete, it is freed.
275 */
276static void fc_exch_release(struct fc_exch *ep)
277{
278 struct fc_exch_mgr *mp;
279
280 if (atomic_dec_and_test(&ep->ex_refcnt)) {
281 mp = ep->em;
282 if (ep->destructor)
283 ep->destructor(&ep->seq, ep->arg);
284 if (ep->lp->tt.exch_put)
285 ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
286		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
287 mempool_free(ep, mp->ep_pool);
288 }
289}
290
291static int fc_exch_done_locked(struct fc_exch *ep)
292{
293 int rc = 1;
294
295 /*
296 * We must check for completion in case there are two threads
297	 * trying to complete this. But the rrq code will reuse the
298 * ep, and in that case we only clear the resp and set it as
299 * complete, so it can be reused by the timer to send the rrq.
300 */
301 ep->resp = NULL;
302 if (ep->state & FC_EX_DONE)
303 return rc;
304 ep->esb_stat |= ESB_ST_COMPLETE;
305
306 if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
307 ep->state |= FC_EX_DONE;
308 if (cancel_delayed_work(&ep->timeout_work))
309 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
310 rc = 0;
311 }
312 return rc;
313}
314
315static void fc_exch_mgr_delete_ep(struct fc_exch *ep)
316{
317 struct fc_exch_mgr *mp;
318
319 mp = ep->em;
320 spin_lock_bh(&mp->em_lock);
321 WARN_ON(mp->total_exches <= 0);
322 mp->total_exches--;
323 mp->exches[ep->xid - mp->min_xid] = NULL;
324 list_del(&ep->ex_list);
325 spin_unlock_bh(&mp->em_lock);
326 fc_exch_release(ep); /* drop hold for exch in mp */
327}
328
329/*
330 * Internal version of fc_exch_timer_set - used with lock held.
331 */
332static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
333 unsigned int timer_msec)
334{
335 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
336 return;
337
338	FC_DEBUG_EXCH("Setting timer for exchange (%4x)\n",
339		      ep->xid);
340 if (schedule_delayed_work(&ep->timeout_work,
341 msecs_to_jiffies(timer_msec)))
342 fc_exch_hold(ep); /* hold for timer */
343}
344
345/*
346 * Set timer for an exchange.
347 * The time is a minimum delay in milliseconds until the timer fires.
348 * Used for upper level protocols to time out the exchange.
349 * The timer is cancelled when it fires or when the exchange completes.
350 * If a timer is already pending for the exchange, it is left in place.
351 */
352static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
353{
354 spin_lock_bh(&ep->ex_lock);
355 fc_exch_timer_set_locked(ep, timer_msec);
356 spin_unlock_bh(&ep->ex_lock);
357}
358
359int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
360{
361 struct fc_seq *sp;
362 struct fc_exch *ep;
363 struct fc_frame *fp;
364 int error;
365
366 ep = fc_seq_exch(req_sp);
367
368 spin_lock_bh(&ep->ex_lock);
369 if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
370 ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
371 spin_unlock_bh(&ep->ex_lock);
372 return -ENXIO;
373 }
374
375 /*
376 * Send the abort on a new sequence if possible.
377 */
378 sp = fc_seq_start_next_locked(&ep->seq);
379 if (!sp) {
380 spin_unlock_bh(&ep->ex_lock);
381 return -ENOMEM;
382 }
383
384 ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
385 if (timer_msec)
386 fc_exch_timer_set_locked(ep, timer_msec);
387 spin_unlock_bh(&ep->ex_lock);
388
389 /*
390 * If not logged into the fabric, don't send ABTS but leave
391 * sequence active until next timeout.
392 */
393 if (!ep->sid)
394 return 0;
395
396 /*
397 * Send an abort for the sequence that timed out.
398 */
399 fp = fc_frame_alloc(ep->lp, 0);
400 if (fp) {
401 fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
402 FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
403 error = fc_seq_send(ep->lp, sp, fp);
404 } else
405 error = -ENOBUFS;
406 return error;
407}
408EXPORT_SYMBOL(fc_seq_exch_abort);
409
410/*
411 * Exchange timeout - handle exchange timer expiration.
412 * The timer will have been cancelled before this is called.
413 */
414static void fc_exch_timeout(struct work_struct *work)
415{
416 struct fc_exch *ep = container_of(work, struct fc_exch,
417 timeout_work.work);
418 struct fc_seq *sp = &ep->seq;
419 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
420 void *arg;
421 u32 e_stat;
422 int rc = 1;
423
424 spin_lock_bh(&ep->ex_lock);
425 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
426 goto unlock;
427
428 e_stat = ep->esb_stat;
429 if (e_stat & ESB_ST_COMPLETE) {
430 ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
431 if (e_stat & ESB_ST_REC_QUAL)
432 fc_exch_rrq(ep);
433 spin_unlock_bh(&ep->ex_lock);
434 goto done;
435 } else {
436 resp = ep->resp;
437 arg = ep->arg;
438 ep->resp = NULL;
439 if (e_stat & ESB_ST_ABNORMAL)
440 rc = fc_exch_done_locked(ep);
441 spin_unlock_bh(&ep->ex_lock);
442 if (!rc)
443 fc_exch_mgr_delete_ep(ep);
444 if (resp)
445 resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
446 fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
447 goto done;
448 }
449unlock:
450 spin_unlock_bh(&ep->ex_lock);
451done:
452 /*
453 * This release matches the hold taken when the timer was set.
454 */
455 fc_exch_release(ep);
456}
457
458/*
459 * Allocate a sequence.
460 *
461 * We don't support multiple originated sequences on the same exchange.
462 * By implication, any previously originated sequence on this exchange
463 * is complete, and we reallocate the same sequence.
464 */
465static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
466{
467 struct fc_seq *sp;
468
469 sp = &ep->seq;
470 sp->ssb_stat = 0;
471 sp->cnt = 0;
472 sp->id = seq_id;
473 return sp;
474}
475
476/*
477 * fc_em_alloc_xid - returns an xid based on request type
478 * @lp : ptr to associated lport
479 * @fp : ptr to the associated frame
480 *
481 * Check the associated fc_fsp_pkt to get the scsi command type and
482 * direction, and decide the range from which this exchange id
483 * will be allocated.
484 *
485 * Returns : 0 on failure, or a valid xid
486 */
487static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp)
488{
489 u16 xid, min, max;
490 u16 *plast;
491 struct fc_exch *ep = NULL;
492
493 if (mp->max_read) {
494 if (fc_frame_is_read(fp)) {
495 min = mp->min_xid;
496 max = mp->max_read;
497 plast = &mp->last_read;
498 } else {
499 min = mp->max_read + 1;
500 max = mp->max_xid;
501 plast = &mp->last_xid;
502 }
503 } else {
504 min = mp->min_xid;
505 max = mp->max_xid;
506 plast = &mp->last_xid;
507 }
508 xid = *plast;
509 do {
510 xid = (xid == max) ? min : xid + 1;
511 ep = mp->exches[xid - mp->min_xid];
512 } while ((ep != NULL) && (xid != *plast));
513
514 if (unlikely(ep))
515 xid = 0;
516 else
517 *plast = xid;
518
519 return xid;
520}
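/*
 * Illustrative sketch, not part of the patch: with min_xid = 1,
 * max_xid = 4095 and max_read = 2047, read requests cycle through
 * xids 1..2047 (tracked by last_read) and all other requests through
 * 2048..4095 (tracked by last_xid); with max_read = 0 the whole range
 * is shared. The search wraps from max back to min and returns 0 once
 * a full cycle finds no free slot.
 */
#if 0	/* example only */
	xid = (xid == max) ? min : xid + 1;	/* e.g. 2047 wraps to 1 */
#endif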
521
522/*
523 * fc_exch_alloc - allocate an exchange.
524 * @mp : ptr to the exchange manager
525 * @xid: input xid
526 *
527 * If the supplied xid is zero, assign the next free exchange ID
528 * from the exchange manager; otherwise use the supplied xid.
529 * Returns with exch lock held.
530 */
531struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
532 struct fc_frame *fp, u16 xid)
533{
534 struct fc_exch *ep;
535
536 /* allocate memory for exchange */
537 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
538 if (!ep) {
539 atomic_inc(&mp->stats.no_free_exch);
540 goto out;
541 }
542 memset(ep, 0, sizeof(*ep));
543
544 spin_lock_bh(&mp->em_lock);
545 /* alloc xid if input xid 0 */
546 if (!xid) {
547 /* alloc a new xid */
548 xid = fc_em_alloc_xid(mp, fp);
549 if (!xid) {
550 printk(KERN_ERR "fc_em_alloc_xid() failed\n");
551 goto err;
552 }
553 }
554
555 fc_exch_hold(ep); /* hold for exch in mp */
556 spin_lock_init(&ep->ex_lock);
557 /*
558 * Hold exch lock for caller to prevent fc_exch_reset()
559 * from releasing exch while fc_exch_alloc() caller is
560 * still working on exch.
561 */
562 spin_lock_bh(&ep->ex_lock);
563
564 mp->exches[xid - mp->min_xid] = ep;
565 list_add_tail(&ep->ex_list, &mp->ex_list);
566 fc_seq_alloc(ep, ep->seq_id++);
567 mp->total_exches++;
568 spin_unlock_bh(&mp->em_lock);
569
570 /*
571 * update exchange
572 */
573 ep->oxid = ep->xid = xid;
574 ep->em = mp;
575 ep->lp = mp->lp;
576 ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
577 ep->rxid = FC_XID_UNKNOWN;
578 ep->class = mp->class;
579 INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
580out:
581 return ep;
582err:
583 spin_unlock_bh(&mp->em_lock);
584 atomic_inc(&mp->stats.no_free_exch_xid);
585 mempool_free(ep, mp->ep_pool);
586 return NULL;
587}
588EXPORT_SYMBOL(fc_exch_alloc);
589
590/*
591 * Lookup and hold an exchange.
592 */
593static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
594{
595 struct fc_exch *ep = NULL;
596
597 if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
598 spin_lock_bh(&mp->em_lock);
599 ep = mp->exches[xid - mp->min_xid];
600 if (ep) {
601 fc_exch_hold(ep);
602 WARN_ON(ep->xid != xid);
603 }
604 spin_unlock_bh(&mp->em_lock);
605 }
606 return ep;
607}
608
609void fc_exch_done(struct fc_seq *sp)
610{
611 struct fc_exch *ep = fc_seq_exch(sp);
612 int rc;
613
614 spin_lock_bh(&ep->ex_lock);
615 rc = fc_exch_done_locked(ep);
616 spin_unlock_bh(&ep->ex_lock);
617 if (!rc)
618 fc_exch_mgr_delete_ep(ep);
619}
620EXPORT_SYMBOL(fc_exch_done);
621
622/*
623 * Allocate a new exchange as responder.
624 * Sets the responder ID in the frame header.
625 */
626static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
627{
628 struct fc_exch *ep;
629 struct fc_frame_header *fh;
630 u16 rxid;
631
632 ep = mp->lp->tt.exch_get(mp->lp, fp);
633 if (ep) {
634 ep->class = fc_frame_class(fp);
635
636 /*
637 * Set EX_CTX indicating we're responding on this exchange.
638 */
639 ep->f_ctl |= FC_FC_EX_CTX; /* we're responding */
640 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not new */
641 fh = fc_frame_header_get(fp);
642 ep->sid = ntoh24(fh->fh_d_id);
643 ep->did = ntoh24(fh->fh_s_id);
644 ep->oid = ep->did;
645
646 /*
647 * Allocated exchange has placed the XID in the
648 * originator field. Move it to the responder field,
649 * and set the originator XID from the frame.
650 */
651 ep->rxid = ep->xid;
652 ep->oxid = ntohs(fh->fh_ox_id);
653 ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
654 if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
655 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
656
657 /*
658 * Set the responder ID in the frame header.
659 * The old one should've been 0xffff.
660 * If it isn't, don't assign one.
661 * Incoming basic link service frames may specify
662 * a referenced RX_ID.
663 */
664 if (fh->fh_type != FC_TYPE_BLS) {
665 rxid = ntohs(fh->fh_rx_id);
666 WARN_ON(rxid != FC_XID_UNKNOWN);
667 fh->fh_rx_id = htons(ep->rxid);
668 }
669 fc_exch_hold(ep); /* hold for caller */
670 spin_unlock_bh(&ep->ex_lock); /* lock from exch_get */
671 }
672 return ep;
673}
674
675/*
676 * Find a sequence for receive where the other end is originating the sequence.
677 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
678 * on the ep that should be released by the caller.
679 */
680static enum fc_pf_rjt_reason
681fc_seq_lookup_recip(struct fc_exch_mgr *mp, struct fc_frame *fp)
682{
683 struct fc_frame_header *fh = fc_frame_header_get(fp);
684 struct fc_exch *ep = NULL;
685 struct fc_seq *sp = NULL;
686 enum fc_pf_rjt_reason reject = FC_RJT_NONE;
687 u32 f_ctl;
688 u16 xid;
689
690 f_ctl = ntoh24(fh->fh_f_ctl);
691 WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
692
693 /*
694 * Lookup or create the exchange if we will be creating the sequence.
695 */
696 if (f_ctl & FC_FC_EX_CTX) {
697 xid = ntohs(fh->fh_ox_id); /* we originated exch */
698 ep = fc_exch_find(mp, xid);
699 if (!ep) {
700 atomic_inc(&mp->stats.xid_not_found);
701 reject = FC_RJT_OX_ID;
702 goto out;
703 }
704 if (ep->rxid == FC_XID_UNKNOWN)
705 ep->rxid = ntohs(fh->fh_rx_id);
706 else if (ep->rxid != ntohs(fh->fh_rx_id)) {
707 reject = FC_RJT_OX_ID;
708 goto rel;
709 }
710 } else {
711 xid = ntohs(fh->fh_rx_id); /* we are the responder */
712
713 /*
714 * Special case for MDS issuing an ELS TEST with a
715 * bad rxid of 0.
716 * XXX take this out once we do the proper reject.
717 */
718 if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
719 fc_frame_payload_op(fp) == ELS_TEST) {
720 fh->fh_rx_id = htons(FC_XID_UNKNOWN);
721 xid = FC_XID_UNKNOWN;
722 }
723
724 /*
725 * new sequence - find the exchange
726 */
727 ep = fc_exch_find(mp, xid);
728 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
729 if (ep) {
730 atomic_inc(&mp->stats.xid_busy);
731 reject = FC_RJT_RX_ID;
732 goto rel;
733 }
734 ep = fc_exch_resp(mp, fp);
735 if (!ep) {
736 reject = FC_RJT_EXCH_EST; /* XXX */
737 goto out;
738 }
739 xid = ep->xid; /* get our XID */
740 } else if (!ep) {
741 atomic_inc(&mp->stats.xid_not_found);
742 reject = FC_RJT_RX_ID; /* XID not found */
743 goto out;
744 }
745 }
746
747 /*
748 * At this point, we have the exchange held.
749 * Find or create the sequence.
750 */
751 if (fc_sof_is_init(fr_sof(fp))) {
752 sp = fc_seq_start_next(&ep->seq);
753 if (!sp) {
754 reject = FC_RJT_SEQ_XS; /* exchange shortage */
755 goto rel;
756 }
757 sp->id = fh->fh_seq_id;
758 sp->ssb_stat |= SSB_ST_RESP;
759 } else {
760 sp = &ep->seq;
761 if (sp->id != fh->fh_seq_id) {
762 atomic_inc(&mp->stats.seq_not_found);
763 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
764 goto rel;
765 }
766 }
767 WARN_ON(ep != fc_seq_exch(sp));
768
769 if (f_ctl & FC_FC_SEQ_INIT)
770 ep->esb_stat |= ESB_ST_SEQ_INIT;
771
772 fr_seq(fp) = sp;
773out:
774 return reject;
775rel:
776 fc_exch_done(&ep->seq);
777 fc_exch_release(ep); /* hold from fc_exch_find/fc_exch_resp */
778 return reject;
779}
780
781/*
782 * Find the sequence for a frame being received.
783 * We originated the sequence, so it should be found.
784 * We may or may not have originated the exchange.
785 * Does not hold the sequence for the caller.
786 */
787static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
788 struct fc_frame *fp)
789{
790 struct fc_frame_header *fh = fc_frame_header_get(fp);
791 struct fc_exch *ep;
792 struct fc_seq *sp = NULL;
793 u32 f_ctl;
794 u16 xid;
795
796 f_ctl = ntoh24(fh->fh_f_ctl);
797 WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
798 xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
799 ep = fc_exch_find(mp, xid);
800 if (!ep)
801 return NULL;
802 if (ep->seq.id == fh->fh_seq_id) {
803 /*
804 * Save the RX_ID if we didn't previously know it.
805 */
806 sp = &ep->seq;
807 if ((f_ctl & FC_FC_EX_CTX) != 0 &&
808 ep->rxid == FC_XID_UNKNOWN) {
809 ep->rxid = ntohs(fh->fh_rx_id);
810 }
811 }
812 fc_exch_release(ep);
813 return sp;
814}
815
816/*
817 * Set addresses for an exchange.
818 * Note this must be done before the first sequence of the exchange is sent.
819 */
820static void fc_exch_set_addr(struct fc_exch *ep,
821 u32 orig_id, u32 resp_id)
822{
823 ep->oid = orig_id;
824 if (ep->esb_stat & ESB_ST_RESP) {
825 ep->sid = resp_id;
826 ep->did = orig_id;
827 } else {
828 ep->sid = orig_id;
829 ep->did = resp_id;
830 }
831}
832
833static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
834{
835 struct fc_exch *ep = fc_seq_exch(sp);
836
837 sp = fc_seq_alloc(ep, ep->seq_id++);
838 FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x\n",
839 ep->xid, ep->f_ctl, sp->id);
840 return sp;
841}
842/*
843 * Allocate a new sequence on the same exchange as the supplied sequence.
844 * This will never return NULL.
845 */
846struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
847{
848 struct fc_exch *ep = fc_seq_exch(sp);
849
850 spin_lock_bh(&ep->ex_lock);
851 WARN_ON((ep->esb_stat & ESB_ST_COMPLETE) != 0);
852 sp = fc_seq_start_next_locked(sp);
853 spin_unlock_bh(&ep->ex_lock);
854
855 return sp;
856}
857EXPORT_SYMBOL(fc_seq_start_next);
858
859int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp)
860{
861 struct fc_exch *ep;
862 struct fc_frame_header *fh = fc_frame_header_get(fp);
863 int error;
864 u32 f_ctl;
865
866 ep = fc_seq_exch(sp);
867 WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
868
869 f_ctl = ntoh24(fh->fh_f_ctl);
870 fc_exch_setup_hdr(ep, fp, f_ctl);
871
872 /*
873 * update sequence count if this frame is carrying
874 * multiple FC frames when sequence offload is enabled
875 * by LLD.
876 */
877 if (fr_max_payload(fp))
878 sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
879 fr_max_payload(fp));
880 else
881 sp->cnt++;
882
883 /*
884 * Send the frame.
885 */
886 error = lp->tt.frame_send(lp, fp);
887
888 /*
889 * Update the exchange and sequence flags,
890 * assuming all frames for the sequence have been sent.
891 * We can only be called to send once for each sequence.
892 */
893 spin_lock_bh(&ep->ex_lock);
894 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
895 if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
896 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
897 spin_unlock_bh(&ep->ex_lock);
898 return error;
899}
900EXPORT_SYMBOL(fc_seq_send);
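/*
 * Illustrative sketch, not part of the patch: the sequence-count
 * update above under sequence offload. One large frame carrying an
 * 8192-byte payload with fr_max_payload == 2048 advances the count by
 * one per wire frame the LLD will emit; without offload it advances
 * by exactly one.
 */
#if 0	/* example only */
	sp->cnt += DIV_ROUND_UP(8192, 2048);	/* += 4 wire frames */
#endif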
901
902void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
903 struct fc_seq_els_data *els_data)
904{
905 switch (els_cmd) {
906 case ELS_LS_RJT:
907 fc_seq_ls_rjt(sp, els_data->reason, els_data->explan);
908 break;
909 case ELS_LS_ACC:
910 fc_seq_ls_acc(sp);
911 break;
912 case ELS_RRQ:
913 fc_exch_els_rrq(sp, els_data->fp);
914 break;
915 case ELS_REC:
916 fc_exch_els_rec(sp, els_data->fp);
917 break;
918 default:
919 FC_DBG("Invalid ELS CMD:%x\n", els_cmd);
920 }
921}
922EXPORT_SYMBOL(fc_seq_els_rsp_send);
923
924/*
925 * Send a sequence, which is also the last sequence in the exchange.
926 */
927static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
928 enum fc_rctl rctl, enum fc_fh_type fh_type)
929{
930 u32 f_ctl;
931 struct fc_exch *ep = fc_seq_exch(sp);
932
933 f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
934 f_ctl |= ep->f_ctl;
935 fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
936 fc_seq_send(ep->lp, sp, fp);
937}
938
939/*
940 * Send ACK_1 (or equiv.) indicating we received something.
941 * The frame we're acking is supplied.
942 */
943static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
944{
945 struct fc_frame *fp;
946 struct fc_frame_header *rx_fh;
947 struct fc_frame_header *fh;
948 struct fc_exch *ep = fc_seq_exch(sp);
949 struct fc_lport *lp = ep->lp;
950 unsigned int f_ctl;
951
952 /*
953 * Don't send ACKs for class 3.
954 */
955 if (fc_sof_needs_ack(fr_sof(rx_fp))) {
956 fp = fc_frame_alloc(lp, 0);
957 if (!fp)
958 return;
959
960 fh = fc_frame_header_get(fp);
961 fh->fh_r_ctl = FC_RCTL_ACK_1;
962 fh->fh_type = FC_TYPE_BLS;
963
964 /*
965 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
966 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
967 * Bits 9-8 are meaningful (retransmitted or unidirectional).
968 * Last ACK uses bits 7-6 (continue sequence),
969 * bits 5-4 are meaningful (what kind of ACK to use).
970 */
971 rx_fh = fc_frame_header_get(rx_fp);
972 f_ctl = ntoh24(rx_fh->fh_f_ctl);
973 f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
974 FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
975 FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
976 FC_FC_RETX_SEQ | FC_FC_UNI_TX;
977 f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
978 hton24(fh->fh_f_ctl, f_ctl);
979
980 fc_exch_setup_hdr(ep, fp, f_ctl);
981 fh->fh_seq_id = rx_fh->fh_seq_id;
982 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
983 fh->fh_parm_offset = htonl(1); /* ack single frame */
984
985 fr_sof(fp) = fr_sof(rx_fp);
986 if (f_ctl & FC_FC_END_SEQ)
987 fr_eof(fp) = FC_EOF_T;
988 else
989 fr_eof(fp) = FC_EOF_N;
990
991 (void) lp->tt.frame_send(lp, fp);
992 }
993}
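/*
 * Illustrative sketch, not part of the patch: forming the ACK f_ctl
 * above. EX_CTX/SEQ_CTX from the received frame are flipped so the ACK
 * flows the opposite way; FIRST_SEQ/END_SEQ and friends are echoed.
 */
#if 0	/* example only */
	/* rx f_ctl: EX_CTX=0, SEQ_CTX=0, FIRST_SEQ|END_SEQ set */
	f_ctl = FC_FC_FIRST_SEQ | FC_FC_END_SEQ;
	f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
	/* ack f_ctl: EX_CTX=1, SEQ_CTX=1, FIRST_SEQ|END_SEQ echoed */
#endif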
994
995/*
996 * Send BLS Reject.
997 * This is for rejecting BA_ABTS only.
998 */
999static void
1000fc_exch_send_ba_rjt(struct fc_frame *rx_fp, enum fc_ba_rjt_reason reason,
1001 enum fc_ba_rjt_explan explan)
1002{
1003 struct fc_frame *fp;
1004 struct fc_frame_header *rx_fh;
1005 struct fc_frame_header *fh;
1006 struct fc_ba_rjt *rp;
1007 struct fc_lport *lp;
1008 unsigned int f_ctl;
1009
1010 lp = fr_dev(rx_fp);
1011 fp = fc_frame_alloc(lp, sizeof(*rp));
1012 if (!fp)
1013 return;
1014 fh = fc_frame_header_get(fp);
1015 rx_fh = fc_frame_header_get(rx_fp);
1016
1017 memset(fh, 0, sizeof(*fh) + sizeof(*rp));
1018
1019 rp = fc_frame_payload_get(fp, sizeof(*rp));
1020 rp->br_reason = reason;
1021 rp->br_explan = explan;
1022
1023 /*
1024 * seq_id, cs_ctl, df_ctl and param/offset are zero.
1025 */
1026 memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
1027 memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
1028 fh->fh_ox_id = rx_fh->fh_rx_id;
1029 fh->fh_rx_id = rx_fh->fh_ox_id;
1030 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1031 fh->fh_r_ctl = FC_RCTL_BA_RJT;
1032 fh->fh_type = FC_TYPE_BLS;
1033
1034 /*
1035 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1036 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1037 * Bits 9-8 are meaningful (retransmitted or unidirectional).
1038 * Last ACK uses bits 7-6 (continue sequence),
1039 * bits 5-4 are meaningful (what kind of ACK to use).
1040 * Always set LAST_SEQ, END_SEQ.
1041 */
1042 f_ctl = ntoh24(rx_fh->fh_f_ctl);
1043 f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1044 FC_FC_END_CONN | FC_FC_SEQ_INIT |
1045 FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1046 f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1047 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
1048 f_ctl &= ~FC_FC_FIRST_SEQ;
1049 hton24(fh->fh_f_ctl, f_ctl);
1050
1051 fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
1052 fr_eof(fp) = FC_EOF_T;
1053 if (fc_sof_needs_ack(fr_sof(fp)))
1054 fr_eof(fp) = FC_EOF_N;
1055
1056 (void) lp->tt.frame_send(lp, fp);
1057}
1058
1059/*
1060 * Handle an incoming ABTS. This would be for target mode usually,
1061 * but could be due to lost FCP transfer ready, confirm or RRQ.
1062 * We always handle this as an exchange abort, ignoring the parameter.
1063 */
1064static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
1065{
1066 struct fc_frame *fp;
1067 struct fc_ba_acc *ap;
1068 struct fc_frame_header *fh;
1069 struct fc_seq *sp;
1070
1071 if (!ep)
1072 goto reject;
1073 spin_lock_bh(&ep->ex_lock);
1074 if (ep->esb_stat & ESB_ST_COMPLETE) {
1075 spin_unlock_bh(&ep->ex_lock);
1076 goto reject;
1077 }
1078 if (!(ep->esb_stat & ESB_ST_REC_QUAL))
1079 fc_exch_hold(ep); /* hold for REC_QUAL */
1080 ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
1081 fc_exch_timer_set_locked(ep, ep->r_a_tov);
1082
1083 fp = fc_frame_alloc(ep->lp, sizeof(*ap));
1084 if (!fp) {
1085 spin_unlock_bh(&ep->ex_lock);
1086 goto free;
1087 }
1088	fh = fc_frame_header_get(rx_fp);
Note: the SEQ_CNT echoed into the BA_ACC below should come from the received ABTS header, so the header pointer here is taken from rx_fp rather than from the freshly allocated (and still unfilled) reply frame.
1089 ap = fc_frame_payload_get(fp, sizeof(*ap));
1090 memset(ap, 0, sizeof(*ap));
1091 sp = &ep->seq;
1092 ap->ba_high_seq_cnt = htons(0xffff);
1093 if (sp->ssb_stat & SSB_ST_RESP) {
1094 ap->ba_seq_id = sp->id;
1095 ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
1096 ap->ba_high_seq_cnt = fh->fh_seq_cnt;
1097 ap->ba_low_seq_cnt = htons(sp->cnt);
1098 }
1099 sp = fc_seq_start_next(sp);
1100 spin_unlock_bh(&ep->ex_lock);
1101 fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
1102 fc_frame_free(rx_fp);
1103 return;
1104
1105reject:
1106 fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
1107free:
1108 fc_frame_free(rx_fp);
1109}
1110
1111/*
1112 * Handle receive where the other end is originating the sequence.
1113 */
1114static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
1115 struct fc_frame *fp)
1116{
1117 struct fc_frame_header *fh = fc_frame_header_get(fp);
1118 struct fc_seq *sp = NULL;
1119 struct fc_exch *ep = NULL;
1120 enum fc_sof sof;
1121 enum fc_eof eof;
1122 u32 f_ctl;
1123 enum fc_pf_rjt_reason reject;
1124
1125 fr_seq(fp) = NULL;
1126 reject = fc_seq_lookup_recip(mp, fp);
1127 if (reject == FC_RJT_NONE) {
1128 sp = fr_seq(fp); /* sequence will be held */
1129 ep = fc_seq_exch(sp);
1130 sof = fr_sof(fp);
1131 eof = fr_eof(fp);
1132 f_ctl = ntoh24(fh->fh_f_ctl);
1133 fc_seq_send_ack(sp, fp);
1134
1135 /*
1136 * Call the receive function.
1137 *
1138 * The receive function may allocate a new sequence
1139 * over the old one, so we shouldn't change the
1140 * sequence after this.
1141 *
1142 * The frame will be freed by the receive function.
1143 * If new exch resp handler is valid then call that
1144 * first.
1145 */
1146 if (ep->resp)
1147 ep->resp(sp, fp, ep->arg);
1148 else
1149 lp->tt.lport_recv(lp, sp, fp);
1150 fc_exch_release(ep); /* release from lookup */
1151 } else {
1152 FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject);
1153 fc_frame_free(fp);
1154 }
1155}
1156
1157/*
1158 * Handle receive where the other end is originating the sequence in
1159 * response to our exchange.
1160 */
1161static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1162{
1163 struct fc_frame_header *fh = fc_frame_header_get(fp);
1164 struct fc_seq *sp;
1165 struct fc_exch *ep;
1166 enum fc_sof sof;
1167 u32 f_ctl;
1168 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1169 void *ex_resp_arg;
1170 int rc;
1171
1172 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
1173 if (!ep) {
1174 atomic_inc(&mp->stats.xid_not_found);
1175 goto out;
1176 }
1177 if (ep->rxid == FC_XID_UNKNOWN)
1178 ep->rxid = ntohs(fh->fh_rx_id);
1179 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
1180 atomic_inc(&mp->stats.xid_not_found);
1181 goto rel;
1182 }
1183 if (ep->did != ntoh24(fh->fh_s_id) &&
1184 ep->did != FC_FID_FLOGI) {
1185 atomic_inc(&mp->stats.xid_not_found);
1186 goto rel;
1187 }
1188 sof = fr_sof(fp);
1189 if (fc_sof_is_init(sof)) {
1190 sp = fc_seq_start_next(&ep->seq);
1191 sp->id = fh->fh_seq_id;
1192 sp->ssb_stat |= SSB_ST_RESP;
1193 } else {
1194 sp = &ep->seq;
1195 if (sp->id != fh->fh_seq_id) {
1196 atomic_inc(&mp->stats.seq_not_found);
1197 goto rel;
1198 }
1199 }
1200 f_ctl = ntoh24(fh->fh_f_ctl);
1201 fr_seq(fp) = sp;
1202 if (f_ctl & FC_FC_SEQ_INIT)
1203 ep->esb_stat |= ESB_ST_SEQ_INIT;
1204
1205 if (fc_sof_needs_ack(sof))
1206 fc_seq_send_ack(sp, fp);
1207 resp = ep->resp;
1208 ex_resp_arg = ep->arg;
1209
1210 if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
1211 (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1212 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1213 spin_lock_bh(&ep->ex_lock);
1214 rc = fc_exch_done_locked(ep);
1215 WARN_ON(fc_seq_exch(sp) != ep);
1216 spin_unlock_bh(&ep->ex_lock);
1217 if (!rc)
1218 fc_exch_mgr_delete_ep(ep);
1219 }
1220
1221 /*
1222 * Call the receive function.
1223 * The sequence is held (has a refcnt) for us,
1224 * but not for the receive function.
1225 *
1226 * The receive function may allocate a new sequence
1227 * over the old one, so we shouldn't change the
1228 * sequence after this.
1229 *
1230 * The frame will be freed by the receive function.
1231 * If new exch resp handler is valid then call that
1232 * first.
1233 */
1234 if (resp)
1235 resp(sp, fp, ex_resp_arg);
1236 else
1237 fc_frame_free(fp);
1238 fc_exch_release(ep);
1239 return;
1240rel:
1241 fc_exch_release(ep);
1242out:
1243 fc_frame_free(fp);
1244}
1245
1246/*
1247 * Handle receive for a sequence where other end is responding to our sequence.
1248 */
1249static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1250{
1251 struct fc_seq *sp;
1252
1253 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
1254 if (!sp) {
1255 atomic_inc(&mp->stats.xid_not_found);
1256 FC_DEBUG_EXCH("seq lookup failed\n");
1257 } else {
1258 atomic_inc(&mp->stats.non_bls_resp);
1259		FC_DEBUG_EXCH("non-BLS response to sequence\n");
1260 }
1261 fc_frame_free(fp);
1262}
1263
1264/*
1265 * Handle the response to an ABTS for exchange or sequence.
1266 * This can be BA_ACC or BA_RJT.
1267 */
1268static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1269{
1270 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1271 void *ex_resp_arg;
1272 struct fc_frame_header *fh;
1273 struct fc_ba_acc *ap;
1274 struct fc_seq *sp;
1275 u16 low;
1276 u16 high;
1277 int rc = 1, has_rec = 0;
1278
1279 fh = fc_frame_header_get(fp);
1280 FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n",
1281 fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
1282
1283 if (cancel_delayed_work_sync(&ep->timeout_work))
1284 fc_exch_release(ep); /* release from pending timer hold */
1285
1286 spin_lock_bh(&ep->ex_lock);
1287 switch (fh->fh_r_ctl) {
1288 case FC_RCTL_BA_ACC:
1289 ap = fc_frame_payload_get(fp, sizeof(*ap));
1290 if (!ap)
1291 break;
1292
1293 /*
1294 * Decide whether to establish a Recovery Qualifier.
1295 * We do this if there is a non-empty SEQ_CNT range and
1296 * SEQ_ID is the same as the one we aborted.
1297 */
1298 low = ntohs(ap->ba_low_seq_cnt);
1299 high = ntohs(ap->ba_high_seq_cnt);
1300 if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
1301 (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
1302 ap->ba_seq_id == ep->seq_id) && low != high) {
1303 ep->esb_stat |= ESB_ST_REC_QUAL;
1304 fc_exch_hold(ep); /* hold for recovery qualifier */
1305 has_rec = 1;
1306 }
1307 break;
1308 case FC_RCTL_BA_RJT:
1309 break;
1310 default:
1311 break;
1312 }
1313
1314 resp = ep->resp;
1315 ex_resp_arg = ep->arg;
1316
1317	/* Do we need any additional checks here? Can we reuse more of
1318	 * fc_exch_recv_seq_resp()?
1319	 */
1320 sp = &ep->seq;
1321 /*
1322 * do we want to check END_SEQ as well as LAST_SEQ here?
1323 */
1324 if (ep->fh_type != FC_TYPE_FCP &&
1325 ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
1326 rc = fc_exch_done_locked(ep);
1327 spin_unlock_bh(&ep->ex_lock);
1328 if (!rc)
1329 fc_exch_mgr_delete_ep(ep);
1330
1331 if (resp)
1332 resp(sp, fp, ex_resp_arg);
1333 else
1334 fc_frame_free(fp);
1335
1336 if (has_rec)
1337 fc_exch_timer_set(ep, ep->r_a_tov);
1338
1339}
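
The Recovery Qualifier decision above condenses to a small predicate. This standalone restatement uses invented names and simply mirrors the three conditions tested on the BA_ACC: don't double-hold, require a matching (or absent) SEQ_ID, and only protect a non-empty SEQ_CNT range.

#include <stdbool.h>
#include <stdint.h>

static bool needs_recovery_qualifier(bool already_held, bool seq_id_valid,
				     uint8_t acc_seq_id, uint8_t aborted_seq_id,
				     uint16_t low, uint16_t high)
{
	if (already_held)
		return false;
	if (seq_id_valid && acc_seq_id != aborted_seq_id)
		return false;
	return low != high;	/* empty SEQ_CNT range: nothing to protect */
}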
1340
1341/*
1342 * Receive BLS sequence.
1343 * This is always a sequence initiated by the remote side.
1344 * We may be either the originator or recipient of the exchange.
1345 */
1346static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1347{
1348 struct fc_frame_header *fh;
1349 struct fc_exch *ep;
1350 u32 f_ctl;
1351
1352 fh = fc_frame_header_get(fp);
1353 f_ctl = ntoh24(fh->fh_f_ctl);
1354 fr_seq(fp) = NULL;
1355
1356 ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
1357 ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
1358 if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
1359 spin_lock_bh(&ep->ex_lock);
1360 ep->esb_stat |= ESB_ST_SEQ_INIT;
1361 spin_unlock_bh(&ep->ex_lock);
1362 }
1363 if (f_ctl & FC_FC_SEQ_CTX) {
1364 /*
1365 * A response to a sequence we initiated.
1366 * This should only be ACKs for class 2 or F.
1367 */
1368 switch (fh->fh_r_ctl) {
1369 case FC_RCTL_ACK_1:
1370 case FC_RCTL_ACK_0:
1371 break;
1372 default:
1373			FC_DEBUG_EXCH("BLS rctl %x - %s received\n",
1374 fh->fh_r_ctl,
1375 fc_exch_rctl_name(fh->fh_r_ctl));
1376 break;
1377 }
1378 fc_frame_free(fp);
1379 } else {
1380 switch (fh->fh_r_ctl) {
1381 case FC_RCTL_BA_RJT:
1382 case FC_RCTL_BA_ACC:
1383 if (ep)
1384 fc_exch_abts_resp(ep, fp);
1385 else
1386 fc_frame_free(fp);
1387 break;
1388 case FC_RCTL_BA_ABTS:
1389 fc_exch_recv_abts(ep, fp);
1390 break;
1391 default: /* ignore junk */
1392 fc_frame_free(fp);
1393 break;
1394 }
1395 }
1396 if (ep)
1397 fc_exch_release(ep); /* release hold taken by fc_exch_find */
1398}
1399
1400/*
1401 * Accept sequence with LS_ACC.
1402 * If this fails due to allocation or transmit congestion, assume the
1403 * originator will repeat the sequence.
1404 */
1405static void fc_seq_ls_acc(struct fc_seq *req_sp)
1406{
1407 struct fc_seq *sp;
1408 struct fc_els_ls_acc *acc;
1409 struct fc_frame *fp;
1410
1411 sp = fc_seq_start_next(req_sp);
1412 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
1413 if (fp) {
1414 acc = fc_frame_payload_get(fp, sizeof(*acc));
1415 memset(acc, 0, sizeof(*acc));
1416 acc->la_cmd = ELS_LS_ACC;
1417 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1418 }
1419}
1420
1421/*
1422 * Reject sequence with ELS LS_RJT.
1423 * If this fails due to allocation or transmit congestion, assume the
1424 * originator will repeat the sequence.
1425 */
1426static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
1427 enum fc_els_rjt_explan explan)
1428{
1429 struct fc_seq *sp;
1430 struct fc_els_ls_rjt *rjt;
1431 struct fc_frame *fp;
1432
1433 sp = fc_seq_start_next(req_sp);
1434 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*rjt));
1435 if (fp) {
1436 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1437 memset(rjt, 0, sizeof(*rjt));
1438 rjt->er_cmd = ELS_LS_RJT;
1439 rjt->er_reason = reason;
1440 rjt->er_explan = explan;
1441 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1442 }
1443}
1444
1445static void fc_exch_reset(struct fc_exch *ep)
1446{
1447 struct fc_seq *sp;
1448 void (*resp)(struct fc_seq *, struct fc_frame *, void *);
1449 void *arg;
1450 int rc = 1;
1451
1452 spin_lock_bh(&ep->ex_lock);
1453 ep->state |= FC_EX_RST_CLEANUP;
1454 /*
1455	 * We would really like to call del_timer_sync(), but cannot
1456	 * because the lport calls in with the lport lock held (some resp
1457	 * functions can also grab the lport lock, which could cause
1458	 * a deadlock).
1459 */
1460 if (cancel_delayed_work(&ep->timeout_work))
1461 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
1462 resp = ep->resp;
1463 ep->resp = NULL;
1464 if (ep->esb_stat & ESB_ST_REC_QUAL)
1465 atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
1466 ep->esb_stat &= ~ESB_ST_REC_QUAL;
1467 arg = ep->arg;
1468 sp = &ep->seq;
1469 rc = fc_exch_done_locked(ep);
1470 spin_unlock_bh(&ep->ex_lock);
1471 if (!rc)
1472 fc_exch_mgr_delete_ep(ep);
1473
1474 if (resp)
1475 resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
1476}
1477
1478/*
1479 * Reset an exchange manager, releasing all sequences and exchanges.
1480 * If sid is non-zero, reset only exchanges we source from that FID.
1481 * If did is non-zero, reset only exchanges destined to that FID.
1482 */
1483void fc_exch_mgr_reset(struct fc_exch_mgr *mp, u32 sid, u32 did)
1484{
1485 struct fc_exch *ep;
1486 struct fc_exch *next;
1487
1488 spin_lock_bh(&mp->em_lock);
1489restart:
1490 list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
1491 if ((sid == 0 || sid == ep->sid) &&
1492 (did == 0 || did == ep->did)) {
1493 fc_exch_hold(ep);
1494 spin_unlock_bh(&mp->em_lock);
1495
1496 fc_exch_reset(ep);
1497
1498 fc_exch_release(ep);
1499 spin_lock_bh(&mp->em_lock);
1500
1501 /*
1502			 * must restart the loop in case multiple eps
1503			 * were released while the lock was dropped.
1504 */
1505 goto restart;
1506 }
1507 }
1508 spin_unlock_bh(&mp->em_lock);
1509}
1510EXPORT_SYMBOL(fc_exch_mgr_reset);
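
Two hypothetical callers illustrate how the sid/did filters above are meant to be used; the helper functions are invented for the sketch, but the exch_mgr_reset template entry and lp->emp are from this patch.

/* Illustrative only: full link reset vs. per-remote-port cleanup. */
static void example_link_reset(struct fc_lport *lp)
{
	/* sid == 0 and did == 0: match (and reset) every exchange. */
	lp->tt.exch_mgr_reset(lp->emp, 0, 0);
}

static void example_rport_reset(struct fc_lport *lp, u32 remote_fid)
{
	/* Only exchanges destined to this remote FID are reset. */
	lp->tt.exch_mgr_reset(lp->emp, 0, remote_fid);
}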
1511
1512/*
1513 * Handle incoming ELS REC - Read Exchange Concise.
1514 * Note that the requesting port may be different from the S_ID in the request.
1515 */
1516static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
1517{
1518 struct fc_frame *fp;
1519 struct fc_exch *ep;
1520 struct fc_exch_mgr *em;
1521 struct fc_els_rec *rp;
1522 struct fc_els_rec_acc *acc;
1523 enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
1524 enum fc_els_rjt_explan explan;
1525 u32 sid;
1526 u16 rxid;
1527 u16 oxid;
1528
1529 rp = fc_frame_payload_get(rfp, sizeof(*rp));
1530 explan = ELS_EXPL_INV_LEN;
1531 if (!rp)
1532 goto reject;
1533 sid = ntoh24(rp->rec_s_id);
1534 rxid = ntohs(rp->rec_rx_id);
1535 oxid = ntohs(rp->rec_ox_id);
1536
1537 /*
1538 * Currently it's hard to find the local S_ID from the exchange
1539 * manager. This will eventually be fixed, but for now it's easier
1540 * to look up the subject exchange twice, once as if we were
1541 * the initiator, and then again if we weren't.
1542 */
1543 em = fc_seq_exch(sp)->em;
1544 ep = fc_exch_find(em, oxid);
1545 explan = ELS_EXPL_OXID_RXID;
1546 if (ep && ep->oid == sid) {
1547 if (ep->rxid != FC_XID_UNKNOWN &&
1548 rxid != FC_XID_UNKNOWN &&
1549 ep->rxid != rxid)
1550 goto rel;
1551 } else {
1552 if (ep)
1553 fc_exch_release(ep);
1554 ep = NULL;
1555 if (rxid != FC_XID_UNKNOWN)
1556 ep = fc_exch_find(em, rxid);
1557 if (!ep)
1558 goto reject;
1559 }
1560
1561 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
1562 if (!fp) {
1563 fc_exch_done(sp);
1564 goto out;
1565 }
1566 sp = fc_seq_start_next(sp);
1567 acc = fc_frame_payload_get(fp, sizeof(*acc));
1568 memset(acc, 0, sizeof(*acc));
1569 acc->reca_cmd = ELS_LS_ACC;
1570 acc->reca_ox_id = rp->rec_ox_id;
1571 memcpy(acc->reca_ofid, rp->rec_s_id, 3);
1572 acc->reca_rx_id = htons(ep->rxid);
1573 if (ep->sid == ep->oid)
1574 hton24(acc->reca_rfid, ep->did);
1575 else
1576 hton24(acc->reca_rfid, ep->sid);
1577 acc->reca_fc4value = htonl(ep->seq.rec_data);
1578 acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
1579 ESB_ST_SEQ_INIT |
1580 ESB_ST_COMPLETE));
1581 sp = fc_seq_start_next(sp);
1582 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1583out:
1584 fc_exch_release(ep);
1585 fc_frame_free(rfp);
1586 return;
1587
1588rel:
1589 fc_exch_release(ep);
1590reject:
1591 fc_seq_ls_rjt(sp, reason, explan);
1592 fc_frame_free(rfp);
1593}
1594
1595/*
1596 * Handle response from RRQ.
1597 * Not much to do here, really.
1598 * Should report errors.
1599 *
1600 * TODO: fix error handler.
1601 */
1602static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1603{
1604 struct fc_exch *aborted_ep = arg;
1605 unsigned int op;
1606
1607 if (IS_ERR(fp)) {
1608 int err = PTR_ERR(fp);
1609
1610 if (err == -FC_EX_CLOSED)
1611 goto cleanup;
1612 FC_DBG("Cannot process RRQ, because of frame error %d\n", err);
1613 return;
1614 }
1615
1616 op = fc_frame_payload_op(fp);
1617 fc_frame_free(fp);
1618
1619 switch (op) {
1620 case ELS_LS_RJT:
1621		FC_DBG("LS_RJT for RRQ\n");
1622 /* fall through */
1623 case ELS_LS_ACC:
1624 goto cleanup;
1625 default:
1626		FC_DBG("unexpected response op %x for RRQ\n", op);
1627 return;
1628 }
1629
1630cleanup:
1631 fc_exch_done(&aborted_ep->seq);
1632 /* drop hold for rec qual */
1633 fc_exch_release(aborted_ep);
1634}
1635
1636/*
1637 * Send ELS RRQ - Reinstate Recovery Qualifier.
1638 * This tells the remote port to stop blocking the use of
1639 * the exchange and the seq_cnt range.
1640 */
1641static void fc_exch_rrq(struct fc_exch *ep)
1642{
1643 struct fc_lport *lp;
1644 struct fc_els_rrq *rrq;
1645 struct fc_frame *fp;
1646 struct fc_seq *rrq_sp;
1647 u32 did;
1648
1649 lp = ep->lp;
1650
1651 fp = fc_frame_alloc(lp, sizeof(*rrq));
1652 if (!fp)
1653 return;
1654 rrq = fc_frame_payload_get(fp, sizeof(*rrq));
1655 memset(rrq, 0, sizeof(*rrq));
1656 rrq->rrq_cmd = ELS_RRQ;
1657 hton24(rrq->rrq_s_id, ep->sid);
1658 rrq->rrq_ox_id = htons(ep->oxid);
1659 rrq->rrq_rx_id = htons(ep->rxid);
1660
1661 did = ep->did;
1662 if (ep->esb_stat & ESB_ST_RESP)
1663 did = ep->sid;
1664
1665 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
1666 fc_host_port_id(lp->host), FC_TYPE_ELS,
1667 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1668
1669 rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep,
1670 lp->e_d_tov);
1671 if (!rrq_sp) {
1672 ep->esb_stat |= ESB_ST_REC_QUAL;
1673 fc_exch_timer_set_locked(ep, ep->r_a_tov);
1674 return;
1675 }
1676}
1677
1678
1679/*
1680 * Handle incoming ELS RRQ - Reinstate Recovery Qualifier.
1681 */
1682static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
1683{
1684 struct fc_exch *ep; /* request or subject exchange */
1685 struct fc_els_rrq *rp;
1686 u32 sid;
1687 u16 xid;
1688 enum fc_els_rjt_explan explan;
1689
1690 rp = fc_frame_payload_get(fp, sizeof(*rp));
1691 explan = ELS_EXPL_INV_LEN;
1692 if (!rp)
1693 goto reject;
1694
1695 /*
1696 * lookup subject exchange.
1697 */
1698 ep = fc_seq_exch(sp);
1699 sid = ntoh24(rp->rrq_s_id); /* subject source */
1700 xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
1701 ep = fc_exch_find(ep->em, xid);
1702
1703 explan = ELS_EXPL_OXID_RXID;
1704 if (!ep)
1705 goto reject;
1706 spin_lock_bh(&ep->ex_lock);
1707 if (ep->oxid != ntohs(rp->rrq_ox_id))
1708 goto unlock_reject;
1709 if (ep->rxid != ntohs(rp->rrq_rx_id) &&
1710 ep->rxid != FC_XID_UNKNOWN)
1711 goto unlock_reject;
1712 explan = ELS_EXPL_SID;
1713 if (ep->sid != sid)
1714 goto unlock_reject;
1715
1716 /*
1717 * Clear Recovery Qualifier state, and cancel timer if complete.
1718 */
1719 if (ep->esb_stat & ESB_ST_REC_QUAL) {
1720 ep->esb_stat &= ~ESB_ST_REC_QUAL;
1721 atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
1722 }
1723 if (ep->esb_stat & ESB_ST_COMPLETE) {
1724 if (cancel_delayed_work(&ep->timeout_work))
1725 atomic_dec(&ep->ex_refcnt); /* drop timer hold */
1726 }
1727
1728 spin_unlock_bh(&ep->ex_lock);
1729
1730 /*
1731 * Send LS_ACC.
1732 */
1733 fc_seq_ls_acc(sp);
1734 fc_frame_free(fp);
1735 return;
1736
1737unlock_reject:
1738 spin_unlock_bh(&ep->ex_lock);
1739 fc_exch_release(ep); /* drop hold from fc_exch_find */
1740reject:
1741 fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
1742 fc_frame_free(fp);
1743}
1744
1745struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
1746 enum fc_class class,
1747 u16 min_xid, u16 max_xid)
1748{
1749 struct fc_exch_mgr *mp;
1750 size_t len;
1751
1752 if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
1753		FC_DBG("Invalid min_xid 0x%x and max_xid 0x%x\n",
1754 min_xid, max_xid);
1755 return NULL;
1756 }
1757
1758 /*
1759	 * Memory needed for the EM
1760 */
1761#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2)))
1762 len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
1763 len += sizeof(struct fc_exch_mgr);
1764
1765 mp = kzalloc(len, GFP_ATOMIC);
1766 if (!mp)
1767 return NULL;
1768
1769 mp->class = class;
1770 mp->total_exches = 0;
1771 mp->exches = (struct fc_exch **)(mp + 1);
1772 mp->lp = lp;
1773 /* adjust em exch xid range for offload */
1774 mp->min_xid = min_xid;
1775 mp->max_xid = max_xid;
1776 mp->last_xid = min_xid - 1;
1777 mp->max_read = 0;
1778 mp->last_read = 0;
1779 if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) {
1780 mp->max_read = lp->lro_xid;
1781 mp->last_read = min_xid - 1;
1782 mp->last_xid = mp->max_read;
1783 } else {
1784 /* disable lro if no xid control over read */
1785 lp->lro_enabled = 0;
1786 }
1787
1788 INIT_LIST_HEAD(&mp->ex_list);
1789 spin_lock_init(&mp->em_lock);
1790
1791 mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
1792 if (!mp->ep_pool)
1793 goto free_mp;
1794
1795 return mp;
1796
1797free_mp:
1798 kfree(mp);
1799 return NULL;
1800}
1801EXPORT_SYMBOL(fc_exch_mgr_alloc);
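
fc_exch_mgr_alloc() above sizes a single allocation for the manager plus its XID-indexed pointer table and points exches just past the header. A reduced userspace sketch of the same single-allocation idiom, with stand-in struct and field names:

#include <stdlib.h>

struct fc_exch;			/* opaque for the sketch */

struct em_sketch {
	unsigned short min_xid;
	unsigned short max_xid;
	struct fc_exch **exches;	/* trailing array, not a 2nd alloc */
};

static struct em_sketch *em_sketch_alloc(unsigned short min_xid,
					 unsigned short max_xid)
{
	size_t slots = (size_t)max_xid - min_xid + 1;
	struct em_sketch *mp;

	/* One zeroed block: header followed by the pointer table. */
	mp = calloc(1, sizeof(*mp) + slots * sizeof(struct fc_exch *));
	if (!mp)
		return NULL;
	mp->min_xid = min_xid;
	mp->max_xid = max_xid;
	mp->exches = (struct fc_exch **)(mp + 1);	/* same trick as above */
	return mp;
}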
1802
1803void fc_exch_mgr_free(struct fc_exch_mgr *mp)
1804{
1805 WARN_ON(!mp);
1806 /*
1807 * The total exch count must be zero
1808 * before freeing exchange manager.
1809 */
1810 WARN_ON(mp->total_exches != 0);
1811 mempool_destroy(mp->ep_pool);
1812 kfree(mp);
1813}
1814EXPORT_SYMBOL(fc_exch_mgr_free);
1815
1816struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
1817{
1818 if (!lp || !lp->emp)
1819 return NULL;
1820
1821 return fc_exch_alloc(lp->emp, fp, 0);
1822}
1823EXPORT_SYMBOL(fc_exch_get);
1824
1825struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
1826 struct fc_frame *fp,
1827 void (*resp)(struct fc_seq *,
1828 struct fc_frame *fp,
1829 void *arg),
1830 void (*destructor)(struct fc_seq *, void *),
1831 void *arg, u32 timer_msec)
1832{
1833 struct fc_exch *ep;
1834 struct fc_seq *sp = NULL;
1835 struct fc_frame_header *fh;
1836 int rc = 1;
1837
1838 ep = lp->tt.exch_get(lp, fp);
1839 if (!ep) {
1840 fc_frame_free(fp);
1841 return NULL;
1842 }
1843 ep->esb_stat |= ESB_ST_SEQ_INIT;
1844 fh = fc_frame_header_get(fp);
1845 fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
1846 ep->resp = resp;
1847 ep->destructor = destructor;
1848 ep->arg = arg;
1849 ep->r_a_tov = FC_DEF_R_A_TOV;
1850 ep->lp = lp;
1851 sp = &ep->seq;
1852
1853	ep->fh_type = fh->fh_type; /* save for possible timeout handling */
1854 ep->f_ctl = ntoh24(fh->fh_f_ctl);
1855 fc_exch_setup_hdr(ep, fp, ep->f_ctl);
1856 sp->cnt++;
1857
1858 if (unlikely(lp->tt.frame_send(lp, fp)))
1859 goto err;
1860
1861 if (timer_msec)
1862 fc_exch_timer_set_locked(ep, timer_msec);
1863 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
1864
1865 if (ep->f_ctl & FC_FC_SEQ_INIT)
1866 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
1867 spin_unlock_bh(&ep->ex_lock);
1868 return sp;
1869err:
1870 rc = fc_exch_done_locked(ep);
1871 spin_unlock_bh(&ep->ex_lock);
1872 if (!rc)
1873 fc_exch_mgr_delete_ep(ep);
1874 return NULL;
1875}
1876EXPORT_SYMBOL(fc_exch_seq_send);
1877
1878/*
1879 * Receive a frame
1880 */
1881void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
1882 struct fc_frame *fp)
1883{
1884 struct fc_frame_header *fh = fc_frame_header_get(fp);
1885 u32 f_ctl;
1886
1887 /* lport lock ? */
1888 if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
1889		FC_DBG("fc_lport or EM is not allocated and configured\n");
1890 fc_frame_free(fp);
1891 return;
1892 }
1893
1894 /*
1895 * If frame is marked invalid, just drop it.
1896 */
1897 f_ctl = ntoh24(fh->fh_f_ctl);
1898 switch (fr_eof(fp)) {
1899 case FC_EOF_T:
1900 if (f_ctl & FC_FC_END_SEQ)
1901 skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
1902 /* fall through */
1903 case FC_EOF_N:
1904 if (fh->fh_type == FC_TYPE_BLS)
1905 fc_exch_recv_bls(mp, fp);
1906 else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
1907 FC_FC_EX_CTX)
1908 fc_exch_recv_seq_resp(mp, fp);
1909 else if (f_ctl & FC_FC_SEQ_CTX)
1910 fc_exch_recv_resp(mp, fp);
1911 else
1912 fc_exch_recv_req(lp, mp, fp);
1913 break;
1914 default:
1915		FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp));
1916 fc_frame_free(fp);
1917 break;
1918 }
1919}
1920EXPORT_SYMBOL(fc_exch_recv);
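
The frame routing at the heart of fc_exch_recv() depends only on the frame type and the two F_CTL context bits. A standalone restatement, with an invented enum for the four paths:

#include <stdint.h>

#define FC_FC_EX_CTX	(1 << 23)
#define FC_FC_SEQ_CTX	(1 << 22)

enum recv_path { RECV_BLS, RECV_SEQ_RESP, RECV_RESP, RECV_REQ };

static enum recv_path classify(int is_bls, uint32_t f_ctl)
{
	if (is_bls)
		return RECV_BLS;	/* basic link service handler */
	if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX)
		return RECV_SEQ_RESP;	/* remote starts a sequence on our exchange */
	if (f_ctl & FC_FC_SEQ_CTX)
		return RECV_RESP;	/* remote responds within our sequence */
	return RECV_REQ;		/* remote originated exchange and sequence */
}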
1921
1922int fc_exch_init(struct fc_lport *lp)
1923{
1924 if (!lp->tt.exch_get) {
1925 /*
1926 * exch_put() should be NULL if
1927 * exch_get() is NULL
1928 */
1929 WARN_ON(lp->tt.exch_put);
1930 lp->tt.exch_get = fc_exch_get;
1931 }
1932
1933 if (!lp->tt.seq_start_next)
1934 lp->tt.seq_start_next = fc_seq_start_next;
1935
1936 if (!lp->tt.exch_seq_send)
1937 lp->tt.exch_seq_send = fc_exch_seq_send;
1938
1939 if (!lp->tt.seq_send)
1940 lp->tt.seq_send = fc_seq_send;
1941
1942 if (!lp->tt.seq_els_rsp_send)
1943 lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
1944
1945 if (!lp->tt.exch_done)
1946 lp->tt.exch_done = fc_exch_done;
1947
1948 if (!lp->tt.exch_mgr_reset)
1949 lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
1950
1951 if (!lp->tt.seq_exch_abort)
1952 lp->tt.seq_exch_abort = fc_seq_exch_abort;
1953
1954 return 0;
1955}
1956EXPORT_SYMBOL(fc_exch_init);
1957
1958int fc_setup_exch_mgr(void)
1959{
1960 fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
1961 0, SLAB_HWCACHE_ALIGN, NULL);
1962 if (!fc_em_cachep)
1963 return -ENOMEM;
1964 return 0;
1965}
1966
1967void fc_destroy_exch_mgr(void)
1968{
1969 kmem_cache_destroy(fc_em_cachep);
1970}
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
new file mode 100644
index 000000000000..404e63ff46b8
--- /dev/null
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -0,0 +1,2131 @@
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
4 * Copyright(c) 2008 Mike Christie
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Maintained at www.Open-FCoE.org
20 */
21
22#include <linux/module.h>
23#include <linux/delay.h>
24#include <linux/kernel.h>
25#include <linux/types.h>
26#include <linux/spinlock.h>
27#include <linux/scatterlist.h>
28#include <linux/err.h>
29#include <linux/crc32.h>
30
31#include <scsi/scsi_tcq.h>
32#include <scsi/scsi.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi_device.h>
35#include <scsi/scsi_cmnd.h>
36
37#include <scsi/fc/fc_fc2.h>
38
39#include <scsi/libfc.h>
40#include <scsi/fc_encode.h>
41
42MODULE_AUTHOR("Open-FCoE.org");
43MODULE_DESCRIPTION("libfc");
44MODULE_LICENSE("GPL");
45
46static int fc_fcp_debug;
47
48#define FC_DEBUG_FCP(fmt...) \
49 do { \
50 if (fc_fcp_debug) \
51 FC_DBG(fmt); \
52 } while (0)
53
54static struct kmem_cache *scsi_pkt_cachep;
55
56/* SRB state definitions */
57#define FC_SRB_FREE 0 /* cmd is free */
58#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
59#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
60#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
61#define FC_SRB_ABORTED		(1 << 3)	/* abort acknowledged */
62#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
63#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
64#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
65#define FC_SRB_NOMEM		(1 << 7)	/* dropped due to out of memory */
66
67#define FC_SRB_READ (1 << 1)
68#define FC_SRB_WRITE (1 << 0)
69
70/*
71 * The SCp.ptr should be tested and set under the host lock. NULL indicates
72 * that the command has been returned to the scsi layer.
73 */
74#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
75#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
76#define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual)
77#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
78#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
79
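The SCp.ptr rule in the comment above suggests a detach helper. The following sketch is hypothetical (fc_fcp_pkt_detach is not in this patch) but shows the intended test-and-clear under the host lock, so a late completion cannot hand the command back to scsi-ml twice.

static struct fc_fcp_pkt *fc_fcp_pkt_detach(struct Scsi_Host *host,
					    struct scsi_cmnd *sc)
{
	struct fc_fcp_pkt *fsp;
	unsigned long flags;

	spin_lock_irqsave(host->host_lock, flags);
	fsp = CMD_SP(sc);	/* NULL: already returned to scsi-ml */
	sc->SCp.ptr = NULL;	/* claim the command exactly once */
	spin_unlock_irqrestore(host->host_lock, flags);
	return fsp;
}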
80struct fc_fcp_internal {
81 mempool_t *scsi_pkt_pool;
82 struct list_head scsi_pkt_queue;
83 u8 throttled;
84};
85
86#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
87
88/*
89 * function prototypes
90 * FC scsi I/O related functions
91 */
92static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
93static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
94static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
95static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
96static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
97static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
98static void fc_timeout_error(struct fc_fcp_pkt *);
99static void fc_fcp_timeout(unsigned long data);
100static void fc_fcp_rec(struct fc_fcp_pkt *);
101static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
102static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
103static void fc_io_compl(struct fc_fcp_pkt *);
104
105static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
106static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
107static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
108
109/*
110 * command status codes
111 */
112#define FC_COMPLETE 0
113#define FC_CMD_ABORTED 1
114#define FC_CMD_RESET 2
115#define FC_CMD_PLOGO 3
116#define FC_SNS_RCV 4
117#define FC_TRANS_ERR 5
118#define FC_DATA_OVRRUN 6
119#define FC_DATA_UNDRUN 7
120#define FC_ERROR 8
121#define FC_HRD_ERROR 9
122#define FC_CMD_TIME_OUT 10
123
124/*
125 * Error recovery timeout values.
126 */
127#define FC_SCSI_ER_TIMEOUT (10 * HZ)
128#define FC_SCSI_TM_TOV (10 * HZ)
129#define FC_SCSI_REC_TOV (2 * HZ)
130#define FC_HOST_RESET_TIMEOUT (30 * HZ)
131
132#define FC_MAX_ERROR_CNT 5
133#define FC_MAX_RECOV_RETRY 3
134
135#define FC_FCP_DFLT_QUEUE_DEPTH 32
136
137/**
138 * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet
139 * @lp: fc lport struct
140 * @gfp: gfp flags for allocation
141 *
142 * This is used by upper layer scsi driver.
143 * Return Value : scsi_pkt structure or null on allocation failure.
144 * Context : call from process context. no locking required.
145 */
146static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
147{
148 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
149 struct fc_fcp_pkt *fsp;
150
151 fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
152 if (fsp) {
153 memset(fsp, 0, sizeof(*fsp));
154 fsp->lp = lp;
155 atomic_set(&fsp->ref_cnt, 1);
156 init_timer(&fsp->timer);
157 INIT_LIST_HEAD(&fsp->list);
158 spin_lock_init(&fsp->scsi_pkt_lock);
159 }
160 return fsp;
161}
162
163/**
164 * fc_fcp_pkt_release - release hold on scsi_pkt packet
165 * @fsp: fcp packet struct
166 *
167 * This is used by upper layer scsi driver.
168 * Context : call from process and interrupt context.
169 * no locking required
170 */
171static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
172{
173 if (atomic_dec_and_test(&fsp->ref_cnt)) {
174 struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);
175
176 mempool_free(fsp, si->scsi_pkt_pool);
177 }
178}
179
180static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
181{
182 atomic_inc(&fsp->ref_cnt);
183}
184
185/**
186 * fc_fcp_pkt_destroy - release hold on scsi_pkt packet
187 *
188 * @seq: exchange sequence
189 * @fsp: fcp packet struct
190 *
191 * Release the hold on the scsi_pkt packet, which was taken to keep
192 * the scsi_pkt alive until the EM layer exchange resource is freed.
193 * Context : called from EM layer.
194 * no locking required
195 */
196static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
197{
198 fc_fcp_pkt_release(fsp);
199}
200
201/**
202 * fc_fcp_lock_pkt - lock a packet and get a ref to it.
203 * @fsp: fcp packet
204 *
205 * We should only return error if we return a command to scsi-ml before
206 * getting a response. This can happen in cases where we send an abort, but
207 * do not wait for the response and the abort and command can be passing
208 * each other on the wire/network-layer.
209 *
210 * Note: this function locks the packet and gets a reference to allow
211 * callers to call the completion function while the lock is held and
212 * not have to worry about the packets refcount.
213 *
214 * TODO: Maybe we should just have callers grab/release the lock and
215 * have a function that they call to verify the fsp and grab a ref if
216 * needed.
217 */
218static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
219{
220 spin_lock_bh(&fsp->scsi_pkt_lock);
221 if (fsp->state & FC_SRB_COMPL) {
222 spin_unlock_bh(&fsp->scsi_pkt_lock);
223 return -EPERM;
224 }
225
226 fc_fcp_pkt_hold(fsp);
227 return 0;
228}
229
230static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
231{
232 spin_unlock_bh(&fsp->scsi_pkt_lock);
233 fc_fcp_pkt_release(fsp);
234}
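
Every asynchronous path in this file brackets its work with the pair above. A minimal usage sketch, with a hypothetical handler body:

/* Illustrative only: the lock/hold discipline for async handlers. */
static void example_async_handler(struct fc_fcp_pkt *fsp)
{
	if (fc_fcp_lock_pkt(fsp))
		return;		/* FC_SRB_COMPL already set; no ref taken */

	/* ... safe to touch fsp here: locked and referenced ... */

	fc_fcp_unlock_pkt(fsp);	/* drops both the lock and the ref */
}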
235
236static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
237{
238 if (!(fsp->state & FC_SRB_COMPL))
239 mod_timer(&fsp->timer, jiffies + delay);
240}
241
242static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
243{
244 if (!fsp->seq_ptr)
245 return -EINVAL;
246
247 fsp->state |= FC_SRB_ABORT_PENDING;
248 return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
249}
250
251/*
252 * Retry command.
253 * An abort isn't needed.
254 */
255static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
256{
257 if (fsp->seq_ptr) {
258 fsp->lp->tt.exch_done(fsp->seq_ptr);
259 fsp->seq_ptr = NULL;
260 }
261
262 fsp->state &= ~FC_SRB_ABORT_PENDING;
263 fsp->io_status = SUGGEST_RETRY << 24;
264 fsp->status_code = FC_ERROR;
265 fc_fcp_complete_locked(fsp);
266}
267
268/*
269 * Receive SCSI data from target.
270 * Called after receiving solicited data.
271 */
272static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
273{
274 struct scsi_cmnd *sc = fsp->cmd;
275 struct fc_lport *lp = fsp->lp;
276 struct fcoe_dev_stats *stats;
277 struct fc_frame_header *fh;
278 size_t start_offset;
279 size_t offset;
280 u32 crc;
281 u32 copy_len = 0;
282 size_t len;
283 void *buf;
284 struct scatterlist *sg;
285 size_t remaining;
286
287 fh = fc_frame_header_get(fp);
288 offset = ntohl(fh->fh_parm_offset);
289 start_offset = offset;
290 len = fr_len(fp) - sizeof(*fh);
291 buf = fc_frame_payload_get(fp, 0);
292
293 if (offset + len > fsp->data_len) {
294 /*
295 * this should never happen
296 */
297 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
298 fc_frame_crc_check(fp))
299 goto crc_err;
300 FC_DEBUG_FCP("data received past end. len %zx offset %zx "
301 "data_len %x\n", len, offset, fsp->data_len);
302 fc_fcp_retry_cmd(fsp);
303 return;
304 }
305 if (offset != fsp->xfer_len)
306 fsp->state |= FC_SRB_DISCONTIG;
307
308 crc = 0;
309 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
310 crc = crc32(~0, (u8 *) fh, sizeof(*fh));
311
312 sg = scsi_sglist(sc);
313 remaining = len;
314
315 while (remaining > 0 && sg) {
316 size_t off;
317 void *page_addr;
318 size_t sg_bytes;
319
320 if (offset >= sg->length) {
321 offset -= sg->length;
322 sg = sg_next(sg);
323 continue;
324 }
325 sg_bytes = min(remaining, sg->length - offset);
326
327 /*
328 * The scatterlist item may be bigger than PAGE_SIZE,
329 * but we are limited to mapping PAGE_SIZE at a time.
330 */
331 off = offset + sg->offset;
332 sg_bytes = min(sg_bytes, (size_t)
333 (PAGE_SIZE - (off & ~PAGE_MASK)));
334 page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
335 KM_SOFTIRQ0);
336 if (!page_addr)
337 break; /* XXX panic? */
338
339 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
340 crc = crc32(crc, buf, sg_bytes);
341 memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
342 sg_bytes);
343
344 kunmap_atomic(page_addr, KM_SOFTIRQ0);
345 buf += sg_bytes;
346 offset += sg_bytes;
347 remaining -= sg_bytes;
348 copy_len += sg_bytes;
349 }
350
351 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
352 buf = fc_frame_payload_get(fp, 0);
353 if (len % 4) {
354 crc = crc32(crc, buf + len, 4 - (len % 4));
355 len += 4 - (len % 4);
356 }
357
358 if (~crc != le32_to_cpu(fr_crc(fp))) {
359crc_err:
360 stats = lp->dev_stats[smp_processor_id()];
361 stats->ErrorFrames++;
362 if (stats->InvalidCRCCount++ < 5)
363 FC_DBG("CRC error on data frame\n");
364 /*
365 * Assume the frame is total garbage.
366 * We may have copied it over the good part
367 * of the buffer.
368 * If so, we need to retry the entire operation.
369 * Otherwise, ignore it.
370 */
371 if (fsp->state & FC_SRB_DISCONTIG)
372 fc_fcp_retry_cmd(fsp);
373 return;
374 }
375 }
376
377 if (fsp->xfer_contig_end == start_offset)
378 fsp->xfer_contig_end += copy_len;
379 fsp->xfer_len += copy_len;
380
381 /*
382 * In the very rare event that this data arrived after the response
383 * and completes the transfer, call the completion handler.
384 */
385 if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
386 fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
387 fc_fcp_complete_locked(fsp);
388}
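
The CRC fix-up near the end of fc_fcp_recv_data() extends the running CRC over the trailing fill bytes when the payload is not word-aligned. A standalone sketch of just that step, using zlib's crc32() as a stand-in (the kernel helper above is seeded with ~0 over the FC header first, and the result is compared complemented; the padding arithmetic is the part shown here):

#include <zlib.h>

/* crc_padded is an invented name; the fill bytes are assumed to follow
 * the payload in the same buffer, as they do in a received FC frame. */
static unsigned long crc_padded(const unsigned char *buf, size_t len)
{
	unsigned long crc = crc32(0L, Z_NULL, 0);

	crc = crc32(crc, buf, len);
	if (len % 4)
		crc = crc32(crc, buf + len, 4 - (len % 4));
	return crc;
}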
389
390/*
391 * fc_fcp_send_data - Send SCSI data to target.
392 * @fsp: ptr to fc_fcp_pkt
393 * @sp: ptr to this sequence
394 * @offset: starting offset for this data request
395 * @seq_blen: the burst length for this data request
396 *
397 * Called after receiving a Transfer Ready data descriptor.
398 * If the LLD is capable of sequence offload, send seq_blen
399 * bytes of data in a single frame; otherwise send multiple FC
400 * frames of the max FC frame payload supported by the target port.
401 *
402 * Returns : 0 for success.
403 */
404static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
405 size_t offset, size_t seq_blen)
406{
407 struct fc_exch *ep;
408 struct scsi_cmnd *sc;
409 struct scatterlist *sg;
410 struct fc_frame *fp = NULL;
411 struct fc_lport *lp = fsp->lp;
412 size_t remaining;
413 size_t t_blen;
414 size_t tlen;
415 size_t sg_bytes;
416 size_t frame_offset, fh_parm_offset;
417 int error;
418 void *data = NULL;
419 void *page_addr;
420 int using_sg = lp->sg_supp;
421 u32 f_ctl;
422
423 WARN_ON(seq_blen <= 0);
424 if (unlikely(offset + seq_blen > fsp->data_len)) {
425 /* this should never happen */
426 FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
427 seq_blen, offset);
428 fc_fcp_send_abort(fsp);
429 return 0;
430 } else if (offset != fsp->xfer_len) {
431 /* Out of Order Data Request - no problem, but unexpected. */
432 FC_DEBUG_FCP("xfer-ready non-contiguous. "
433 "seq_blen %zx offset %zx\n", seq_blen, offset);
434 }
435
436 /*
437	 * If the LLD is capable of seq_offload, set the transport
438	 * burst length (t_blen) to seq_blen; otherwise set t_blen
439	 * to the max FC frame payload previously set in fsp->max_payload.
440 */
441 t_blen = lp->seq_offload ? seq_blen : fsp->max_payload;
442 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
443 if (t_blen > 512)
444 t_blen &= ~(512 - 1); /* round down to block size */
445 WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */
446 sc = fsp->cmd;
447
448 remaining = seq_blen;
449 fh_parm_offset = frame_offset = offset;
450 tlen = 0;
451 seq = lp->tt.seq_start_next(seq);
452 f_ctl = FC_FC_REL_OFF;
453 WARN_ON(!seq);
454
455 /*
456 * If a get_page()/put_page() will fail, don't use sg lists
457 * in the fc_frame structure.
458 *
459 * The put_page() may be long after the I/O has completed
460 * in the case of FCoE, since the network driver does it
461 * via free_skb(). See the test in free_pages_check().
462 *
463 * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
464 */
465 if (using_sg) {
466 for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) {
467 if (page_count(sg_page(sg)) == 0 ||
468 (sg_page(sg)->flags & (1 << PG_lru |
469 1 << PG_private |
470 1 << PG_locked |
471 1 << PG_active |
472 1 << PG_slab |
473 1 << PG_swapcache |
474 1 << PG_writeback |
475 1 << PG_reserved |
476 1 << PG_buddy))) {
477 using_sg = 0;
478 break;
479 }
480 }
481 }
482 sg = scsi_sglist(sc);
483
484 while (remaining > 0 && sg) {
485 if (offset >= sg->length) {
486 offset -= sg->length;
487 sg = sg_next(sg);
488 continue;
489 }
490 if (!fp) {
491 tlen = min(t_blen, remaining);
492
493 /*
494 * TODO. Temporary workaround. fc_seq_send() can't
495 * handle odd lengths in non-linear skbs.
496 * This will be the final fragment only.
497 */
498 if (tlen % 4)
499 using_sg = 0;
500 if (using_sg) {
501 fp = _fc_frame_alloc(lp, 0);
502 if (!fp)
503 return -ENOMEM;
504 } else {
505 fp = fc_frame_alloc(lp, tlen);
506 if (!fp)
507 return -ENOMEM;
508
509 data = (void *)(fr_hdr(fp)) +
510 sizeof(struct fc_frame_header);
511 }
512 fh_parm_offset = frame_offset;
513 fr_max_payload(fp) = fsp->max_payload;
514 }
515 sg_bytes = min(tlen, sg->length - offset);
516 if (using_sg) {
517 WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags >
518 FC_FRAME_SG_LEN);
519 get_page(sg_page(sg));
520 skb_fill_page_desc(fp_skb(fp),
521 skb_shinfo(fp_skb(fp))->nr_frags,
522 sg_page(sg), sg->offset + offset,
523 sg_bytes);
524 fp_skb(fp)->data_len += sg_bytes;
525 fr_len(fp) += sg_bytes;
526 fp_skb(fp)->truesize += PAGE_SIZE;
527 } else {
528 size_t off = offset + sg->offset;
529
530 /*
531 * The scatterlist item may be bigger than PAGE_SIZE,
532 * but we must not cross pages inside the kmap.
533 */
534 sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
535 (off & ~PAGE_MASK)));
536 page_addr = kmap_atomic(sg_page(sg) +
537 (off >> PAGE_SHIFT),
538 KM_SOFTIRQ0);
539 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
540 sg_bytes);
541 kunmap_atomic(page_addr, KM_SOFTIRQ0);
542 data += sg_bytes;
543 }
544 offset += sg_bytes;
545 frame_offset += sg_bytes;
546 tlen -= sg_bytes;
547 remaining -= sg_bytes;
548
549 if (tlen)
550 continue;
551
552 /*
553 * Send sequence with transfer sequence initiative in case
554 * this is last FCP frame of the sequence.
555 */
556 if (remaining == 0)
557 f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
558
559 ep = fc_seq_exch(seq);
560 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
561 FC_TYPE_FCP, f_ctl, fh_parm_offset);
562
563 /*
564		 * Send this fragment as part of the sequence.
565 */
566 error = lp->tt.seq_send(lp, seq, fp);
567 if (error) {
568 WARN_ON(1); /* send error should be rare */
569 fc_fcp_retry_cmd(fsp);
570 return 0;
571 }
572 fp = NULL;
573 }
574 fsp->xfer_len += seq_blen; /* premature count? */
575 return 0;
576}
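
The burst-length selection at the top of fc_fcp_send_data() reduces to a few lines of arithmetic. This standalone sketch uses invented names and shows the 512-byte round-down:

#include <stdio.h>

static size_t pick_t_blen(int seq_offload, size_t seq_blen,
			  size_t max_payload)
{
	size_t t_blen = seq_offload ? seq_blen : max_payload;

	if (t_blen > 512)
		t_blen &= ~(size_t)(512 - 1);	/* round down to block size */
	return t_blen;
}

int main(void)
{
	printf("%zu\n", pick_t_blen(0, 65536, 2112));	/* -> 2048 */
	return 0;
}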
577
578static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
579{
580 int ba_done = 1;
581 struct fc_ba_rjt *brp;
582 struct fc_frame_header *fh;
583
584 fh = fc_frame_header_get(fp);
585 switch (fh->fh_r_ctl) {
586 case FC_RCTL_BA_ACC:
587 break;
588 case FC_RCTL_BA_RJT:
589 brp = fc_frame_payload_get(fp, sizeof(*brp));
590 if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
591 break;
592 /* fall thru */
593 default:
594 /*
595		 * We will let the command time out
596		 * and scsi-ml recover in this case,
597		 * therefore clear the ba_done flag.
598 */
599 ba_done = 0;
600 }
601
602 if (ba_done) {
603 fsp->state |= FC_SRB_ABORTED;
604 fsp->state &= ~FC_SRB_ABORT_PENDING;
605
606 if (fsp->wait_for_comp)
607 complete(&fsp->tm_done);
608 else
609 fc_fcp_complete_locked(fsp);
610 }
611}
612
613/*
614 * fc_fcp_reduce_can_queue - drop can_queue
615 * @lp: lport to drop queueing for
616 *
617 * If we are getting memory allocation failures, then we may
618 * be trying to execute too many commands. We let the running
619 * commands complete or time out, then try again with a reduced
620 * can_queue. Eventually we will hit the point where we run
621 * entirely on reserved structs.
622 */
623static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
624{
625 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
626 unsigned long flags;
627 int can_queue;
628
629 spin_lock_irqsave(lp->host->host_lock, flags);
630 if (si->throttled)
631 goto done;
632 si->throttled = 1;
633
634 can_queue = lp->host->can_queue;
635 can_queue >>= 1;
636 if (!can_queue)
637 can_queue = 1;
638 lp->host->can_queue = can_queue;
639 shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n"
640 "Reducing can_queue to %d.\n", can_queue);
641done:
642 spin_unlock_irqrestore(lp->host->host_lock, flags);
643}
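
The throttle above backs the queue depth off geometrically; a minimal sketch of the arithmetic, assuming nothing beyond what the function does (halved_can_queue is an invented name):

static int halved_can_queue(int can_queue)
{
	can_queue >>= 1;			/* halve on allocation failure */
	return can_queue ? can_queue : 1;	/* but never below one */
}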
644
645/*
646 * exch mgr calls this routine to process scsi
647 * exchanges.
648 *
649 * Return : None
650 * Context : called from Soft IRQ context
651 * must not be called while holding the list lock
652 */
653static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
654{
655 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
656 struct fc_lport *lp;
657 struct fc_frame_header *fh;
658 struct fcp_txrdy *dd;
659 u8 r_ctl;
660 int rc = 0;
661
662 if (IS_ERR(fp))
663 goto errout;
664
665 fh = fc_frame_header_get(fp);
666 r_ctl = fh->fh_r_ctl;
667 lp = fsp->lp;
668
669 if (!(lp->state & LPORT_ST_READY))
670 goto out;
671 if (fc_fcp_lock_pkt(fsp))
672 goto out;
673 fsp->last_pkt_time = jiffies;
674
675 if (fh->fh_type == FC_TYPE_BLS) {
676 fc_fcp_abts_resp(fsp, fp);
677 goto unlock;
678 }
679
680 if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
681 goto unlock;
682
683 if (r_ctl == FC_RCTL_DD_DATA_DESC) {
684 /*
685 * received XFER RDY from the target
686 * need to send data to the target
687 */
688 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
689 dd = fc_frame_payload_get(fp, sizeof(*dd));
690 WARN_ON(!dd);
691
692 rc = fc_fcp_send_data(fsp, seq,
693 (size_t) ntohl(dd->ft_data_ro),
694 (size_t) ntohl(dd->ft_burst_len));
695 if (!rc)
696 seq->rec_data = fsp->xfer_len;
697 else if (rc == -ENOMEM)
698 fsp->state |= FC_SRB_NOMEM;
699 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
700 /*
701 * received a DATA frame
702 * next we will copy the data to the system buffer
703 */
704 WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */
705 fc_fcp_recv_data(fsp, fp);
706 seq->rec_data = fsp->xfer_contig_end;
707 } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
708 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
709
710 fc_fcp_resp(fsp, fp);
711 } else {
712 FC_DBG("unexpected frame. r_ctl %x\n", r_ctl);
713 }
714unlock:
715 fc_fcp_unlock_pkt(fsp);
716out:
717 fc_frame_free(fp);
718errout:
719 if (IS_ERR(fp))
720 fc_fcp_error(fsp, fp);
721 else if (rc == -ENOMEM)
722 fc_fcp_reduce_can_queue(lp);
723}
724
725static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
726{
727 struct fc_frame_header *fh;
728 struct fcp_resp *fc_rp;
729 struct fcp_resp_ext *rp_ex;
730 struct fcp_resp_rsp_info *fc_rp_info;
731 u32 plen;
732 u32 expected_len;
733 u32 respl = 0;
734 u32 snsl = 0;
735 u8 flags = 0;
736
737 plen = fr_len(fp);
738 fh = (struct fc_frame_header *)fr_hdr(fp);
739 if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
740 goto len_err;
741 plen -= sizeof(*fh);
742 fc_rp = (struct fcp_resp *)(fh + 1);
743 fsp->cdb_status = fc_rp->fr_status;
744 flags = fc_rp->fr_flags;
745 fsp->scsi_comp_flags = flags;
746 expected_len = fsp->data_len;
747
748 if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
749 rp_ex = (void *)(fc_rp + 1);
750 if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
751 if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
752 goto len_err;
753 fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
754 if (flags & FCP_RSP_LEN_VAL) {
755 respl = ntohl(rp_ex->fr_rsp_len);
756 if (respl != sizeof(*fc_rp_info))
757 goto len_err;
758 if (fsp->wait_for_comp) {
759 /* Abuse cdb_status for rsp code */
760 fsp->cdb_status = fc_rp_info->rsp_code;
761 complete(&fsp->tm_done);
762 /*
763 * tmfs will not have any scsi cmd so
764 * exit here
765 */
766 return;
767 } else
768 goto err;
769 }
770 if (flags & FCP_SNS_LEN_VAL) {
771 snsl = ntohl(rp_ex->fr_sns_len);
772 if (snsl > SCSI_SENSE_BUFFERSIZE)
773 snsl = SCSI_SENSE_BUFFERSIZE;
774 memcpy(fsp->cmd->sense_buffer,
775 (char *)fc_rp_info + respl, snsl);
776 }
777 }
778 if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
779 if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
780 goto len_err;
781 if (flags & FCP_RESID_UNDER) {
782 fsp->scsi_resid = ntohl(rp_ex->fr_resid);
783 /*
784 * The cmnd->underflow is the minimum number of
785				 * bytes that must be transferred for this
786 * command. Provided a sense condition is not
787 * present, make sure the actual amount
788 * transferred is at least the underflow value
789 * or fail.
790 */
791 if (!(flags & FCP_SNS_LEN_VAL) &&
792 (fc_rp->fr_status == 0) &&
793 (scsi_bufflen(fsp->cmd) -
794 fsp->scsi_resid) < fsp->cmd->underflow)
795 goto err;
796 expected_len -= fsp->scsi_resid;
797 } else {
798 fsp->status_code = FC_ERROR;
799 }
800 }
801 }
802 fsp->state |= FC_SRB_RCV_STATUS;
803
804 /*
805 * Check for missing or extra data frames.
806 */
807 if (unlikely(fsp->xfer_len != expected_len)) {
808 if (fsp->xfer_len < expected_len) {
809 /*
810			 * Some data may be queued locally.
811			 * Wait at least one jiffy to see if it is delivered.
812 * If this expires without data, we may do SRR.
813 */
814 fc_fcp_timer_set(fsp, 2);
815 return;
816 }
817 fsp->status_code = FC_DATA_OVRRUN;
818 FC_DBG("tgt %6x xfer len %zx greater than expected len %x. "
819 "data len %x\n",
820 fsp->rport->port_id,
821 fsp->xfer_len, expected_len, fsp->data_len);
822 }
823 fc_fcp_complete_locked(fsp);
824 return;
825
826len_err:
827 FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n",
828 flags, fr_len(fp), respl, snsl);
829err:
830 fsp->status_code = FC_ERROR;
831 fc_fcp_complete_locked(fsp);
832}
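
The FCP_RESID_UNDER handling above enforces the midlayer's underflow contract: with no sense data and good SCSI status, the bytes actually moved must reach cmnd->underflow or the command is failed. Restated as a standalone predicate with illustrative names:

#include <stdbool.h>
#include <stdint.h>

static bool underflow_ok(uint32_t bufflen, uint32_t resid,
			 uint32_t underflow, bool sense_valid,
			 uint8_t scsi_status)
{
	if (sense_valid || scsi_status != 0)
		return true;	/* sense/status reporting takes over */
	return bufflen - resid >= underflow;
}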
833
834/**
835 * fc_fcp_complete_locked - complete processing of a fcp packet
836 * @fsp: fcp packet
837 *
838 * This function may sleep if a timer is pending. The packet lock must be
839 * held, and the host lock must not be held.
840 */
841static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
842{
843 struct fc_lport *lp = fsp->lp;
844 struct fc_seq *seq;
845 struct fc_exch *ep;
846 u32 f_ctl;
847
848 if (fsp->state & FC_SRB_ABORT_PENDING)
849 return;
850
851 if (fsp->state & FC_SRB_ABORTED) {
852 if (!fsp->status_code)
853 fsp->status_code = FC_CMD_ABORTED;
854 } else {
855 /*
856 * Test for transport underrun, independent of response
857 * underrun status.
858 */
859 if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
860 (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
861 fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
862 fsp->status_code = FC_DATA_UNDRUN;
863 fsp->io_status = SUGGEST_RETRY << 24;
864 }
865 }
866
867 seq = fsp->seq_ptr;
868 if (seq) {
869 fsp->seq_ptr = NULL;
870 if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
871 struct fc_frame *conf_frame;
872 struct fc_seq *csp;
873
874 csp = lp->tt.seq_start_next(seq);
875 conf_frame = fc_frame_alloc(fsp->lp, 0);
876 if (conf_frame) {
877 f_ctl = FC_FC_SEQ_INIT;
878 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
879 ep = fc_seq_exch(seq);
880 fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
881 ep->did, ep->sid,
882 FC_TYPE_FCP, f_ctl, 0);
883 lp->tt.seq_send(lp, csp, conf_frame);
884 }
885 }
886 lp->tt.exch_done(seq);
887 }
888 fc_io_compl(fsp);
889}
890
891static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
892{
893 struct fc_lport *lp = fsp->lp;
894
895 if (fsp->seq_ptr) {
896 lp->tt.exch_done(fsp->seq_ptr);
897 fsp->seq_ptr = NULL;
898 }
899 fsp->status_code = error;
900}
901
902/**
903 * fc_fcp_cleanup_each_cmd - run fn on each active command
904 * @lp: logical port
905 * @id: target id
906 * @lun: lun
907 * @error: fsp status code
908 *
909 * If lun or id is -1, they are ignored.
910 */
911static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id,
912 unsigned int lun, int error)
913{
914 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
915 struct fc_fcp_pkt *fsp;
916 struct scsi_cmnd *sc_cmd;
917 unsigned long flags;
918
919 spin_lock_irqsave(lp->host->host_lock, flags);
920restart:
921 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
922 sc_cmd = fsp->cmd;
923 if (id != -1 && scmd_id(sc_cmd) != id)
924 continue;
925
926 if (lun != -1 && sc_cmd->device->lun != lun)
927 continue;
928
929 fc_fcp_pkt_hold(fsp);
930 spin_unlock_irqrestore(lp->host->host_lock, flags);
931
932 if (!fc_fcp_lock_pkt(fsp)) {
933 fc_fcp_cleanup_cmd(fsp, error);
934 fc_io_compl(fsp);
935 fc_fcp_unlock_pkt(fsp);
936 }
937
938 fc_fcp_pkt_release(fsp);
939 spin_lock_irqsave(lp->host->host_lock, flags);
940 /*
941		 * while we dropped the lock, multiple pkts could
942 * have been released, so we have to start over.
943 */
944 goto restart;
945 }
946 spin_unlock_irqrestore(lp->host->host_lock, flags);
947}
948
949static void fc_fcp_abort_io(struct fc_lport *lp)
950{
951 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR);
952}
953
954/**
955 * fc_fcp_pkt_send - send a fcp packet to the lower level.
956 * @lp: fc lport
957 * @fsp: fc packet.
958 *
959 * This is called by upper layer protocol.
960 * Return : zero for success and -1 for failure
961 * Context : called from queuecommand which can be called from process
962 * or scsi soft irq.
963 * Locks : called with the host lock and irqs disabled.
964 */
965static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
966{
967 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
968 int rc;
969
970 fsp->cmd->SCp.ptr = (char *)fsp;
971 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
972 fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
973
974 int_to_scsilun(fsp->cmd->device->lun,
975 (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
976 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
977 list_add_tail(&fsp->list, &si->scsi_pkt_queue);
978
979 spin_unlock_irq(lp->host->host_lock);
980 rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv);
981 spin_lock_irq(lp->host->host_lock);
982 if (rc)
983 list_del(&fsp->list);
984
985 return rc;
986}
987
988static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
989 void (*resp)(struct fc_seq *,
990 struct fc_frame *fp,
991 void *arg))
992{
993 struct fc_frame *fp;
994 struct fc_seq *seq;
995 struct fc_rport *rport;
996 struct fc_rport_libfc_priv *rp;
997 const size_t len = sizeof(fsp->cdb_cmd);
998 int rc = 0;
999
1000 if (fc_fcp_lock_pkt(fsp))
1001 return 0;
1002
1003 fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
1004 if (!fp) {
1005 rc = -1;
1006 goto unlock;
1007 }
1008
1009 memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
1010 fr_cmd(fp) = fsp->cmd;
1011 rport = fsp->rport;
1012 fsp->max_payload = rport->maxframe_size;
1013 rp = rport->dd_data;
1014
1015 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
1016 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
1017 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1018
1019 seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
1020 if (!seq) {
1021 fc_frame_free(fp);
1022 rc = -1;
1023 goto unlock;
1024 }
1025 fsp->last_pkt_time = jiffies;
1026 fsp->seq_ptr = seq;
1027 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
1028
1029 setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
1030 fc_fcp_timer_set(fsp,
1031 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
1032 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
1033unlock:
1034 fc_fcp_unlock_pkt(fsp);
1035 return rc;
1036}
1037
1038/*
1039 * transport error handler
1040 */
1041static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1042{
1043 int error = PTR_ERR(fp);
1044
1045 if (fc_fcp_lock_pkt(fsp))
1046 return;
1047
1048 switch (error) {
1049 case -FC_EX_CLOSED:
1050 fc_fcp_retry_cmd(fsp);
1051 goto unlock;
1052 default:
1053 FC_DBG("unknown error %ld\n", PTR_ERR(fp));
1054 }
1055 /*
1056 * clear abort pending, because the lower layer
1057 * decided to force completion.
1058 */
1059 fsp->state &= ~FC_SRB_ABORT_PENDING;
1060 fsp->status_code = FC_CMD_PLOGO;
1061 fc_fcp_complete_locked(fsp);
1062unlock:
1063 fc_fcp_unlock_pkt(fsp);
1064}
1065
1066/*
1067 * SCSI abort handler: sends an abort request
1068 * and then waits for the abort to complete.
1069 */
1070static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
1071{
1072 int rc = FAILED;
1073
1074 if (fc_fcp_send_abort(fsp))
1075 return FAILED;
1076
1077 init_completion(&fsp->tm_done);
1078 fsp->wait_for_comp = 1;
1079
1080 spin_unlock_bh(&fsp->scsi_pkt_lock);
1081 rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
1082 spin_lock_bh(&fsp->scsi_pkt_lock);
1083 fsp->wait_for_comp = 0;
1084
1085 if (!rc) {
1086 FC_DBG("target abort cmd failed\n");
1087 rc = FAILED;
1088 } else if (fsp->state & FC_SRB_ABORTED) {
1089 FC_DBG("target abort cmd passed\n");
1090 rc = SUCCESS;
1091 fc_fcp_complete_locked(fsp);
1092 }
1093
1094 return rc;
1095}
1096
1097/*
1098 * Retry LUN reset after resource allocation failed.
1099 */
1100static void fc_lun_reset_send(unsigned long data)
1101{
1102 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
1103 struct fc_lport *lp = fsp->lp;
1104 if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) {
1105 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
1106 return;
1107 if (fc_fcp_lock_pkt(fsp))
1108 return;
1109 setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
1110 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1111 fc_fcp_unlock_pkt(fsp);
1112 }
1113}
1114
1115/*
1116 * SCSI device reset handler: sends a LUN RESET to the device
1117 * and waits for the reset reply.
1118 */
1119static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
1120 unsigned int id, unsigned int lun)
1121{
1122 int rc;
1123
1124 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
1125 fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
1126 int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
1127
1128 fsp->wait_for_comp = 1;
1129 init_completion(&fsp->tm_done);
1130
1131 fc_lun_reset_send((unsigned long)fsp);
1132
1133 /*
1134 * wait for completion of reset
1135 * after that make sure all commands are terminated
1136 */
1137 rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
1138
1139 spin_lock_bh(&fsp->scsi_pkt_lock);
1140 fsp->state |= FC_SRB_COMPL;
1141 spin_unlock_bh(&fsp->scsi_pkt_lock);
1142
1143 del_timer_sync(&fsp->timer);
1144
1145 spin_lock_bh(&fsp->scsi_pkt_lock);
1146 if (fsp->seq_ptr) {
1147 lp->tt.exch_done(fsp->seq_ptr);
1148 fsp->seq_ptr = NULL;
1149 }
1150 fsp->wait_for_comp = 0;
1151 spin_unlock_bh(&fsp->scsi_pkt_lock);
1152
1153 if (!rc) {
1154 FC_DBG("lun reset failed\n");
1155 return FAILED;
1156 }
1157
1158 /* cdb_status holds the tmf's rsp code */
1159 if (fsp->cdb_status != FCP_TMF_CMPL)
1160 return FAILED;
1161
1162 FC_DBG("lun reset to lun %u completed\n", lun);
1163 fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
1164 return SUCCESS;
1165}
1166
1167/*
1168 * Task Management response handler
1169 */
1170static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1171{
1172 struct fc_fcp_pkt *fsp = arg;
1173 struct fc_frame_header *fh;
1174
1175 if (IS_ERR(fp)) {
1176 /*
1177 * If there is an error, just let it time out or wait
1178 * for the TMF to be aborted if it timed out.
1179 *
1180 * scsi-eh will escalate when either happens.
1181 */
1182 return;
1183 }
1184
1185 if (fc_fcp_lock_pkt(fsp))
1186 return;
1187
1188 /*
1189 * raced with eh timeout handler.
1190 */
1191 if (!fsp->seq_ptr || !fsp->wait_for_comp) {
1192 spin_unlock_bh(&fsp->scsi_pkt_lock);
1193 return;
1194 }
1195
1196 fh = fc_frame_header_get(fp);
1197 if (fh->fh_type != FC_TYPE_BLS)
1198 fc_fcp_resp(fsp, fp);
1199 fsp->seq_ptr = NULL;
1200 fsp->lp->tt.exch_done(seq);
1201 fc_frame_free(fp);
1202 fc_fcp_unlock_pkt(fsp);
1203}
1204
1205static void fc_fcp_cleanup(struct fc_lport *lp)
1206{
1207 fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR);
1208}
1209
1210/*
1211 * fc_fcp_timeout: called by OS timer function.
1212 *
1213 * The timer has been inactivated and must be reactivated if desired
1214 * using fc_fcp_timer_set().
1215 *
1216 * Algorithm:
1217 *
1218 * If REC is supported, just issue it, and return. The REC exchange will
1219 * complete or time out, and recovery can continue at that point.
1220 *
1221 * Otherwise, if the response has been received but not all of the data,
1222 * then ER_TIMEOUT has elapsed since the response was received.
1223 *
1224 * If the response has not been received, we check whether data was
1225 * received recently. If it was, we continue waiting; otherwise, we abort
1226 * the command. (A sketch of the progress check follows this function.)
1227 */
1228static void fc_fcp_timeout(unsigned long data)
1229{
1230 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
1231 struct fc_rport *rport = fsp->rport;
1232 struct fc_rport_libfc_priv *rp = rport->dd_data;
1233
1234 if (fc_fcp_lock_pkt(fsp))
1235 return;
1236
1237 if (fsp->cdb_cmd.fc_tm_flags)
1238 goto unlock;
1239
1240 fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
1241
1242 if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED)
1243 fc_fcp_rec(fsp);
1244 else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
1245 jiffies))
1246 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
1247 else if (fsp->state & FC_SRB_RCV_STATUS)
1248 fc_fcp_complete_locked(fsp);
1249 else
1250 fc_timeout_error(fsp);
1251 fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
1252unlock:
1253 fc_fcp_unlock_pkt(fsp);
1254}
1255
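
The half-interval progress test above depends on the kernel's wrap-safe jiffies comparisons. A minimal standalone sketch of the same idiom, assuming only <linux/jiffies.h>; the helper name is hypothetical:

#include <linux/jiffies.h>

/*
 * Sketch of the progress check in fc_fcp_timeout(): if the last
 * packet arrived within the most recent half of the error-recovery
 * timeout, keep waiting; otherwise treat the command as stalled.
 * time_after_eq() is used instead of a plain ">=" so the test
 * stays correct when the jiffies counter wraps around.
 */
static inline int fcp_made_progress(unsigned long last_pkt_time,
				    unsigned long er_timeout)
{
	return time_after_eq(last_pkt_time + er_timeout / 2, jiffies);
}
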
1256/*
1257 * Send a REC ELS request
1258 */
1259static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
1260{
1261 struct fc_lport *lp;
1262 struct fc_frame *fp;
1263 struct fc_rport *rport;
1264 struct fc_rport_libfc_priv *rp;
1265
1266 lp = fsp->lp;
1267 rport = fsp->rport;
1268 rp = rport->dd_data;
1269 if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) {
1270 fsp->status_code = FC_HRD_ERROR;
1271 fsp->io_status = SUGGEST_RETRY << 24;
1272 fc_fcp_complete_locked(fsp);
1273 return;
1274 }
1275 fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec));
1276 if (!fp)
1277 goto retry;
1278
1279 fr_seq(fp) = fsp->seq_ptr;
1280 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
1281 fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
1282 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1283 if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp,
1284 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
1285 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
1286 return;
1287 }
1288 fc_frame_free(fp);
1289retry:
1290 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1291 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1292 else
1293 fc_timeout_error(fsp);
1294}
1295
1296/*
1297 * Receive handler for REC ELS frame
1298 * If it is a reject, let the scsi layer handle
1299 * the timeout. If it is an LS_ACC and the I/O has not completed,
1300 * set the timer and return; otherwise complete the exchange
1301 * and tell the scsi layer to restart the I/O.
1302 */
1303static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1304{
1305 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
1306 struct fc_els_rec_acc *recp;
1307 struct fc_els_ls_rjt *rjt;
1308 u32 e_stat;
1309 u8 opcode;
1310 u32 offset;
1311 enum dma_data_direction data_dir;
1312 enum fc_rctl r_ctl;
1313 struct fc_rport_libfc_priv *rp;
1314
1315 if (IS_ERR(fp)) {
1316 fc_fcp_rec_error(fsp, fp);
1317 return;
1318 }
1319
1320 if (fc_fcp_lock_pkt(fsp))
1321 goto out;
1322
1323 fsp->recov_retry = 0;
1324 opcode = fc_frame_payload_op(fp);
1325 if (opcode == ELS_LS_RJT) {
1326 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1327 switch (rjt->er_reason) {
1328 default:
1329 FC_DEBUG_FCP("device %x unexpected REC reject "
1330 "reason %d expl %d\n",
1331 fsp->rport->port_id, rjt->er_reason,
1332 rjt->er_explan);
1333 /* fall through */
1334 case ELS_RJT_UNSUP:
1335 FC_DEBUG_FCP("device does not support REC\n");
1336 rp = fsp->rport->dd_data;
1337 /*
1338 * if the target does not support REC or gave some
1339 * bogus reason, set up the timer again so we keep
1340 * checking for progress.
1341 */
1342 rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
1343 fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
1344 break;
1345 case ELS_RJT_LOGIC:
1346 case ELS_RJT_UNAB:
1347 /*
1348 * If no data transfer, the command frame got dropped
1349 * so we just retry. If data was transferred, we
1350 * lost the response but the target has no record,
1351 * so we abort and retry.
1352 */
1353 if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
1354 fsp->xfer_len == 0) {
1355 fc_fcp_retry_cmd(fsp);
1356 break;
1357 }
1358 fc_timeout_error(fsp);
1359 break;
1360 }
1361 } else if (opcode == ELS_LS_ACC) {
1362 if (fsp->state & FC_SRB_ABORTED)
1363 goto unlock_out;
1364
1365 data_dir = fsp->cmd->sc_data_direction;
1366 recp = fc_frame_payload_get(fp, sizeof(*recp));
1367 offset = ntohl(recp->reca_fc4value);
1368 e_stat = ntohl(recp->reca_e_stat);
1369
1370 if (e_stat & ESB_ST_COMPLETE) {
1371
1372 /*
1373 * The exchange is complete.
1374 *
1375 * For output, we must've lost the response.
1376 * For input, all data must've been sent.
1377 * We may have lost the response
1378 * (and a confirmation was requested) and maybe
1379 * some data.
1380 *
1381 * If all data received, send SRR
1382 * asking for response. If partial data received,
1383 * or gaps, SRR requests data at start of gap.
1384 * Recovery via SRR relies on in-order-delivery.
1385 */
1386 if (data_dir == DMA_TO_DEVICE) {
1387 r_ctl = FC_RCTL_DD_CMD_STATUS;
1388 } else if (fsp->xfer_contig_end == offset) {
1389 r_ctl = FC_RCTL_DD_CMD_STATUS;
1390 } else {
1391 offset = fsp->xfer_contig_end;
1392 r_ctl = FC_RCTL_DD_SOL_DATA;
1393 }
1394 fc_fcp_srr(fsp, r_ctl, offset);
1395 } else if (e_stat & ESB_ST_SEQ_INIT) {
1396
1397 /*
1398 * The remote port has the initiative, so just
1399 * keep waiting for it to complete.
1400 */
1401 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1402 } else {
1403
1404 /*
1405 * The exchange is incomplete, we have seq. initiative.
1406 * Lost response with requested confirmation,
1407 * lost confirmation, lost transfer ready or
1408 * lost write data.
1409 *
1410 * For output, if not all data was received, ask
1411 * for transfer ready to be repeated.
1412 *
1413 * If we received or sent all the data, send SRR to
1414 * request response.
1415 *
1416 * If we lost a response, we may have lost some read
1417 * data as well.
1418 */
1419 r_ctl = FC_RCTL_DD_SOL_DATA;
1420 if (data_dir == DMA_TO_DEVICE) {
1421 r_ctl = FC_RCTL_DD_CMD_STATUS;
1422 if (offset < fsp->data_len)
1423 r_ctl = FC_RCTL_DD_DATA_DESC;
1424 } else if (offset == fsp->xfer_contig_end) {
1425 r_ctl = FC_RCTL_DD_CMD_STATUS;
1426 } else if (fsp->xfer_contig_end < offset) {
1427 offset = fsp->xfer_contig_end;
1428 }
1429 fc_fcp_srr(fsp, r_ctl, offset);
1430 }
1431 }
1432unlock_out:
1433 fc_fcp_unlock_pkt(fsp);
1434out:
1435 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
1436 fc_frame_free(fp);
1437}
1438
1439/*
1440 * Handle error response or timeout for REC exchange.
1441 */
1442static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1443{
1444 int error = PTR_ERR(fp);
1445
1446 if (fc_fcp_lock_pkt(fsp))
1447 goto out;
1448
1449 switch (error) {
1450 case -FC_EX_CLOSED:
1451 fc_fcp_retry_cmd(fsp);
1452 break;
1453
1454 default:
1455 FC_DBG("REC %p fid %x error unexpected error %d\n",
1456 fsp, fsp->rport->port_id, error);
1457 fsp->status_code = FC_CMD_PLOGO;
1458 /* fall through */
1459
1460 case -FC_EX_TIMEOUT:
1461 /*
1462 * Assume REC or LS_ACC was lost.
1463 * The exchange manager will have aborted REC, so retry.
1464 */
1465 FC_DBG("REC fid %x error error %d retry %d/%d\n",
1466 fsp->rport->port_id, error, fsp->recov_retry,
1467 FC_MAX_RECOV_RETRY);
1468 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1469 fc_fcp_rec(fsp);
1470 else
1471 fc_timeout_error(fsp);
1472 break;
1473 }
1474 fc_fcp_unlock_pkt(fsp);
1475out:
1476 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
1477}
1478
1479/*
1480 * Timeout error routine:
1481 * aborts the I/O, closes the exchange and
1482 * sends a completion notification to the scsi layer
1483 */
1484static void fc_timeout_error(struct fc_fcp_pkt *fsp)
1485{
1486 fsp->status_code = FC_CMD_TIME_OUT;
1487 fsp->cdb_status = 0;
1488 fsp->io_status = 0;
1489 /*
1490 * if this fails then we let the scsi command timer fire and
1491 * scsi-ml escalate.
1492 */
1493 fc_fcp_send_abort(fsp);
1494}
1495
1496/*
1497 * Sequence retransmission request.
1498 * This is called after receiving status but insufficient data, or
1499 * when expecting status but the request has timed out.
1500 */
1501static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1502{
1503 struct fc_lport *lp = fsp->lp;
1504 struct fc_rport *rport;
1505 struct fc_rport_libfc_priv *rp;
1506 struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
1507 struct fc_seq *seq;
1508 struct fcp_srr *srr;
1509 struct fc_frame *fp;
1510 u8 cdb_op;
1511
1512 rport = fsp->rport;
1513 rp = rport->dd_data;
1514 cdb_op = fsp->cdb_cmd.fc_cdb[0];
1515
1516 if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY)
1517 goto retry; /* shouldn't happen */
1518 fp = fc_frame_alloc(lp, sizeof(*srr));
1519 if (!fp)
1520 goto retry;
1521
1522 srr = fc_frame_payload_get(fp, sizeof(*srr));
1523 memset(srr, 0, sizeof(*srr));
1524 srr->srr_op = ELS_SRR;
1525 srr->srr_ox_id = htons(ep->oxid);
1526 srr->srr_rx_id = htons(ep->rxid);
1527 srr->srr_r_ctl = r_ctl;
1528 srr->srr_rel_off = htonl(offset);
1529
1530 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
1531 fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
1532 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1533
1534 seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL,
1535 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
1536 if (!seq) {
1537 fc_frame_free(fp);
1538 goto retry;
1539 }
1540 fsp->recov_seq = seq;
1541 fsp->xfer_len = offset;
1542 fsp->xfer_contig_end = offset;
1543 fsp->state &= ~FC_SRB_RCV_STATUS;
1544 fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */
1545 return;
1546retry:
1547 fc_fcp_retry_cmd(fsp);
1548}
1549
1550/*
1551 * Handle response from SRR.
1552 */
1553static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1554{
1555 struct fc_fcp_pkt *fsp = arg;
1556 struct fc_frame_header *fh;
1557
1558 if (IS_ERR(fp)) {
1559 fc_fcp_srr_error(fsp, fp);
1560 return;
1561 }
1562
1563 if (fc_fcp_lock_pkt(fsp))
1564 goto out;
1565
1566 fh = fc_frame_header_get(fp);
1567 /*
1568 * BUG? fc_fcp_srr_error calls exch_done which would release
1569 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
1570 * then fc_exch_timeout would be sending an abort. The exch_done
1571 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
1572 * an abort response though.
1573 */
1574 if (fh->fh_type == FC_TYPE_BLS) {
1575 fc_fcp_unlock_pkt(fsp);
1576 return;
1577 }
1578
1579 fsp->recov_seq = NULL;
1580 switch (fc_frame_payload_op(fp)) {
1581 case ELS_LS_ACC:
1582 fsp->recov_retry = 0;
1583 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1584 break;
1585 case ELS_LS_RJT:
1586 default:
1587 fc_timeout_error(fsp);
1588 break;
1589 }
1590 fc_fcp_unlock_pkt(fsp);
1591 fsp->lp->tt.exch_done(seq);
1592out:
1593 fc_frame_free(fp);
1594 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
1595}
1596
1597static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1598{
1599 if (fc_fcp_lock_pkt(fsp))
1600 goto out;
1601 fsp->lp->tt.exch_done(fsp->recov_seq);
1602 fsp->recov_seq = NULL;
1603 switch (PTR_ERR(fp)) {
1604 case -FC_EX_TIMEOUT:
1605 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1606 fc_fcp_rec(fsp);
1607 else
1608 fc_timeout_error(fsp);
1609 break;
1610 case -FC_EX_CLOSED: /* e.g., link failure */
1611 /* fall through */
1612 default:
1613 fc_fcp_retry_cmd(fsp);
1614 break;
1615 }
1616 fc_fcp_unlock_pkt(fsp);
1617out:
1618 fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
1619}
1620
1621static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
1622{
1623 /* lock ? */
1624 return (lp->state == LPORT_ST_READY) && (lp->link_status & FC_LINK_UP);
1625}
1626
1627/**
1628 * fc_queuecommand - The queuecommand function of the scsi template
1629 * @cmd: struct scsi_cmnd to be executed
1630 * @done: Callback function to be called when cmd is completed
1631 *
1632 * This is the I/O strategy routine, called by the scsi layer.
1633 * It is called with the host_lock held. (A template-wiring sketch
 * follows this function.)
1634 */
1635int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1636{
1637 struct fc_lport *lp;
1638 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1639 struct fc_fcp_pkt *fsp;
1640 struct fc_rport_libfc_priv *rp;
1641 int rval;
1642 int rc = 0;
1643 struct fcoe_dev_stats *stats;
1644
1645 lp = shost_priv(sc_cmd->device->host);
1646
1647 rval = fc_remote_port_chkready(rport);
1648 if (rval) {
1649 sc_cmd->result = rval;
1650 done(sc_cmd);
1651 goto out;
1652 }
1653
1654 if (!*(struct fc_remote_port **)rport->dd_data) {
1655 /*
1656 * rport is transitioning from blocked/deleted to
1657 * online
1658 */
1659 sc_cmd->result = DID_IMM_RETRY << 16;
1660 done(sc_cmd);
1661 goto out;
1662 }
1663
1664 rp = rport->dd_data;
1665
1666 if (!fc_fcp_lport_queue_ready(lp)) {
1667 rc = SCSI_MLQUEUE_HOST_BUSY;
1668 goto out;
1669 }
1670
1671 fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);
1672 if (fsp == NULL) {
1673 rc = SCSI_MLQUEUE_HOST_BUSY;
1674 goto out;
1675 }
1676
1677 /*
1678 * build the libfc request pkt
1679 */
1680 fsp->cmd = sc_cmd; /* save the cmd */
1681 fsp->lp = lp; /* save the softc ptr */
1682 fsp->rport = rport; /* set the remote port ptr */
1683 sc_cmd->scsi_done = done;
1684
1685 /*
1686 * set up the transfer length
1687 */
1688 fsp->data_len = scsi_bufflen(sc_cmd);
1689 fsp->xfer_len = 0;
1690
1691 /*
1692 * setup the data direction
1693 */
1694 stats = lp->dev_stats[smp_processor_id()];
1695 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
1696 fsp->req_flags = FC_SRB_READ;
1697 stats->InputRequests++;
1698 stats->InputMegabytes += fsp->data_len;
1699 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
1700 fsp->req_flags = FC_SRB_WRITE;
1701 stats->OutputRequests++;
1702 stats->OutputMegabytes += fsp->data_len;
1703 } else {
1704 fsp->req_flags = 0;
1705 stats->ControlRequests++;
1706 }
1707
1708 fsp->tgt_flags = rp->flags;
1709
1710 init_timer(&fsp->timer);
1711 fsp->timer.data = (unsigned long)fsp;
1712
1713 /*
1714 * send it to the lower layer;
1715 * if we get -1 back, release the packet and return
1716 * SCSI_MLQUEUE_HOST_BUSY so the midlayer will retry.
1717 */
1718 rval = fc_fcp_pkt_send(lp, fsp);
1719 if (rval != 0) {
1720 fsp->state = FC_SRB_FREE;
1721 fc_fcp_pkt_release(fsp);
1722 rc = SCSI_MLQUEUE_HOST_BUSY;
1723 }
1724out:
1725 return rc;
1726}
1727EXPORT_SYMBOL(fc_queuecommand);
1728
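fc_queuecommand() and the error-handling entry points exported in this file are intended to be wired into a low-level driver's scsi_host_template. A hedged sketch of that wiring; the template name and the numeric limits are illustrative, not taken from this patch:

#include <linux/module.h>
#include <scsi/scsi_host.h>
#include <scsi/libfc.h>

/* Hypothetical LLD template plugging in the libfc SCSI entry points. */
static struct scsi_host_template example_fc_sht = {
	.module			 = THIS_MODULE,
	.name			 = "example_fc",
	.queuecommand		 = fc_queuecommand,
	.eh_abort_handler	 = fc_eh_abort,
	.eh_device_reset_handler = fc_eh_device_reset,
	.eh_host_reset_handler	 = fc_eh_host_reset,
	.slave_alloc		 = fc_slave_alloc,
	.change_queue_depth	 = fc_change_queue_depth,
	.change_queue_type	 = fc_change_queue_type,
	.this_id		 = -1,
	.cmd_per_lun		 = 3,		/* illustrative value */
	.can_queue		 = 1024,	/* illustrative value */
};
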
1729/**
1730 * fc_io_compl - Handle responses for completed commands
1731 * @fsp: scsi packet
1732 *
1733 * Translates an error into a Linux SCSI error.
1734 *
1735 * The fcp packet lock must be held when calling.
1736 */
1737static void fc_io_compl(struct fc_fcp_pkt *fsp)
1738{
1739 struct fc_fcp_internal *si;
1740 struct scsi_cmnd *sc_cmd;
1741 struct fc_lport *lp;
1742 unsigned long flags;
1743
1744 fsp->state |= FC_SRB_COMPL;
1745 if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
1746 spin_unlock_bh(&fsp->scsi_pkt_lock);
1747 del_timer_sync(&fsp->timer);
1748 spin_lock_bh(&fsp->scsi_pkt_lock);
1749 }
1750
1751 lp = fsp->lp;
1752 si = fc_get_scsi_internal(lp);
1753 spin_lock_irqsave(lp->host->host_lock, flags);
1754 if (!fsp->cmd) {
1755 spin_unlock_irqrestore(lp->host->host_lock, flags);
1756 return;
1757 }
1758
1759 /*
1760 * if a command timed out while we had to try and throttle IO
1761 * and it is now getting cleaned up, then we are about to
1762 * try again, so clear the throttled flag in case we get more
1763 * timeouts.
1764 */
1765 if (si->throttled && fsp->state & FC_SRB_NOMEM)
1766 si->throttled = 0;
1767
1768 sc_cmd = fsp->cmd;
1769 fsp->cmd = NULL;
1770
1771 if (!sc_cmd->SCp.ptr) {
1772 spin_unlock_irqrestore(lp->host->host_lock, flags);
1773 return;
1774 }
1775
1776 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
1777 switch (fsp->status_code) {
1778 case FC_COMPLETE:
1779 if (fsp->cdb_status == 0) {
1780 /*
1781 * good I/O status
1782 */
1783 sc_cmd->result = DID_OK << 16;
1784 if (fsp->scsi_resid)
1785 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
1786 } else if (fsp->cdb_status == QUEUE_FULL) {
1787 struct scsi_device *tmp_sdev;
1788 struct scsi_device *sdev = sc_cmd->device;
1789
1790 shost_for_each_device(tmp_sdev, sdev->host) {
1791 if (tmp_sdev->id != sdev->id)
1792 continue;
1793
1794 if (tmp_sdev->queue_depth > 1) {
1795 scsi_track_queue_full(tmp_sdev,
1796 tmp_sdev->
1797 queue_depth - 1);
1798 }
1799 }
1800 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1801 } else {
1802 /*
1803 * transport level I/O was ok but scsi
1804 * has non zero status
1805 */
1806 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1807 }
1808 break;
1809 case FC_ERROR:
1810 sc_cmd->result = DID_ERROR << 16;
1811 break;
1812 case FC_DATA_UNDRUN:
1813 if (fsp->cdb_status == 0) {
1814 /*
1815 * scsi status is good but there was a transport-level
1816 * underrun. for a read, should this be an error?
1817 */
1818 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
1819 } else {
1820 /*
1821 * scsi got underrun, this is an error
1822 */
1823 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
1824 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1825 }
1826 break;
1827 case FC_DATA_OVRRUN:
1828 /*
1829 * overrun is an error
1830 */
1831 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1832 break;
1833 case FC_CMD_ABORTED:
1834 sc_cmd->result = (DID_ABORT << 16) | fsp->io_status;
1835 break;
1836 case FC_CMD_TIME_OUT:
1837 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
1838 break;
1839 case FC_CMD_RESET:
1840 sc_cmd->result = (DID_RESET << 16);
1841 break;
1842 case FC_HRD_ERROR:
1843 sc_cmd->result = (DID_NO_CONNECT << 16);
1844 break;
1845 default:
1846 sc_cmd->result = (DID_ERROR << 16);
1847 break;
1848 }
1849
1850 list_del(&fsp->list);
1851 sc_cmd->SCp.ptr = NULL;
1852 sc_cmd->scsi_done(sc_cmd);
1853 spin_unlock_irqrestore(lp->host->host_lock, flags);
1854
1855 /* release ref from initial allocation in queue command */
1856 fc_fcp_pkt_release(fsp);
1857}
1858
1859/**
1860 * fc_fcp_complete - complete processing of a fcp packet
1861 * @fsp: fcp packet
1862 *
1863 * This function may sleep if a fsp timer is pending.
1864 * The host lock must not be held by caller.
1865 */
1866void fc_fcp_complete(struct fc_fcp_pkt *fsp)
1867{
1868 if (fc_fcp_lock_pkt(fsp))
1869 return;
1870
1871 fc_fcp_complete_locked(fsp);
1872 fc_fcp_unlock_pkt(fsp);
1873}
1874EXPORT_SYMBOL(fc_fcp_complete);
1875
1876/**
1877 * fc_eh_abort - Abort a command from the scsi host template
1878 * @sc_cmd: scsi command to abort
1879 *
1880 * Sends an ABTS to the target device and waits for the response;
1881 * sc_cmd is the pointer to the command to be aborted.
1882 */
1883int fc_eh_abort(struct scsi_cmnd *sc_cmd)
1884{
1885 struct fc_fcp_pkt *fsp;
1886 struct fc_lport *lp;
1887 int rc = FAILED;
1888 unsigned long flags;
1889
1890 lp = shost_priv(sc_cmd->device->host);
1891 if (lp->state != LPORT_ST_READY)
1892 return rc;
1893 else if (!(lp->link_status & FC_LINK_UP))
1894 return rc;
1895
1896 spin_lock_irqsave(lp->host->host_lock, flags);
1897 fsp = CMD_SP(sc_cmd);
1898 if (!fsp) {
1899 /* command completed while scsi eh was setting up */
1900 spin_unlock_irqrestore(lp->host->host_lock, flags);
1901 return SUCCESS;
1902 }
1903 /* grab a ref so the fsp and sc_cmd cannot be released from under us */
1904 fc_fcp_pkt_hold(fsp);
1905 spin_unlock_irqrestore(lp->host->host_lock, flags);
1906
1907 if (fc_fcp_lock_pkt(fsp)) {
1908 /* completed while we were waiting for timer to be deleted */
1909 rc = SUCCESS;
1910 goto release_pkt;
1911 }
1912
1913 rc = fc_fcp_pkt_abort(lp, fsp);
1914 fc_fcp_unlock_pkt(fsp);
1915
1916release_pkt:
1917 fc_fcp_pkt_release(fsp);
1918 return rc;
1919}
1920EXPORT_SYMBOL(fc_eh_abort);
1921
1922/**
1923 * fc_eh_device_reset - Reset a single LUN
1924 * @sc_cmd: scsi command
1925 *
1926 * Set from the scsi host template to send a TMF to the target and wait
1927 * for the response.
1928 */
1929int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1930{
1931 struct fc_lport *lp;
1932 struct fc_fcp_pkt *fsp;
1933 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1934 int rc = FAILED;
1935 struct fc_rport_libfc_priv *rp;
1936 int rval;
1937
1938 rval = fc_remote_port_chkready(rport);
1939 if (rval)
1940 goto out;
1941
1942 rp = rport->dd_data;
1943 lp = shost_priv(sc_cmd->device->host);
1944
1945 if (lp->state != LPORT_ST_READY)
1946 return rc;
1947
1948 fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
1949 if (fsp == NULL) {
1950 FC_DBG("could not allocate scsi_pkt\n");
1951 sc_cmd->result = DID_NO_CONNECT << 16;
1952 goto out;
1953 }
1954
1955 /*
1956 * Build the libfc request pkt. Do not set the scsi cmnd, because
1957 * the sc passed in is not setup for execution like when sent
1958 * through the queuecommand callout.
1959 */
1960 fsp->lp = lp; /* save the softc ptr */
1961 fsp->rport = rport; /* set the remote port ptr */
1962
1963 /*
1964 * flush outstanding commands
1965 */
1966 rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
1967 fsp->state = FC_SRB_FREE;
1968 fc_fcp_pkt_release(fsp);
1969
1970out:
1971 return rc;
1972}
1973EXPORT_SYMBOL(fc_eh_device_reset);
1974
1975/**
1976 * fc_eh_host_reset - The reset function will reset the ports on the host.
1977 * @sc_cmd: scsi command
1978 */
1979int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
1980{
1981 struct Scsi_Host *shost = sc_cmd->device->host;
1982 struct fc_lport *lp = shost_priv(shost);
1983 unsigned long wait_tmo;
1984
1985 lp->tt.lport_reset(lp);
1986 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
1987 while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
1988 msleep(1000);
1989
1990 if (fc_fcp_lport_queue_ready(lp)) {
1991 shost_printk(KERN_INFO, shost, "Host reset succeeded.\n");
1992 return SUCCESS;
1993 } else {
1994 shost_printk(KERN_INFO, shost, "Host reset failed. "
1995 "lport not ready.\n");
1996 return FAILED;
1997 }
1998}
1999EXPORT_SYMBOL(fc_eh_host_reset);
2000
2001/**
2002 * fc_slave_alloc - configure queue depth
2003 * @sdev: scsi device
2004 *
2005 * Configures queue depth based on the host's cmd_per_lun. If not set
2006 * then we use the libfc default.
2007 */
2008int fc_slave_alloc(struct scsi_device *sdev)
2009{
2010 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2011 int queue_depth;
2012
2013 if (!rport || fc_remote_port_chkready(rport))
2014 return -ENXIO;
2015
2016 if (sdev->tagged_supported) {
2017 if (sdev->host->hostt->cmd_per_lun)
2018 queue_depth = sdev->host->hostt->cmd_per_lun;
2019 else
2020 queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
2021 scsi_activate_tcq(sdev, queue_depth);
2022 }
2023 return 0;
2024}
2025EXPORT_SYMBOL(fc_slave_alloc);
2026
2027int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
2028{
2029 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2030 return sdev->queue_depth;
2031}
2032EXPORT_SYMBOL(fc_change_queue_depth);
2033
2034int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
2035{
2036 if (sdev->tagged_supported) {
2037 scsi_set_tag_type(sdev, tag_type);
2038 if (tag_type)
2039 scsi_activate_tcq(sdev, sdev->queue_depth);
2040 else
2041 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2042 } else
2043 tag_type = 0;
2044
2045 return tag_type;
2046}
2047EXPORT_SYMBOL(fc_change_queue_type);
2048
2049void fc_fcp_destroy(struct fc_lport *lp)
2050{
2051 struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
2052
2053 if (!list_empty(&si->scsi_pkt_queue))
2054 printk(KERN_ERR "Leaked scsi packets.\n");
2055
2056 mempool_destroy(si->scsi_pkt_pool);
2057 kfree(si);
2058 lp->scsi_priv = NULL;
2059}
2060EXPORT_SYMBOL(fc_fcp_destroy);
2061
2062int fc_fcp_init(struct fc_lport *lp)
2063{
2064 int rc;
2065 struct fc_fcp_internal *si;
2066
2067 if (!lp->tt.fcp_cmd_send)
2068 lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
2069
2070 if (!lp->tt.fcp_cleanup)
2071 lp->tt.fcp_cleanup = fc_fcp_cleanup;
2072
2073 if (!lp->tt.fcp_abort_io)
2074 lp->tt.fcp_abort_io = fc_fcp_abort_io;
2075
2076 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
2077 if (!si)
2078 return -ENOMEM;
2079 lp->scsi_priv = si;
2080 INIT_LIST_HEAD(&si->scsi_pkt_queue);
2081
2082 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
2083 if (!si->scsi_pkt_pool) {
2084 rc = -ENOMEM;
2085 goto free_internal;
2086 }
2087 return 0;
2088
2089free_internal:
2090 kfree(si);
2091 return rc;
2092}
2093EXPORT_SYMBOL(fc_fcp_init);
2094
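fc_fcp_init() above fills in any FCP transport hooks the driver left NULL and allocates the per-lport packet mempool; fc_fcp_destroy() is the teardown counterpart. A minimal sketch of the pairing in a hypothetical driver's attach/detach path (the function names are assumptions):

/* Hypothetical LLD attach/detach around the FCP layer. */
static int example_lport_attach(struct fc_lport *lp)
{
	int rc;

	rc = fc_fcp_init(lp);	/* default tt.fcp_* hooks + packet pool */
	if (rc)
		return rc;	/* -ENOMEM if the mempool could not be made */
	/* ... exchange manager and lport setup would follow ... */
	return 0;
}

static void example_lport_detach(struct fc_lport *lp)
{
	lp->tt.fcp_cleanup(lp);	/* complete any outstanding packets */
	fc_fcp_destroy(lp);	/* free the pool; warns on leaked packets */
}
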
2095static int __init libfc_init(void)
2096{
2097 int rc;
2098
2099 scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
2100 sizeof(struct fc_fcp_pkt),
2101 0, SLAB_HWCACHE_ALIGN, NULL);
2102 if (scsi_pkt_cachep == NULL) {
2103 FC_DBG("Unable to allocate SRB cache...module load failed!");
2104 return -ENOMEM;
2105 }
2106
2107 rc = fc_setup_exch_mgr();
2108 if (rc)
2109 goto destroy_pkt_cache;
2110
2111 rc = fc_setup_rport();
2112 if (rc)
2113 goto destroy_em;
2114
2115 return rc;
2116destroy_em:
2117 fc_destroy_exch_mgr();
2118destroy_pkt_cache:
2119 kmem_cache_destroy(scsi_pkt_cachep);
2120 return rc;
2121}
2122
2123static void __exit libfc_exit(void)
2124{
2125 kmem_cache_destroy(scsi_pkt_cachep);
2126 fc_destroy_exch_mgr();
2127 fc_destroy_rport();
2128}
2129
2130module_init(libfc_init);
2131module_exit(libfc_exit);
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
new file mode 100644
index 000000000000..63fe00cfe667
--- /dev/null
+++ b/drivers/scsi/libfc/fc_frame.c
@@ -0,0 +1,89 @@
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * Frame allocation.
22 */
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/skbuff.h>
26#include <linux/crc32.h>
27
28#include <scsi/fc_frame.h>
29
30/*
31 * Check the CRC in a frame.
32 */
33u32 fc_frame_crc_check(struct fc_frame *fp)
34{
35 u32 crc;
36 u32 error;
37 const u8 *bp;
38 unsigned int len;
39
40 WARN_ON(!fc_frame_is_linear(fp));
41 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
42 len = (fr_len(fp) + 3) & ~3; /* round up length to include fill */
43 bp = (const u8 *) fr_hdr(fp);
44 crc = ~crc32(~0, bp, len);
45 error = crc ^ fr_crc(fp);
46 return error;
47}
48EXPORT_SYMBOL(fc_frame_crc_check);
49
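fc_frame_crc_check() applies the usual CRC-32 convention (all-ones seed, final complement) across the header and payload rounded up to a word boundary, and returns zero when the computed value matches fr_crc(fp). A sketch of a receive path consuming it, assuming a linear frame that may carry the FCPHF_CRC_UNCHECKED flag:

/* Hypothetical receive-side check; returns 1 if the frame is usable. */
static int example_rx_crc_ok(struct fc_frame *fp)
{
	if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED))
		return 1;	/* CRC was already validated upstream */
	return fc_frame_crc_check(fp) == 0;	/* 0 means the CRC matched */
}
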
50/*
51 * Allocate a frame intended to be sent via fcoe_xmit.
52 * Get an sk_buff for the frame and set the length.
53 */
54struct fc_frame *__fc_frame_alloc(size_t len)
55{
56 struct fc_frame *fp;
57 struct sk_buff *skb;
58
59 WARN_ON((len % sizeof(u32)) != 0);
60 len += sizeof(struct fc_frame_header);
61 skb = dev_alloc_skb(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM);
62 if (!skb)
63 return NULL;
64 fp = (struct fc_frame *) skb;
65 fc_frame_init(fp);
66 skb_reserve(skb, FC_FRAME_HEADROOM);
67 skb_put(skb, len);
68 return fp;
69}
70EXPORT_SYMBOL(__fc_frame_alloc);
71
72
73struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
74{
75 struct fc_frame *fp;
76 size_t fill;
77
78 fill = payload_len % 4;
79 if (fill != 0)
80 fill = 4 - fill;
81 fp = __fc_frame_alloc(payload_len + fill);
82 if (fp) {
83 memset((char *) fr_hdr(fp) + payload_len, 0, fill);
84 /* trim is OK, we just allocated it so there are no fragments */
85 skb_trim(fp_skb(fp),
86 payload_len + sizeof(struct fc_frame_header));
87 }
88 return fp;
89}
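
The fill arithmetic above pads the payload to a 4-byte multiple for the allocation, zeroes the pad, then trims the skb back to the true length. For a 13-byte payload: fill = 4 - (13 % 4) = 3, so 16 payload bytes are allocated, the last 3 are zeroed, and the skb is trimmed to 13 bytes plus the frame header. A hypothetical caller, for illustration only:

#include <scsi/fc_frame.h>

/* Sketch only: allocate a frame for a 13-byte (unaligned) payload. */
static struct fc_frame *example_alloc_odd(struct fc_lport *lp)
{
	struct fc_frame *fp;

	fp = fc_frame_alloc_fill(lp, 13);	/* allocates 16 + header */
	if (fp)
		memset(fc_frame_payload_get(fp, 13), 0, 13); /* payload */
	return fp;
}
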
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
new file mode 100644
index 000000000000..0b9bdb1fb807
--- /dev/null
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -0,0 +1,1604 @@
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * PORT LOCKING NOTES
22 *
23 * These comments only apply to the 'port code' which consists of the lport,
24 * disc and rport blocks.
25 *
26 * MOTIVATION
27 *
28 * The lport, disc and rport blocks all have mutexes that are used to protect
29 * those objects. The main motivation for these locks is to prevent an
30 * lport reset just before we send a frame. In that scenario the
31 * lport's FID would get set to zero and then we'd send a frame with an
32 * invalid SID. We also need to ensure that states don't change unexpectedly
33 * while processing another state.
34 *
35 * HIERARCHY
36 *
37 * The following hierarchy defines the locking rules. A greater lock
38 * may be held before acquiring a lesser lock, but a lesser lock should never
39 * be held while attempting to acquire a greater lock. Here is the
40 * hierarchy (an ordering sketch follows this comment block):
41 * lport > disc, lport > rport, disc > rport
42 *
43 * CALLBACKS
44 *
45 * The callbacks cause complications with this scheme. There is a callback
46 * from the rport (to either lport or disc) and a callback from disc
47 * (to the lport).
48 *
49 * As rports exit the rport state machine a callback is made to the owner of
50 * the rport to notify success or failure. Since the callback is likely to
51 * cause the lport or disc to grab its lock we cannot hold the rport lock
52 * while making the callback. To ensure that the rport is not freed while
53 * processing the callback, the rport callbacks are serialized through a
54 * single-threaded workqueue. An rport would never be freed while in a
55 * callback handler because no other rport work in this queue can be executed
56 * at the same time.
57 *
58 * When discovery succeeds or fails a callback is made to the lport as
59 * notification. Currently, successful discovery causes the lport to take no
60 * action. A failure will cause the lport to reset. There is likely a circular
61 * locking problem with this implementation.
62 */
63
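To make the ordering rule concrete, a path that needs both an lport lock and an rport lock must take the greater lock first. A minimal sketch under the stated hierarchy; the rport-side mutex name (rp_mutex) is an assumption about the companion rport code, since only lp_mutex appears in this file:

/*
 * Hypothetical illustration of lport > rport ordering: take the
 * greater lock (lp_mutex) before the lesser one; acquiring them
 * in the opposite order risks an AB-BA deadlock.
 */
static void example_lock_both(struct fc_lport *lport,
			      struct fc_rport_libfc_priv *rp)
{
	mutex_lock(&lport->lp_mutex);	/* greater lock first */
	mutex_lock(&rp->rp_mutex);	/* then the lesser (assumed name) */
	/* ... consistent updates to both objects ... */
	mutex_unlock(&rp->rp_mutex);
	mutex_unlock(&lport->lp_mutex);
}
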
64/*
65 * LPORT LOCKING
66 *
67 * The critical sections protected by the lport's mutex are quite broad and
68 * may be improved upon in the future. The lport code and its locking doesn't
69 * influence the I/O path, so excessive locking doesn't penalize I/O
70 * performance.
71 *
72 * The strategy is to lock whenever processing a request or response. Note
73 * that every _enter_* function corresponds to a state change. They generally
74 * change the lports state and then send a request out on the wire. We lock
75 * before calling any of these functions to protect that state change. This
76 * means that the entry points into the lport block manage the locks, and
77 * the state machine can transition between states (i.e. _enter_* functions)
78 * while always staying protected. (A locking sketch follows this block.)
79 *
80 * When handling responses we also hold the lport mutex broadly. When the
81 * lport receives the response frame it locks the mutex and then calls the
82 * appropriate handler for the particular response. Generally a response will
83 * trigger a state change and so the lock must already be held.
84 *
85 * Retries also have to consider the locking. The retries occur from a work
86 * context and the work function will lock the lport and then retry the state
87 * (i.e. _enter_* function).
88 */
89
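The strategy described above, locking at the entry point and changing state through _enter_* helpers while the lock is held, reduces to the following sketch. The worker function is hypothetical; fc_lport_enter_flogi() and LPORT_ST_FLOGI are real names from this file:

/*
 * Hypothetical retry worker mirroring the retry behavior described
 * above: the entry point takes lp_mutex, the _enter_* state change
 * runs entirely under it, and the mutex is dropped on the way out.
 */
static void example_lport_retry(struct fc_lport *lport)
{
	mutex_lock(&lport->lp_mutex);
	if (lport->state == LPORT_ST_FLOGI)
		fc_lport_enter_flogi(lport);	/* re-run the state */
	mutex_unlock(&lport->lp_mutex);
}
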
90#include <linux/timer.h>
91#include <asm/unaligned.h>
92
93#include <scsi/fc/fc_gs.h>
94
95#include <scsi/libfc.h>
96#include <scsi/fc_encode.h>
97
98/* Fabric IDs to use for point-to-point mode, chosen on whims. */
99#define FC_LOCAL_PTP_FID_LO 0x010101
100#define FC_LOCAL_PTP_FID_HI 0x010102
101
102#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds) */
103
104static int fc_lport_debug;
105
106#define FC_DEBUG_LPORT(fmt...) \
107 do { \
108 if (fc_lport_debug) \
109 FC_DBG(fmt); \
110 } while (0)
111
112static void fc_lport_error(struct fc_lport *, struct fc_frame *);
113
114static void fc_lport_enter_reset(struct fc_lport *);
115static void fc_lport_enter_flogi(struct fc_lport *);
116static void fc_lport_enter_dns(struct fc_lport *);
117static void fc_lport_enter_rpn_id(struct fc_lport *);
118static void fc_lport_enter_rft_id(struct fc_lport *);
119static void fc_lport_enter_scr(struct fc_lport *);
120static void fc_lport_enter_ready(struct fc_lport *);
121static void fc_lport_enter_logo(struct fc_lport *);
122
123static const char *fc_lport_state_names[] = {
124 [LPORT_ST_NONE] = "none",
125 [LPORT_ST_FLOGI] = "FLOGI",
126 [LPORT_ST_DNS] = "dNS",
127 [LPORT_ST_RPN_ID] = "RPN_ID",
128 [LPORT_ST_RFT_ID] = "RFT_ID",
129 [LPORT_ST_SCR] = "SCR",
130 [LPORT_ST_READY] = "Ready",
131 [LPORT_ST_LOGO] = "LOGO",
132 [LPORT_ST_RESET] = "reset",
133};
134
135static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
136{
137 fc_frame_free(fp);
138 return 0;
139}
140
141/**
142 * fc_lport_rport_callback - Event handler for rport events
143 * @lport: The lport which is receiving the event
144 * @rport: The rport which the event has occurred on
145 * @event: The event that occurred
146 *
147 * Locking Note: The rport lock should not be held when calling
148 * this function.
149 */
150static void fc_lport_rport_callback(struct fc_lport *lport,
151 struct fc_rport *rport,
152 enum fc_rport_event event)
153{
154 FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event,
155 rport->port_id);
156
157 switch (event) {
158 case RPORT_EV_CREATED:
159 if (rport->port_id == FC_FID_DIR_SERV) {
160 mutex_lock(&lport->lp_mutex);
161 if (lport->state == LPORT_ST_DNS) {
162 lport->dns_rp = rport;
163 fc_lport_enter_rpn_id(lport);
164 } else {
165 FC_DEBUG_LPORT("Received an CREATED event on "
166 "port (%6x) for the directory "
167 "server, but the lport is not "
168 "in the DNS state, it's in the "
169 "%d state", rport->port_id,
170 lport->state);
171 lport->tt.rport_logoff(rport);
172 }
173 mutex_unlock(&lport->lp_mutex);
174 } else
175 FC_DEBUG_LPORT("Received an event for port (%6x) "
176 "which is not the directory server\n",
177 rport->port_id);
178 break;
179 case RPORT_EV_LOGO:
180 case RPORT_EV_FAILED:
181 case RPORT_EV_STOP:
182 if (rport->port_id == FC_FID_DIR_SERV) {
183 mutex_lock(&lport->lp_mutex);
184 lport->dns_rp = NULL;
185 mutex_unlock(&lport->lp_mutex);
186
187 } else
188 FC_DEBUG_LPORT("Received an event for port (%6x) "
189 "which is not the directory server\n",
190 rport->port_id);
191 break;
192 case RPORT_EV_NONE:
193 break;
194 }
195}
196
197/**
198 * fc_lport_state - Return a string which represents the lport's state
199 * @lport: The lport whose state is to converted to a string
200 */
201static const char *fc_lport_state(struct fc_lport *lport)
202{
203 const char *cp;
204
205 cp = fc_lport_state_names[lport->state];
206 if (!cp)
207 cp = "unknown";
208 return cp;
209}
210
211/**
212 * fc_lport_ptp_setup - Create an rport for point-to-point mode
213 * @lport: The lport to attach the ptp rport to
214 * @remote_fid: The FID of the ptp rport
215 * @remote_wwpn: The WWPN of the ptp rport
216 * @remote_wwnn: The WWNN of the ptp rport
217 */
218static void fc_lport_ptp_setup(struct fc_lport *lport,
219 u32 remote_fid, u64 remote_wwpn,
220 u64 remote_wwnn)
221{
222 struct fc_disc_port dp;
223
224 dp.lp = lport;
225 dp.ids.port_id = remote_fid;
226 dp.ids.port_name = remote_wwpn;
227 dp.ids.node_name = remote_wwnn;
228 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
229
230 if (lport->ptp_rp) {
231 lport->tt.rport_logoff(lport->ptp_rp);
232 lport->ptp_rp = NULL;
233 }
234
235 lport->ptp_rp = fc_rport_rogue_create(&dp);
236
237 lport->tt.rport_login(lport->ptp_rp);
238
239 fc_lport_enter_ready(lport);
240}
241
242void fc_get_host_port_type(struct Scsi_Host *shost)
243{
244 /* TODO - currently just NPORT */
245 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
246}
247EXPORT_SYMBOL(fc_get_host_port_type);
248
249void fc_get_host_port_state(struct Scsi_Host *shost)
250{
251 struct fc_lport *lp = shost_priv(shost);
252
253 if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP)
254 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
255 else
256 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
257}
258EXPORT_SYMBOL(fc_get_host_port_state);
259
260void fc_get_host_speed(struct Scsi_Host *shost)
261{
262 struct fc_lport *lport = shost_priv(shost);
263
264 fc_host_speed(shost) = lport->link_speed;
265}
266EXPORT_SYMBOL(fc_get_host_speed);
267
268struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
269{
270 int i;
271 struct fc_host_statistics *fcoe_stats;
272 struct fc_lport *lp = shost_priv(shost);
273 struct timespec v0, v1;
274
275 fcoe_stats = &lp->host_stats;
276 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
277
278 jiffies_to_timespec(jiffies, &v0);
279 jiffies_to_timespec(lp->boot_time, &v1);
280 fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
281
282 for_each_online_cpu(i) {
283 struct fcoe_dev_stats *stats = lp->dev_stats[i];
284 if (stats == NULL)
285 continue;
286 fcoe_stats->tx_frames += stats->TxFrames;
287 fcoe_stats->tx_words += stats->TxWords;
288 fcoe_stats->rx_frames += stats->RxFrames;
289 fcoe_stats->rx_words += stats->RxWords;
290 fcoe_stats->error_frames += stats->ErrorFrames;
291 fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
292 fcoe_stats->fcp_input_requests += stats->InputRequests;
293 fcoe_stats->fcp_output_requests += stats->OutputRequests;
294 fcoe_stats->fcp_control_requests += stats->ControlRequests;
295 fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
296 fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
297 fcoe_stats->link_failure_count += stats->LinkFailureCount;
298 }
299 fcoe_stats->lip_count = -1;
300 fcoe_stats->nos_count = -1;
301 fcoe_stats->loss_of_sync_count = -1;
302 fcoe_stats->loss_of_signal_count = -1;
303 fcoe_stats->prim_seq_protocol_err_count = -1;
304 fcoe_stats->dumped_frames = -1;
305 return fcoe_stats;
306}
307EXPORT_SYMBOL(fc_get_host_stats);
308
309/*
310 * Fill in FLOGI command for request.
311 */
312static void
313fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi,
314 unsigned int op)
315{
316 struct fc_els_csp *sp;
317 struct fc_els_cssp *cp;
318
319 memset(flogi, 0, sizeof(*flogi));
320 flogi->fl_cmd = (u8) op;
321 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
322 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
323 sp = &flogi->fl_csp;
324 sp->sp_hi_ver = 0x20;
325 sp->sp_lo_ver = 0x20;
326 sp->sp_bb_cred = htons(10); /* this gets set by gateway */
327 sp->sp_bb_data = htons((u16) lport->mfs);
328 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
329 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
330 if (op != ELS_FLOGI) {
331 sp->sp_features = htons(FC_SP_FT_CIRO);
332 sp->sp_tot_seq = htons(255); /* seq. we accept */
333 sp->sp_rel_off = htons(0x1f);
334 sp->sp_e_d_tov = htonl(lport->e_d_tov);
335
336 cp->cp_rdfs = htons((u16) lport->mfs);
337 cp->cp_con_seq = htons(255);
338 cp->cp_open_seq = 1;
339 }
340}
341
342/*
343 * Add a supported FC-4 type.
344 */
345static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
346{
347 __be32 *mp;
348
349 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
350 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
351}
352
353/**
354 * fc_lport_recv_rlir_req - Handle received Registered Link Incident Report.
355 * @lport: Fibre Channel local port receiving the RLIR
356 * @sp: current sequence in the RLIR exchange
357 * @fp: RLIR request frame
358 *
359 * Locking Note: The lport lock is expected to be held before calling
360 * this function.
361 */
362static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
363 struct fc_lport *lport)
364{
365 FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
366 fc_lport_state(lport));
367
368 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
369 fc_frame_free(fp);
370}
371
372/**
373 * fc_lport_recv_echo_req - Handle received ECHO request
374 * @lport: Fibre Channel local port receiving the ECHO
375 * @sp: current sequence in the ECHO exchange
376 * @fp: ECHO request frame
377 *
378 * Locking Note: The lport lock is expected to be held before calling
379 * this function.
380 */
381static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
382 struct fc_lport *lport)
383{
384 struct fc_frame *fp;
385 struct fc_exch *ep = fc_seq_exch(sp);
386 unsigned int len;
387 void *pp;
388 void *dp;
389 u32 f_ctl;
390
391 FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
392 fc_lport_state(lport));
393
394 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
395 pp = fc_frame_payload_get(in_fp, len);
396
397 if (len < sizeof(__be32))
398 len = sizeof(__be32);
399
400 fp = fc_frame_alloc(lport, len);
401 if (fp) {
402 dp = fc_frame_payload_get(fp, len);
403 memcpy(dp, pp, len);
404 *((u32 *)dp) = htonl(ELS_LS_ACC << 24);
405 sp = lport->tt.seq_start_next(sp);
406 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
407 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
408 FC_TYPE_ELS, f_ctl, 0);
409 lport->tt.seq_send(lport, sp, fp);
410 }
411 fc_frame_free(in_fp);
412}
413
414/**
415 * fc_lport_recv_rnid_req - Handle received Request Node ID data request
416 * @lport: Fibre Channel local port receiving the RNID
417 * @sp: current sequence in the RNID exchange
418 * @fp: RNID request frame
419 *
420 * Locking Note: The lport lock is expected to be held before calling
421 * this function.
422 */
423static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
424 struct fc_lport *lport)
425{
426 struct fc_frame *fp;
427 struct fc_exch *ep = fc_seq_exch(sp);
428 struct fc_els_rnid *req;
429 struct {
430 struct fc_els_rnid_resp rnid;
431 struct fc_els_rnid_cid cid;
432 struct fc_els_rnid_gen gen;
433 } *rp;
434 struct fc_seq_els_data rjt_data;
435 u8 fmt;
436 size_t len;
437 u32 f_ctl;
438
439 FC_DEBUG_LPORT("Received RNID request while in state %s\n",
440 fc_lport_state(lport));
441
442 req = fc_frame_payload_get(in_fp, sizeof(*req));
443 if (!req) {
444 rjt_data.fp = NULL;
445 rjt_data.reason = ELS_RJT_LOGIC;
446 rjt_data.explan = ELS_EXPL_NONE;
447 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
448 } else {
449 fmt = req->rnid_fmt;
450 len = sizeof(*rp);
451 if (fmt != ELS_RNIDF_GEN ||
452 ntohl(lport->rnid_gen.rnid_atype) == 0) {
453 fmt = ELS_RNIDF_NONE; /* nothing to provide */
454 len -= sizeof(rp->gen);
455 }
456 fp = fc_frame_alloc(lport, len);
457 if (fp) {
458 rp = fc_frame_payload_get(fp, len);
459 memset(rp, 0, len);
460 rp->rnid.rnid_cmd = ELS_LS_ACC;
461 rp->rnid.rnid_fmt = fmt;
462 rp->rnid.rnid_cid_len = sizeof(rp->cid);
463 rp->cid.rnid_wwpn = htonll(lport->wwpn);
464 rp->cid.rnid_wwnn = htonll(lport->wwnn);
465 if (fmt == ELS_RNIDF_GEN) {
466 rp->rnid.rnid_sid_len = sizeof(rp->gen);
467 memcpy(&rp->gen, &lport->rnid_gen,
468 sizeof(rp->gen));
469 }
470 sp = lport->tt.seq_start_next(sp);
471 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
472 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
473 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
474 FC_TYPE_ELS, f_ctl, 0);
475 lport->tt.seq_send(lport, sp, fp);
476 }
477 }
478 fc_frame_free(in_fp);
479}
480
481/**
482 * fc_lport_recv_adisc_req - Handle received Address Discovery Request
483 * @lport: Fibre Channel local port receiving the ADISC
484 * @sp: current sequence in the ADISC exchange
485 * @fp: ADISC request frame
486 *
487 * Locking Note: The lport lock is expected to be held before calling
488 * this function.
489 */
490static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
491 struct fc_lport *lport)
492{
493 struct fc_frame *fp;
494 struct fc_exch *ep = fc_seq_exch(sp);
495 struct fc_els_adisc *req, *rp;
496 struct fc_seq_els_data rjt_data;
497 size_t len;
498 u32 f_ctl;
499
500 FC_DEBUG_LPORT("Received ADISC request while in state %s\n",
501 fc_lport_state(lport));
502
503 req = fc_frame_payload_get(in_fp, sizeof(*req));
504 if (!req) {
505 rjt_data.fp = NULL;
506 rjt_data.reason = ELS_RJT_LOGIC;
507 rjt_data.explan = ELS_EXPL_NONE;
508 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
509 } else {
510 len = sizeof(*rp);
511 fp = fc_frame_alloc(lport, len);
512 if (fp) {
513 rp = fc_frame_payload_get(fp, len);
514 memset(rp, 0, len);
515 rp->adisc_cmd = ELS_LS_ACC;
516 rp->adisc_wwpn = htonll(lport->wwpn);
517 rp->adisc_wwnn = htonll(lport->wwnn);
518 hton24(rp->adisc_port_id,
519 fc_host_port_id(lport->host));
520 sp = lport->tt.seq_start_next(sp);
521 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
522 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
523 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
524 FC_TYPE_ELS, f_ctl, 0);
525 lport->tt.seq_send(lport, sp, fp);
526 }
527 }
528 fc_frame_free(in_fp);
529}
530
531/**
532 * fc_lport_recv_logo_req - Handle received fabric LOGO request
533 * @lport: Fibre Channel local port receiving the LOGO
534 * @sp: current sequence in the LOGO exchange
535 * @fp: LOGO request frame
536 *
537 * Locking Note: The lport lock is expected to be held before calling
538 * this function.
539 */
540static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
541 struct fc_lport *lport)
542{
543 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
544 fc_lport_enter_reset(lport);
545 fc_frame_free(fp);
546}
547
548/**
549 * fc_fabric_login - Start the lport state machine
550 * @lport: The lport that should log into the fabric
551 *
552 * Locking Note: This function should not be called
553 * with the lport lock held.
554 */
555int fc_fabric_login(struct fc_lport *lport)
556{
557 int rc = -1;
558
559 mutex_lock(&lport->lp_mutex);
560 if (lport->state == LPORT_ST_NONE) {
561 fc_lport_enter_reset(lport);
562 rc = 0;
563 }
564 mutex_unlock(&lport->lp_mutex);
565
566 return rc;
567}
568EXPORT_SYMBOL(fc_fabric_login);
569
570/**
571 * fc_linkup - Handler for transport linkup events
572 * @lport: The lport whose link is up
573 */
574void fc_linkup(struct fc_lport *lport)
575{
576 FC_DEBUG_LPORT("Link is up for port (%6x)\n",
577 fc_host_port_id(lport->host));
578
579 mutex_lock(&lport->lp_mutex);
580 if ((lport->link_status & FC_LINK_UP) != FC_LINK_UP) {
581 lport->link_status |= FC_LINK_UP;
582
583 if (lport->state == LPORT_ST_RESET)
584 fc_lport_enter_flogi(lport);
585 }
586 mutex_unlock(&lport->lp_mutex);
587}
588EXPORT_SYMBOL(fc_linkup);
589
590/**
591 * fc_linkdown - Handler for transport linkdown events
592 * @lport: The lport whose link is down
593 */
594void fc_linkdown(struct fc_lport *lport)
595{
596 mutex_lock(&lport->lp_mutex);
597 FC_DEBUG_LPORT("Link is down for port (%6x)\n",
598 fc_host_port_id(lport->host));
599
600 if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) {
601 lport->link_status &= ~(FC_LINK_UP);
602 fc_lport_enter_reset(lport);
603 lport->tt.fcp_cleanup(lport);
604 }
605 mutex_unlock(&lport->lp_mutex);
606}
607EXPORT_SYMBOL(fc_linkdown);
608
609/**
610 * fc_pause - Pause the flow of frames
611 * @lport: The lport to be paused
612 */
613void fc_pause(struct fc_lport *lport)
614{
615 mutex_lock(&lport->lp_mutex);
616 lport->link_status |= FC_PAUSE;
617 mutex_unlock(&lport->lp_mutex);
618}
619EXPORT_SYMBOL(fc_pause);
620
621/**
622 * fc_unpause - Unpause the flow of frames
623 * @lport: The lport to be unpaused
624 */
625void fc_unpause(struct fc_lport *lport)
626{
627 mutex_lock(&lport->lp_mutex);
628 lport->link_status &= ~(FC_PAUSE);
629 mutex_unlock(&lport->lp_mutex);
630}
631EXPORT_SYMBOL(fc_unpause);
632
633/**
634 * fc_fabric_logoff - Logout of the fabric
635 * @lport: fc_lport pointer to logoff the fabric
636 *
637 * Return value:
638 * 0 for success, -1 for failure
639 **/
640int fc_fabric_logoff(struct fc_lport *lport)
641{
642 lport->tt.disc_stop_final(lport);
643 mutex_lock(&lport->lp_mutex);
644 fc_lport_enter_logo(lport);
645 mutex_unlock(&lport->lp_mutex);
646 return 0;
647}
648EXPORT_SYMBOL(fc_fabric_logoff);
649
650/**
651 * fc_lport_destroy - unregister a fc_lport
652 * @lport: fc_lport pointer to unregister
653 *
654 * Return value:
655 * None
656 * Note:
657 * exit routine for fc_lport instance
658 * clean-up all the allocated memory
659 * and free up other system resources.
660 *
661 **/
662int fc_lport_destroy(struct fc_lport *lport)
663{
664 lport->tt.frame_send = fc_frame_drop;
665 lport->tt.fcp_abort_io(lport);
666 lport->tt.exch_mgr_reset(lport->emp, 0, 0);
667 return 0;
668}
669EXPORT_SYMBOL(fc_lport_destroy);
670
671/**
672 * fc_set_mfs - sets up the mfs for the corresponding fc_lport
673 * @lport: fc_lport pointer to update
674 * @mfs: the new mfs for fc_lport
675 *
676 * Set mfs for the given fc_lport to the new mfs.
677 *
678 * Return: 0 for success
679 *
680 **/
681int fc_set_mfs(struct fc_lport *lport, u32 mfs)
682{
683 unsigned int old_mfs;
684 int rc = -EINVAL;
685
686 mutex_lock(&lport->lp_mutex);
687
688 old_mfs = lport->mfs;
689
690 if (mfs >= FC_MIN_MAX_FRAME) {
691 mfs &= ~3;
692 if (mfs > FC_MAX_FRAME)
693 mfs = FC_MAX_FRAME;
694 mfs -= sizeof(struct fc_frame_header);
695 lport->mfs = mfs;
696 rc = 0;
697 }
698
699 if (!rc && mfs < old_mfs)
700 fc_lport_enter_reset(lport);
701
702 mutex_unlock(&lport->lp_mutex);
703
704 return rc;
705}
706EXPORT_SYMBOL(fc_set_mfs);
707
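As a worked example of fc_set_mfs(): assuming the fc_fs.h definitions this code builds against (a 24-byte frame header, with FC_MAX_FRAME being the 2112-byte maximum payload plus that header), a driver offering a 9000-byte frame size ends up with a 2112-byte payload limit:

/*
 * Sketch: 9000 & ~3 = 9000 (already word-aligned); clamped down to
 * FC_MAX_FRAME (assumed 2136 = 2112 + 24); minus the header size,
 * lport->mfs becomes 2112. Returns 0 on success, -EINVAL if the
 * requested size is below FC_MIN_MAX_FRAME.
 */
static int example_set_mfs(struct fc_lport *lport)
{
	return fc_set_mfs(lport, 9000);
}
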
708/**
709 * fc_lport_disc_callback - Callback for discovery events
710 * @lport: FC local port
711 * @event: The discovery event
712 */
713void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
714{
715 switch (event) {
716 case DISC_EV_SUCCESS:
717 FC_DEBUG_LPORT("Got a SUCCESS event for port (%6x)\n",
718 fc_host_port_id(lport->host));
719 break;
720 case DISC_EV_FAILED:
721 FC_DEBUG_LPORT("Got a FAILED event for port (%6x)\n",
722 fc_host_port_id(lport->host));
723 mutex_lock(&lport->lp_mutex);
724 fc_lport_enter_reset(lport);
725 mutex_unlock(&lport->lp_mutex);
726 break;
727 case DISC_EV_NONE:
728 WARN_ON(1);
729 break;
730 }
731}
732
733/**
734 * fc_lport_enter_ready - Enter the ready state and start discovery
735 * @lport: Fibre Channel local port that is ready
736 *
737 * Locking Note: The lport lock is expected to be held before calling
738 * this routine.
739 */
740static void fc_lport_enter_ready(struct fc_lport *lport)
741{
742 FC_DEBUG_LPORT("Port (%6x) entered Ready from state %s\n",
743 fc_host_port_id(lport->host), fc_lport_state(lport));
744
745 fc_lport_state_enter(lport, LPORT_ST_READY);
746
747 lport->tt.disc_start(fc_lport_disc_callback, lport);
748}
749
750/**
751 * fc_lport_recv_flogi_req - Receive a FLOGI request
752 * @sp_in: The sequence the FLOGI is on
753 * @rx_fp: The frame the FLOGI is in
754 * @lport: The lport that received the request
755 *
756 * A received FLOGI request indicates a point-to-point connection.
757 * Accept it with the common service parameters indicating our N port.
758 * Set up to do a PLOGI if we have the higher-number WWPN.
759 *
760 * Locking Note: The lport lock is expected to be held before calling
761 * this function.
762 */
763static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
764 struct fc_frame *rx_fp,
765 struct fc_lport *lport)
766{
767 struct fc_frame *fp;
768 struct fc_frame_header *fh;
769 struct fc_seq *sp;
770 struct fc_exch *ep;
771 struct fc_els_flogi *flp;
772 struct fc_els_flogi *new_flp;
773 u64 remote_wwpn;
774 u32 remote_fid;
775 u32 local_fid;
776 u32 f_ctl;
777
778 FC_DEBUG_LPORT("Received FLOGI request while in state %s\n",
779 fc_lport_state(lport));
780
781 fh = fc_frame_header_get(rx_fp);
782 remote_fid = ntoh24(fh->fh_s_id);
783 flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
784 if (!flp)
785 goto out;
786 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
787 if (remote_wwpn == lport->wwpn) {
788 FC_DBG("FLOGI from port with same WWPN %llx "
789 "possible configuration error\n", remote_wwpn);
790 goto out;
791 }
792 FC_DBG("FLOGI from port WWPN %llx\n", remote_wwpn);
793
794 /*
795 * XXX what is the right thing to do for FIDs?
796 * The originator might expect our S_ID to be 0xfffffe.
797 * But if so, both of us could end up with the same FID.
798 */
799 local_fid = FC_LOCAL_PTP_FID_LO;
800 if (remote_wwpn < lport->wwpn) {
801 local_fid = FC_LOCAL_PTP_FID_HI;
802 if (!remote_fid || remote_fid == local_fid)
803 remote_fid = FC_LOCAL_PTP_FID_LO;
804 } else if (!remote_fid) {
805 remote_fid = FC_LOCAL_PTP_FID_HI;
806 }
807
808 fc_host_port_id(lport->host) = local_fid;
809
810 fp = fc_frame_alloc(lport, sizeof(*flp));
811 if (fp) {
812 sp = lport->tt.seq_start_next(fr_seq(rx_fp));
813 new_flp = fc_frame_payload_get(fp, sizeof(*flp));
814 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
815 new_flp->fl_cmd = (u8) ELS_LS_ACC;
816
817 /*
818 * Send the response. If this fails, the originator should
819 * repeat the sequence.
820 */
821 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
822 ep = fc_seq_exch(sp);
823 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
824 FC_TYPE_ELS, f_ctl, 0);
825 lport->tt.seq_send(lport, sp, fp);
826
827 } else {
828 fc_lport_error(lport, fp);
829 }
830 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
831 get_unaligned_be64(&flp->fl_wwnn));
832
833 lport->tt.disc_start(fc_lport_disc_callback, lport);
834
835out:
836 sp = fr_seq(rx_fp);
837 fc_frame_free(rx_fp);
838}
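/*
 * Illustrative point-to-point FID assignment (hypothetical WWPNs): if
 * our WWPN is 0x20000000c9000002 and the received FLOGI carries
 * 0x20000000c9000001, the remote WWPN is lower, so the code above
 * takes FC_LOCAL_PTP_FID_HI for us and leaves FC_LOCAL_PTP_FID_LO to
 * the peer; holding the higher WWPN also makes us the side that sends
 * the follow-up PLOGI.
 */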
839
840/**
841 * fc_lport_recv_req - The generic lport request handler
842 * @lport: The lport that received the request
843 * @sp: The sequence the request is on
844 * @fp: The frame the request is in
845 *
846 * This function determines whether the lport should handle the
847 * request itself or whether it should be passed on to an rport.
848 *
849 * Locking Note: This function should not be called with the lport
850 * lock held because it will grab the lock.
851 */
852static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
853 struct fc_frame *fp)
854{
855 struct fc_frame_header *fh = fc_frame_header_get(fp);
856 void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
857 struct fc_rport *rport;
858 u32 s_id;
859 u32 d_id;
860 struct fc_seq_els_data rjt_data;
861
862 mutex_lock(&lport->lp_mutex);
863
864 /*
865 * Handle special ELS cases like FLOGI, LOGO, and
866 * RSCN here. These don't require a session.
867 * Even if we had a session, it might not be ready.
868 */
869 if (fh->fh_type == FC_TYPE_ELS && fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
870 /*
871 * Check opcode.
872 */
873 recv = NULL;
874 switch (fc_frame_payload_op(fp)) {
875 case ELS_FLOGI:
876 recv = fc_lport_recv_flogi_req;
877 break;
878 case ELS_LOGO:
879 fh = fc_frame_header_get(fp);
880 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
881 recv = fc_lport_recv_logo_req;
882 break;
883 case ELS_RSCN:
884 recv = lport->tt.disc_recv_req;
885 break;
886 case ELS_ECHO:
887 recv = fc_lport_recv_echo_req;
888 break;
889 case ELS_RLIR:
890 recv = fc_lport_recv_rlir_req;
891 break;
892 case ELS_RNID:
893 recv = fc_lport_recv_rnid_req;
894 break;
895 case ELS_ADISC:
896 recv = fc_lport_recv_adisc_req;
897 break;
898 }
899
900 if (recv)
901 recv(sp, fp, lport);
902 else {
903 /*
904 * Find session.
905 * If this is a new incoming PLOGI, we won't find it.
906 */
907 s_id = ntoh24(fh->fh_s_id);
908 d_id = ntoh24(fh->fh_d_id);
909
910 rport = lport->tt.rport_lookup(lport, s_id);
911 if (rport)
912 lport->tt.rport_recv_req(sp, fp, rport);
913 else {
914 rjt_data.fp = NULL;
915 rjt_data.reason = ELS_RJT_UNAB;
916 rjt_data.explan = ELS_EXPL_NONE;
917 lport->tt.seq_els_rsp_send(sp,
918 ELS_LS_RJT,
919 &rjt_data);
920 fc_frame_free(fp);
921 }
922 }
923 } else {
924 FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp));
925 fc_frame_free(fp);
926 }
927 mutex_unlock(&lport->lp_mutex);
928
929 /*
930 * Calling exch_done() here for every request may not be
931 * appropriate if a request needs a longer hold on the exchange. XXX
932 */
933 lport->tt.exch_done(sp);
934}
935
936/**
937 * fc_lport_reset - Reset an lport
938 * @lport: The lport which should be reset
939 *
940 * Locking Note: This function should not be called with the
941 * lport lock held.
942 */
943int fc_lport_reset(struct fc_lport *lport)
944{
945 mutex_lock(&lport->lp_mutex);
946 fc_lport_enter_reset(lport);
947 mutex_unlock(&lport->lp_mutex);
948 return 0;
949}
950EXPORT_SYMBOL(fc_lport_reset);
951
952/**
953 * fc_lport_enter_reset - Reset the local port
954 * @lport: Fibre Channel local port to be reset
955 *
956 * Locking Note: The lport lock is expected to be held before calling
957 * this routine.
958 */
959static void fc_lport_enter_reset(struct fc_lport *lport)
960{
961 FC_DEBUG_LPORT("Port (%6x) entered RESET state from %s state\n",
962 fc_host_port_id(lport->host), fc_lport_state(lport));
963
964 fc_lport_state_enter(lport, LPORT_ST_RESET);
965
966 if (lport->dns_rp)
967 lport->tt.rport_logoff(lport->dns_rp);
968
969 if (lport->ptp_rp) {
970 lport->tt.rport_logoff(lport->ptp_rp);
971 lport->ptp_rp = NULL;
972 }
973
974 lport->tt.disc_stop(lport);
975
976 lport->tt.exch_mgr_reset(lport->emp, 0, 0);
977 fc_host_fabric_name(lport->host) = 0;
978 fc_host_port_id(lport->host) = 0;
979
980 if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP)
981 fc_lport_enter_flogi(lport);
982}
983
984/**
985 * fc_lport_error - Handler for any errors
986 * @lport: The fc_lport object
987 * @fp: The frame pointer
988 *
989 * If the error was caused by a resource allocation failure
990 * then wait for half a second and retry, otherwise retry
991 * after the e_d_tov time.
992 */
993static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
994{
995 unsigned long delay = 0;
996 FC_DEBUG_LPORT("Error %ld in state %s, retries %d\n",
997 PTR_ERR(fp), fc_lport_state(lport),
998 lport->retry_count);
999
1000 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
1001 /*
1002 * Memory allocation failure, or the exchange timed out.
1003 * Retry after delay
1004 */
1005 if (lport->retry_count < lport->max_retry_count) {
1006 lport->retry_count++;
1007 if (!fp)
1008 delay = msecs_to_jiffies(500);
1009 else
1010 delay = msecs_to_jiffies(lport->e_d_tov);
1011
1012 schedule_delayed_work(&lport->retry_work, delay);
1013 } else {
1014 switch (lport->state) {
1015 case LPORT_ST_NONE:
1016 case LPORT_ST_READY:
1017 case LPORT_ST_RESET:
1018 case LPORT_ST_RPN_ID:
1019 case LPORT_ST_RFT_ID:
1020 case LPORT_ST_SCR:
1021 case LPORT_ST_DNS:
1022 case LPORT_ST_FLOGI:
1023 case LPORT_ST_LOGO:
1024 fc_lport_enter_reset(lport);
1025 break;
1026 }
1027 }
1028 }
1029}
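/*
 * Retry timing sketch for fc_lport_error() (illustrative numbers): a
 * NULL frame (allocation failure) schedules retry_work after 500ms,
 * while an exchange timeout waits one e_d_tov, e.g. 2000ms with the
 * common default. Once retry_count exceeds max_retry_count the lport
 * is reset and the state machine starts over from FLOGI.
 */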
1030
1031/**
1032 * fc_lport_rft_id_resp - Handle response to Register FC-4 Types
1033 * by ID (RFT_ID) request
1034 * @sp: current sequence in RFT_ID exchange
1035 * @fp: response frame
1036 * @lp_arg: Fibre Channel host port instance
1037 *
1038 * Locking Note: This function will be called without the lport lock
1039 * held, but it will lock, call an _enter_* function or fc_lport_error
1040 * and then unlock the lport.
1041 */
1042static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1043 void *lp_arg)
1044{
1045 struct fc_lport *lport = lp_arg;
1046 struct fc_frame_header *fh;
1047 struct fc_ct_hdr *ct;
1048
1049 if (fp == ERR_PTR(-FC_EX_CLOSED))
1050 return;
1051
1052 mutex_lock(&lport->lp_mutex);
1053
1054 FC_DEBUG_LPORT("Received a RFT_ID response\n");
1055
1056 if (lport->state != LPORT_ST_RFT_ID) {
1057 FC_DBG("Received a RFT_ID response, but in state %s\n",
1058 fc_lport_state(lport));
1059 goto out;
1060 }
1061
1062 if (IS_ERR(fp)) {
1063 fc_lport_error(lport, fp);
1064 goto err;
1065 }
1066
1067 fh = fc_frame_header_get(fp);
1068 ct = fc_frame_payload_get(fp, sizeof(*ct));
1069
1070 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1071 ct->ct_fs_type == FC_FST_DIR &&
1072 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1073 ntohs(ct->ct_cmd) == FC_FS_ACC)
1074 fc_lport_enter_scr(lport);
1075 else
1076 fc_lport_error(lport, fp);
1077out:
1078 fc_frame_free(fp);
1079err:
1080 mutex_unlock(&lport->lp_mutex);
1081}
1082
1083/**
1084 * fc_lport_rpn_id_resp - Handle response to Register Port
1085 * Name by ID (RPN_ID) request
1086 * @sp: current sequence in RPN_ID exchange
1087 * @fp: response frame
1088 * @lp_arg: Fibre Channel host port instance
1089 *
1090 * Locking Note: This function will be called without the lport lock
1091 * held, but it will lock, call an _enter_* function or fc_lport_error
1092 * and then unlock the lport.
1093 */
1094static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1095 void *lp_arg)
1096{
1097 struct fc_lport *lport = lp_arg;
1098 struct fc_frame_header *fh;
1099 struct fc_ct_hdr *ct;
1100
1101 if (fp == ERR_PTR(-FC_EX_CLOSED))
1102 return;
1103
1104 mutex_lock(&lport->lp_mutex);
1105
1106 FC_DEBUG_LPORT("Received a RPN_ID response\n");
1107
1108 if (lport->state != LPORT_ST_RPN_ID) {
1109 FC_DBG("Received a RPN_ID response, but in state %s\n",
1110 fc_lport_state(lport));
1111 goto out;
1112 }
1113
1114 if (IS_ERR(fp)) {
1115 fc_lport_error(lport, fp);
1116 goto err;
1117 }
1118
1119 fh = fc_frame_header_get(fp);
1120 ct = fc_frame_payload_get(fp, sizeof(*ct));
1121 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1122 ct->ct_fs_type == FC_FST_DIR &&
1123 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1124 ntohs(ct->ct_cmd) == FC_FS_ACC)
1125 fc_lport_enter_rft_id(lport);
1126 else
1127 fc_lport_error(lport, fp);
1128
1129out:
1130 fc_frame_free(fp);
1131err:
1132 mutex_unlock(&lport->lp_mutex);
1133}
1134
1135/**
1136 * fc_lport_scr_resp - Handle response to State Change Register (SCR) request
1137 * @sp: current sequence in SCR exchange
1138 * @fp: response frame
1139 * @lp_arg: Fibre Channel local port instance that sent the SCR request
1140 *
1141 * Locking Note: This function will be called without the lport lock
1142 * held, but it will lock, call an _enter_* function or fc_lport_error
1143 * and then unlock the lport.
1144 */
1145static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1146 void *lp_arg)
1147{
1148 struct fc_lport *lport = lp_arg;
1149 u8 op;
1150
1151 if (fp == ERR_PTR(-FC_EX_CLOSED))
1152 return;
1153
1154 mutex_lock(&lport->lp_mutex);
1155
1156 FC_DEBUG_LPORT("Received a SCR response\n");
1157
1158 if (lport->state != LPORT_ST_SCR) {
1159 FC_DBG("Received a SCR response, but in state %s\n",
1160 fc_lport_state(lport));
1161 goto out;
1162 }
1163
1164 if (IS_ERR(fp)) {
1165 fc_lport_error(lport, fp);
1166 goto err;
1167 }
1168
1169 op = fc_frame_payload_op(fp);
1170 if (op == ELS_LS_ACC)
1171 fc_lport_enter_ready(lport);
1172 else
1173 fc_lport_error(lport, fp);
1174
1175out:
1176 fc_frame_free(fp);
1177err:
1178 mutex_unlock(&lport->lp_mutex);
1179}
1180
1181/**
1182 * fc_lport_enter_scr - Send a State Change Register (SCR) request
1183 * @lport: Fibre Channel local port to register for state changes
1184 *
1185 * Locking Note: The lport lock is expected to be held before calling
1186 * this routine.
1187 */
1188static void fc_lport_enter_scr(struct fc_lport *lport)
1189{
1190 struct fc_frame *fp;
1191
1192 FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n",
1193 fc_host_port_id(lport->host), fc_lport_state(lport));
1194
1195 fc_lport_state_enter(lport, LPORT_ST_SCR);
1196
1197 fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1198 if (!fp) {
1199 fc_lport_error(lport, fp);
1200 return;
1201 }
1202
1203 if (!lport->tt.elsct_send(lport, NULL, fp, ELS_SCR,
1204 fc_lport_scr_resp, lport, lport->e_d_tov))
1205 fc_lport_error(lport, fp);
1206}
1207
1208/**
1209 * fc_lport_enter_rft_id - Register FC4-types with the name server
1210 * @lport: Fibre Channel local port to register
1211 *
1212 * Locking Note: The lport lock is expected to be held before calling
1213 * this routine.
1214 */
1215static void fc_lport_enter_rft_id(struct fc_lport *lport)
1216{
1217 struct fc_frame *fp;
1218 struct fc_ns_fts *lps;
1219 int i;
1220
1221 FC_DEBUG_LPORT("Port (%6x) entered RFT_ID state from %s state\n",
1222 fc_host_port_id(lport->host), fc_lport_state(lport));
1223
1224 fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
1225
1226 lps = &lport->fcts;
1227 i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]);
1228 while (--i >= 0)
1229 if (ntohl(lps->ff_type_map[i]) != 0)
1230 break;
1231 if (i < 0) {
1232 /* nothing to register, move on to SCR */
1233 fc_lport_enter_scr(lport);
1234 return;
1235 }
1236
1237 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1238 sizeof(struct fc_ns_rft));
1239 if (!fp) {
1240 fc_lport_error(lport, fp);
1241 return;
1242 }
1243
1244 if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RFT_ID,
1245 fc_lport_rft_id_resp,
1246 lport, lport->e_d_tov))
1247 fc_lport_error(lport, fp);
1248}
1249
1250/**
1251 * fc_lport_enter_rpn_id - Register port name with the name server
1252 * @lport: Fibre Channel local port to register
1253 *
1254 * Locking Note: The lport lock is expected to be held before calling
1255 * this routine.
1256 */
1257static void fc_lport_enter_rpn_id(struct fc_lport *lport)
1258{
1259 struct fc_frame *fp;
1260
1261 FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n",
1262 fc_host_port_id(lport->host), fc_lport_state(lport));
1263
1264 fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
1265
1266 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1267 sizeof(struct fc_ns_rn_id));
1268 if (!fp) {
1269 fc_lport_error(lport, fp);
1270 return;
1271 }
1272
1273 if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RPN_ID,
1274 fc_lport_rpn_id_resp,
1275 lport, lport->e_d_tov))
1276 fc_lport_error(lport, fp);
1277}
1278
1279static struct fc_rport_operations fc_lport_rport_ops = {
1280 .event_callback = fc_lport_rport_callback,
1281};
1282
1283/**
1284 * fc_lport_enter_dns - Create an rport for the name server
1285 * @lport: Fibre Channel local port requesting a rport for the name server
1286 *
1287 * Locking Note: The lport lock is expected to be held before calling
1288 * this routine.
1289 */
1290static void fc_lport_enter_dns(struct fc_lport *lport)
1291{
1292 struct fc_rport *rport;
1293 struct fc_rport_libfc_priv *rdata;
1294 struct fc_disc_port dp;
1295
1296 dp.ids.port_id = FC_FID_DIR_SERV;
1297 dp.ids.port_name = -1;
1298 dp.ids.node_name = -1;
1299 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
1300 dp.lp = lport;
1301
1302 FC_DEBUG_LPORT("Port (%6x) entered DNS state from %s state\n",
1303 fc_host_port_id(lport->host), fc_lport_state(lport));
1304
1305 fc_lport_state_enter(lport, LPORT_ST_DNS);
1306
1307 rport = fc_rport_rogue_create(&dp);
1308 if (!rport)
1309 goto err;
1310
1311 rdata = rport->dd_data;
1312 rdata->ops = &fc_lport_rport_ops;
1313 lport->tt.rport_login(rport);
1314 return;
1315
1316err:
1317 fc_lport_error(lport, NULL);
1318}
1319
1320/**
1321 * fc_lport_timeout - Handler for the retry_work timer.
1322 * @work: The work struct of the fc_lport
1323 */
1324static void fc_lport_timeout(struct work_struct *work)
1325{
1326 struct fc_lport *lport =
1327 container_of(work, struct fc_lport,
1328 retry_work.work);
1329
1330 mutex_lock(&lport->lp_mutex);
1331
1332 switch (lport->state) {
1333 case LPORT_ST_NONE:
1334 case LPORT_ST_READY:
1335 case LPORT_ST_RESET:
1336 WARN_ON(1);
1337 break;
1338 case LPORT_ST_FLOGI:
1339 fc_lport_enter_flogi(lport);
1340 break;
1341 case LPORT_ST_DNS:
1342 fc_lport_enter_dns(lport);
1343 break;
1344 case LPORT_ST_RPN_ID:
1345 fc_lport_enter_rpn_id(lport);
1346 break;
1347 case LPORT_ST_RFT_ID:
1348 fc_lport_enter_rft_id(lport);
1349 break;
1350 case LPORT_ST_SCR:
1351 fc_lport_enter_scr(lport);
1352 break;
1353 case LPORT_ST_LOGO:
1354 fc_lport_enter_logo(lport);
1355 break;
1356 }
1357
1358 mutex_unlock(&lport->lp_mutex);
1359}
1360
1361/**
1362 * fc_lport_logo_resp - Handle response to LOGO request
1363 * @sp: current sequence in LOGO exchange
1364 * @fp: response frame
1365 * @lp_arg: Fibre Channel local port instance that sent the LOGO request
1366 *
1367 * Locking Note: This function will be called without the lport lock
1368 * held, but it will lock, call an _enter_* function or fc_lport_error
1369 * and then unlock the lport.
1370 */
1371static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1372 void *lp_arg)
1373{
1374 struct fc_lport *lport = lp_arg;
1375 u8 op;
1376
1377 if (fp == ERR_PTR(-FC_EX_CLOSED))
1378 return;
1379
1380 mutex_lock(&lport->lp_mutex);
1381
1382 FC_DEBUG_LPORT("Received a LOGO response\n");
1383
1384 if (lport->state != LPORT_ST_LOGO) {
1385 FC_DBG("Received a LOGO response, but in state %s\n",
1386 fc_lport_state(lport));
1387 goto out;
1388 }
1389
1390 if (IS_ERR(fp)) {
1391 fc_lport_error(lport, fp);
1392 goto err;
1393 }
1394
1395 op = fc_frame_payload_op(fp);
1396 if (op == ELS_LS_ACC)
1397 fc_lport_enter_reset(lport);
1398 else
1399 fc_lport_error(lport, fp);
1400
1401out:
1402 fc_frame_free(fp);
1403err:
1404 mutex_unlock(&lport->lp_mutex);
1405}
1406
1407/**
1408 * fc_lport_enter_logo - Logout of the fabric
1409 * @lport: Fibre Channel local port to be logged out
1410 *
1411 * Locking Note: The lport lock is expected to be held before calling
1412 * this routine.
1413 */
1414static void fc_lport_enter_logo(struct fc_lport *lport)
1415{
1416 struct fc_frame *fp;
1417 struct fc_els_logo *logo;
1418
1419 FC_DEBUG_LPORT("Port (%6x) entered LOGO state from %s state\n",
1420 fc_host_port_id(lport->host), fc_lport_state(lport));
1421
1422 fc_lport_state_enter(lport, LPORT_ST_LOGO);
1423
1424 /* DNS session should be closed so we can release it here */
1425 if (lport->dns_rp)
1426 lport->tt.rport_logoff(lport->dns_rp);
1427
1428 fp = fc_frame_alloc(lport, sizeof(*logo));
1429 if (!fp) {
1430 fc_lport_error(lport, fp);
1431 return;
1432 }
1433
1434 if (!lport->tt.elsct_send(lport, NULL, fp, ELS_LOGO, fc_lport_logo_resp,
1435 lport, lport->e_d_tov))
1436 fc_lport_error(lport, fp);
1437}
1438
1439/**
1440 * fc_lport_flogi_resp - Handle response to FLOGI request
1441 * @sp: current sequence in FLOGI exchange
1442 * @fp: response frame
1443 * @lp_arg: Fibre Channel local port instance that sent the FLOGI request
1444 *
1445 * Locking Note: This function will be called without the lport lock
1446 * held, but it will lock, call an _enter_* function or fc_lport_error
1447 * and then unlock the lport.
1448 */
1449static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1450 void *lp_arg)
1451{
1452 struct fc_lport *lport = lp_arg;
1453 struct fc_frame_header *fh;
1454 struct fc_els_flogi *flp;
1455 u32 did;
1456 u16 csp_flags;
1457 unsigned int r_a_tov;
1458 unsigned int e_d_tov;
1459 u16 mfs;
1460
1461 if (fp == ERR_PTR(-FC_EX_CLOSED))
1462 return;
1463
1464 mutex_lock(&lport->lp_mutex);
1465
1466 FC_DEBUG_LPORT("Received a FLOGI response\n");
1467
1468 if (lport->state != LPORT_ST_FLOGI) {
1469 FC_DBG("Received a FLOGI response, but in state %s\n",
1470 fc_lport_state(lport));
1471 goto out;
1472 }
1473
1474 if (IS_ERR(fp)) {
1475 fc_lport_error(lport, fp);
1476 goto err;
1477 }
1478
1479 fh = fc_frame_header_get(fp);
1480 did = ntoh24(fh->fh_d_id);
1481 if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
1482
1483 FC_DEBUG_LPORT("Assigned fid %x\n", did);
1484 fc_host_port_id(lport->host) = did;
1485
1486 flp = fc_frame_payload_get(fp, sizeof(*flp));
1487 if (flp) {
1488 mfs = ntohs(flp->fl_csp.sp_bb_data) &
1489 FC_SP_BB_DATA_MASK;
1490 if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
1491 mfs < lport->mfs)
1492 lport->mfs = mfs;
1493 csp_flags = ntohs(flp->fl_csp.sp_features);
1494 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1495 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
1496 if (csp_flags & FC_SP_FT_EDTR)
1497 e_d_tov /= 1000000;
1498 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1499 if (e_d_tov > lport->e_d_tov)
1500 lport->e_d_tov = e_d_tov;
1501 lport->r_a_tov = 2 * e_d_tov;
1502 FC_DBG("Point-to-Point mode\n");
1503 fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
1504 get_unaligned_be64(
1505 &flp->fl_wwpn),
1506 get_unaligned_be64(
1507 &flp->fl_wwnn));
1508 } else {
1509 lport->e_d_tov = e_d_tov;
1510 lport->r_a_tov = r_a_tov;
1511 fc_host_fabric_name(lport->host) =
1512 get_unaligned_be64(&flp->fl_wwnn);
1513 fc_lport_enter_dns(lport);
1514 }
1515 }
1516
1517 if (flp) {
1518 csp_flags = ntohs(flp->fl_csp.sp_features);
1519 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1520 lport->tt.disc_start(fc_lport_disc_callback,
1521 lport);
1522 }
1523 }
1524 } else {
1525 FC_DBG("bad FLOGI response\n");
1526 }
1527
1528out:
1529 fc_frame_free(fp);
1530err:
1531 mutex_unlock(&lport->lp_mutex);
1532}
1533
1534/**
1535 * fc_lport_enter_flogi - Send a FLOGI request to the fabric manager
1536 * @lport: Fibre Channel local port to be logged in to the fabric
1537 *
1538 * Locking Note: The lport lock is expected to be held before calling
1539 * this routine.
1540 */
1541void fc_lport_enter_flogi(struct fc_lport *lport)
1542{
1543 struct fc_frame *fp;
1544
1545 FC_DEBUG_LPORT("Processing FLOGI state\n");
1546
1547 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1548
1549 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1550 if (!fp)
1551 return fc_lport_error(lport, fp);
1552
1553 if (!lport->tt.elsct_send(lport, NULL, fp, ELS_FLOGI,
1554 fc_lport_flogi_resp, lport, lport->e_d_tov))
1555 fc_lport_error(lport, fp);
1556}
1557
1558/* Configure an fc_lport */
1559int fc_lport_config(struct fc_lport *lport)
1560{
1561 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1562 mutex_init(&lport->lp_mutex);
1563
1564 fc_lport_state_enter(lport, LPORT_ST_NONE);
1565
1566 fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1567 fc_lport_add_fc4_type(lport, FC_TYPE_CT);
1568
1569 return 0;
1570}
1571EXPORT_SYMBOL(fc_lport_config);
1572
1573int fc_lport_init(struct fc_lport *lport)
1574{
1575 if (!lport->tt.lport_recv)
1576 lport->tt.lport_recv = fc_lport_recv_req;
1577
1578 if (!lport->tt.lport_reset)
1579 lport->tt.lport_reset = fc_lport_reset;
1580
1581 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1582 fc_host_node_name(lport->host) = lport->wwnn;
1583 fc_host_port_name(lport->host) = lport->wwpn;
1584 fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
1585 memset(fc_host_supported_fc4s(lport->host), 0,
1586 sizeof(fc_host_supported_fc4s(lport->host)));
1587 fc_host_supported_fc4s(lport->host)[2] = 1;
1588 fc_host_supported_fc4s(lport->host)[7] = 1;
1589
1590 /* This value is also unchanging */
1591 memset(fc_host_active_fc4s(lport->host), 0,
1592 sizeof(fc_host_active_fc4s(lport->host)));
1593 fc_host_active_fc4s(lport->host)[2] = 1;
1594 fc_host_active_fc4s(lport->host)[7] = 1;
1595 fc_host_maxframe_size(lport->host) = lport->mfs;
1596 fc_host_supported_speeds(lport->host) = 0;
1597 if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
1598 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
1599 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1600 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1601
1602 return 0;
1603}
1604EXPORT_SYMBOL(fc_lport_init);
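/*
 * A minimal bring-up sketch for the two entry points above
 * (hypothetical driver code; assumes lport->wwnn, lport->wwpn and the
 * libfc transport template have already been filled in):
 *
 *	fc_lport_config(lport);		init mutex, timer, FC-4 type map
 *	fc_set_mfs(lport, mfs);		frame size from the hardware
 *	fc_lport_init(lport);		default handlers, fc_host attributes
 *	mutex_lock(&lport->lp_mutex);
 *	fc_lport_enter_flogi(lport);	start fabric login
 *	mutex_unlock(&lport->lp_mutex);
 */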
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
new file mode 100644
index 000000000000..e780d8caf70e
--- /dev/null
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -0,0 +1,1291 @@
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * RPORT GENERAL INFO
22 *
23 * This file contains all processing regarding fc_rports. It contains the
24 * rport state machine and does all rport interaction with the transport class.
25 * There should be no other places in libfc that interact directly with the
26 * transport class with regard to adding and deleting rports.
27 *
28 * fc_rports represent N_Ports within the fabric.
29 */
30
31/*
32 * RPORT LOCKING
33 *
34 * The rport should never hold the rport mutex and then attempt to acquire
35 * either the lport or disc mutexes. The rport's mutex is considered lesser
36 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
37 * more comments on the hierarchy.
38 *
39 * The locking strategy is similar to the lport's strategy. The lock protects
40 * the rport's states and is held and released by the entry points to the rport
41 * block. All _enter_* functions correspond to rport states and expect the rport
42 * mutex to be locked before calling them. This means that rports only handle
43 * one request or response at a time; since rports are not critical to the
44 * I/O path, this potential over-use of the mutex is acceptable.
45 */
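/*
 * Example of the ordering rule above: code that must hold both locks
 * takes the lport mutex first,
 *
 *	mutex_lock(&lport->lp_mutex);
 *	mutex_lock(&rdata->rp_mutex);
 *	...
 *	mutex_unlock(&rdata->rp_mutex);
 *	mutex_unlock(&lport->lp_mutex);
 *
 * while taking rp_mutex and then lp_mutex invites an AB-BA deadlock
 * against the paths that already honor the hierarchy.
 */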
46
47#include <linux/kernel.h>
48#include <linux/spinlock.h>
49#include <linux/interrupt.h>
50#include <linux/rcupdate.h>
51#include <linux/timer.h>
52#include <linux/workqueue.h>
53#include <asm/unaligned.h>
54
55#include <scsi/libfc.h>
56#include <scsi/fc_encode.h>
57
58static int fc_rport_debug;
59
60#define FC_DEBUG_RPORT(fmt...) \
61 do { \
62 if (fc_rport_debug) \
63 FC_DBG(fmt); \
64 } while (0)
65
66struct workqueue_struct *rport_event_queue;
67
68static void fc_rport_enter_plogi(struct fc_rport *);
69static void fc_rport_enter_prli(struct fc_rport *);
70static void fc_rport_enter_rtv(struct fc_rport *);
71static void fc_rport_enter_ready(struct fc_rport *);
72static void fc_rport_enter_logo(struct fc_rport *);
73
74static void fc_rport_recv_plogi_req(struct fc_rport *,
75 struct fc_seq *, struct fc_frame *);
76static void fc_rport_recv_prli_req(struct fc_rport *,
77 struct fc_seq *, struct fc_frame *);
78static void fc_rport_recv_prlo_req(struct fc_rport *,
79 struct fc_seq *, struct fc_frame *);
80static void fc_rport_recv_logo_req(struct fc_rport *,
81 struct fc_seq *, struct fc_frame *);
82static void fc_rport_timeout(struct work_struct *);
83static void fc_rport_error(struct fc_rport *, struct fc_frame *);
84static void fc_rport_work(struct work_struct *);
85
86static const char *fc_rport_state_names[] = {
87 [RPORT_ST_NONE] = "None",
88 [RPORT_ST_INIT] = "Init",
89 [RPORT_ST_PLOGI] = "PLOGI",
90 [RPORT_ST_PRLI] = "PRLI",
91 [RPORT_ST_RTV] = "RTV",
92 [RPORT_ST_READY] = "Ready",
93 [RPORT_ST_LOGO] = "LOGO",
94};
95
96static void fc_rport_rogue_destroy(struct device *dev)
97{
98 struct fc_rport *rport = dev_to_rport(dev);
99 FC_DEBUG_RPORT("Destroying rogue rport (%6x)\n", rport->port_id);
100 kfree(rport);
101}
102
103struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
104{
105 struct fc_rport *rport;
106 struct fc_rport_libfc_priv *rdata;
107 rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL);
108
109 if (!rport)
110 return NULL;
111
112 rdata = RPORT_TO_PRIV(rport);
113
114 rport->dd_data = rdata;
115 rport->port_id = dp->ids.port_id;
116 rport->port_name = dp->ids.port_name;
117 rport->node_name = dp->ids.node_name;
118 rport->roles = dp->ids.roles;
119 rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
120 /*
121 * Note: all this libfc rogue rport code will be removed for
122 * upstream, so it is fine that this is really ugly and hacky right now.
123 */
124 device_initialize(&rport->dev);
125 rport->dev.release = fc_rport_rogue_destroy;
126
127 mutex_init(&rdata->rp_mutex);
128 rdata->local_port = dp->lp;
129 rdata->trans_state = FC_PORTSTATE_ROGUE;
130 rdata->rp_state = RPORT_ST_INIT;
131 rdata->event = RPORT_EV_NONE;
132 rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
133 rdata->ops = NULL;
134 rdata->e_d_tov = dp->lp->e_d_tov;
135 rdata->r_a_tov = dp->lp->r_a_tov;
136 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
137 INIT_WORK(&rdata->event_work, fc_rport_work);
138 /*
139 * For good measure, but not necessary as we should only
140 * add REAL rport to the lport list.
141 */
142 INIT_LIST_HEAD(&rdata->peers);
143
144 return rport;
145}
146
147/**
148 * fc_rport_state - return a string for the state the rport is in
149 * @rport: The rport whose state we want to get a string for
150 */
151static const char *fc_rport_state(struct fc_rport *rport)
152{
153 const char *cp;
154 struct fc_rport_libfc_priv *rdata = rport->dd_data;
155
156 cp = fc_rport_state_names[rdata->rp_state];
157 if (!cp)
158 cp = "Unknown";
159 return cp;
160}
161
162/**
163 * fc_set_rport_loss_tmo - Set the remote port loss timeout in seconds.
164 * @rport: Pointer to Fibre Channel remote port structure
165 * @timeout: timeout in seconds
166 */
167void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
168{
169 if (timeout)
170 rport->dev_loss_tmo = timeout + 5;
171 else
172 rport->dev_loss_tmo = 30;
173}
174EXPORT_SYMBOL(fc_set_rport_loss_tmo);
175
176/**
177 * fc_plogi_get_maxframe - Get max payload from the common service parameters
178 * @flp: FLOGI payload structure
179 * @maxval: upper limit, may be less than what is in the service parameters
180 */
181static unsigned int
182fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval)
183{
184 unsigned int mfs;
185
186 /*
187 * Get max payload from the common service parameters and the
188 * class 3 receive data field size.
189 */
190 mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
191 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
192 maxval = mfs;
193 mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
194 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
195 maxval = mfs;
196 return maxval;
197}
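/*
 * Worked example (illustrative values): with sp_bb_data = 0x0840,
 * masking with FC_SP_BB_DATA_MASK yields a 2112-byte receive data
 * field; if the class 3 page advertises cp_rdfs = 2048 and the caller
 * passed maxval = 2112, the function returns the smallest of the
 * three, 2048.
 */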
198
199/**
200 * fc_rport_state_enter - Change the rport's state
201 * @rport: The rport whose state should change
202 * @new: The new state of the rport
203 *
204 * Locking Note: Called with the rport lock held
205 */
206static void fc_rport_state_enter(struct fc_rport *rport,
207 enum fc_rport_state new)
208{
209 struct fc_rport_libfc_priv *rdata = rport->dd_data;
210 if (rdata->rp_state != new)
211 rdata->retries = 0;
212 rdata->rp_state = new;
213}
214
215static void fc_rport_work(struct work_struct *work)
216{
217 struct fc_rport_libfc_priv *rdata =
218 container_of(work, struct fc_rport_libfc_priv, event_work);
219 enum fc_rport_event event;
220 enum fc_rport_trans_state trans_state;
221 struct fc_lport *lport = rdata->local_port;
222 struct fc_rport_operations *rport_ops;
223 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
224
225 mutex_lock(&rdata->rp_mutex);
226 event = rdata->event;
227 rport_ops = rdata->ops;
228
229 if (event == RPORT_EV_CREATED) {
230 struct fc_rport *new_rport;
231 struct fc_rport_libfc_priv *new_rdata;
232 struct fc_rport_identifiers ids;
233
234 ids.port_id = rport->port_id;
235 ids.roles = rport->roles;
236 ids.port_name = rport->port_name;
237 ids.node_name = rport->node_name;
238
239 mutex_unlock(&rdata->rp_mutex);
240
241 new_rport = fc_remote_port_add(lport->host, 0, &ids);
242 if (new_rport) {
243 /*
244 * Switch from the rogue rport to the rport
245 * returned by the FC class.
246 */
247 new_rport->maxframe_size = rport->maxframe_size;
248
249 new_rdata = new_rport->dd_data;
250 new_rdata->e_d_tov = rdata->e_d_tov;
251 new_rdata->r_a_tov = rdata->r_a_tov;
252 new_rdata->ops = rdata->ops;
253 new_rdata->local_port = rdata->local_port;
254 new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
255 new_rdata->trans_state = FC_PORTSTATE_REAL;
256 mutex_init(&new_rdata->rp_mutex);
257 INIT_DELAYED_WORK(&new_rdata->retry_work,
258 fc_rport_timeout);
259 INIT_LIST_HEAD(&new_rdata->peers);
260 INIT_WORK(&new_rdata->event_work, fc_rport_work);
261
262 fc_rport_state_enter(new_rport, RPORT_ST_READY);
263 } else {
264 FC_DBG("Failed to create the rport for port "
265 "(%6x).\n", ids.port_id);
266 event = RPORT_EV_FAILED;
267 }
268 put_device(&rport->dev);
269 rport = new_rport;
270 rdata = new_rport->dd_data;
271 if (rport_ops->event_callback)
272 rport_ops->event_callback(lport, rport, event);
273 } else if ((event == RPORT_EV_FAILED) ||
274 (event == RPORT_EV_LOGO) ||
275 (event == RPORT_EV_STOP)) {
276 trans_state = rdata->trans_state;
277 mutex_unlock(&rdata->rp_mutex);
278 if (rport_ops->event_callback)
279 rport_ops->event_callback(lport, rport, event);
280 if (trans_state == FC_PORTSTATE_ROGUE)
281 put_device(&rport->dev);
282 else
283 fc_remote_port_delete(rport);
284 } else
285 mutex_unlock(&rdata->rp_mutex);
286}
287
288/**
289 * fc_rport_login - Start the remote port login state machine
290 * @rport: Fibre Channel remote port
291 *
292 * Locking Note: Called without the rport lock held. This
293 * function will hold the rport lock, call an _enter_*
294 * function and then unlock the rport.
295 */
296int fc_rport_login(struct fc_rport *rport)
297{
298 struct fc_rport_libfc_priv *rdata = rport->dd_data;
299
300 mutex_lock(&rdata->rp_mutex);
301
302 FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id);
303
304 fc_rport_enter_plogi(rport);
305
306 mutex_unlock(&rdata->rp_mutex);
307
308 return 0;
309}
310
311/**
312 * fc_rport_logoff - Logoff and remove an rport
313 * @rport: Fibre Channel remote port to be removed
314 *
315 * Locking Note: Called without the rport lock held. This
316 * function will hold the rport lock, call an _enter_*
317 * function and then unlock the rport.
318 */
319int fc_rport_logoff(struct fc_rport *rport)
320{
321 struct fc_rport_libfc_priv *rdata = rport->dd_data;
322
323 mutex_lock(&rdata->rp_mutex);
324
325 FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id);
326
327 fc_rport_enter_logo(rport);
328
329 /*
330 * Change the state to NONE so that we discard
331 * the response.
332 */
333 fc_rport_state_enter(rport, RPORT_ST_NONE);
334
335 mutex_unlock(&rdata->rp_mutex);
336
337 cancel_delayed_work_sync(&rdata->retry_work);
338
339 mutex_lock(&rdata->rp_mutex);
340
341 rdata->event = RPORT_EV_STOP;
342 queue_work(rport_event_queue, &rdata->event_work);
343
344 mutex_unlock(&rdata->rp_mutex);
345
346 return 0;
347}
348
349/**
350 * fc_rport_enter_ready - The rport is ready
351 * @rport: Fibre Channel remote port that is ready
352 *
353 * Locking Note: The rport lock is expected to be held before calling
354 * this routine.
355 */
356static void fc_rport_enter_ready(struct fc_rport *rport)
357{
358 struct fc_rport_libfc_priv *rdata = rport->dd_data;
359
360 fc_rport_state_enter(rport, RPORT_ST_READY);
361
362 FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id);
363
364 rdata->event = RPORT_EV_CREATED;
365 queue_work(rport_event_queue, &rdata->event_work);
366}
367
368/**
369 * fc_rport_timeout - Handler for the retry_work timer.
370 * @work: The work struct of the fc_rport_libfc_priv
371 *
372 * Locking Note: Called without the rport lock held. This
373 * function will hold the rport lock, call an _enter_*
374 * function and then unlock the rport.
375 */
376static void fc_rport_timeout(struct work_struct *work)
377{
378 struct fc_rport_libfc_priv *rdata =
379 container_of(work, struct fc_rport_libfc_priv, retry_work.work);
380 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
381
382 mutex_lock(&rdata->rp_mutex);
383
384 switch (rdata->rp_state) {
385 case RPORT_ST_PLOGI:
386 fc_rport_enter_plogi(rport);
387 break;
388 case RPORT_ST_PRLI:
389 fc_rport_enter_prli(rport);
390 break;
391 case RPORT_ST_RTV:
392 fc_rport_enter_rtv(rport);
393 break;
394 case RPORT_ST_LOGO:
395 fc_rport_enter_logo(rport);
396 break;
397 case RPORT_ST_READY:
398 case RPORT_ST_INIT:
399 case RPORT_ST_NONE:
400 break;
401 }
402
403 mutex_unlock(&rdata->rp_mutex);
404 put_device(&rport->dev);
405}
406
407/**
408 * fc_rport_error - Handler for any errors
409 * @rport: The fc_rport object
410 * @fp: The frame pointer
411 *
412 * If the error was caused by a resource allocation failure
413 * then wait for half a second and retry, otherwise retry
414 * immediately.
415 *
416 * Locking Note: The rport lock is expected to be held before
417 * calling this routine
418 */
419static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
420{
421 struct fc_rport_libfc_priv *rdata = rport->dd_data;
422 unsigned long delay = 0;
423
424 FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n",
425 PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
426
427 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
428 /*
429 * Memory allocation failure, or the exchange timed out.
430 * Retry after delay
431 */
432 if (rdata->retries < rdata->local_port->max_retry_count) {
433 rdata->retries++;
434 if (!fp)
435 delay = msecs_to_jiffies(500);
436 get_device(&rport->dev);
437 schedule_delayed_work(&rdata->retry_work, delay);
438 } else {
439 switch (rdata->rp_state) {
440 case RPORT_ST_PLOGI:
441 case RPORT_ST_PRLI:
442 case RPORT_ST_LOGO:
443 rdata->event = RPORT_EV_FAILED;
444 queue_work(rport_event_queue,
445 &rdata->event_work);
446 break;
447 case RPORT_ST_RTV:
448 fc_rport_enter_ready(rport);
449 break;
450 case RPORT_ST_NONE:
451 case RPORT_ST_READY:
452 case RPORT_ST_INIT:
453 break;
454 }
455 }
456 }
457}
458
459/**
460 * fc_rport_plogi_resp - Handle incoming ELS PLOGI response
461 * @sp: current sequence in the PLOGI exchange
462 * @fp: response frame
463 * @rp_arg: Fibre Channel remote port
464 *
465 * Locking Note: This function will be called without the rport lock
466 * held, but it will lock, call an _enter_* function or fc_rport_error
467 * and then unlock the rport.
468 */
469static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
470 void *rp_arg)
471{
472 struct fc_rport *rport = rp_arg;
473 struct fc_rport_libfc_priv *rdata = rport->dd_data;
474 struct fc_lport *lport = rdata->local_port;
475 struct fc_els_flogi *plp;
476 unsigned int tov;
477 u16 csp_seq;
478 u16 cssp_seq;
479 u8 op;
480
481 mutex_lock(&rdata->rp_mutex);
482
483 FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n",
484 rport->port_id);
485
486 if (rdata->rp_state != RPORT_ST_PLOGI) {
487 FC_DBG("Received a PLOGI response, but in state %s\n",
488 fc_rport_state(rport));
489 goto out;
490 }
491
492 if (IS_ERR(fp)) {
493 fc_rport_error(rport, fp);
494 goto err;
495 }
496
497 op = fc_frame_payload_op(fp);
498 if (op == ELS_LS_ACC &&
499 (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
500 rport->port_name = get_unaligned_be64(&plp->fl_wwpn);
501 rport->node_name = get_unaligned_be64(&plp->fl_wwnn);
502
503 tov = ntohl(plp->fl_csp.sp_e_d_tov);
504 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
505 tov /= 1000000;	/* ns -> ms, as in fc_lport_flogi_resp() */
506 if (tov > rdata->e_d_tov)
507 rdata->e_d_tov = tov;
508 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
509 cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
510 if (cssp_seq < csp_seq)
511 csp_seq = cssp_seq;
512 rdata->max_seq = csp_seq;
513 rport->maxframe_size =
514 fc_plogi_get_maxframe(plp, lport->mfs);
515
516 /*
517 * If the rport is one of the well known addresses
518 * we skip PRLI and RTV and go straight to READY.
519 */
520 if (rport->port_id >= FC_FID_DOM_MGR)
521 fc_rport_enter_ready(rport);
522 else
523 fc_rport_enter_prli(rport);
524 } else
525 fc_rport_error(rport, fp);
526
527out:
528 fc_frame_free(fp);
529err:
530 mutex_unlock(&rdata->rp_mutex);
531 put_device(&rport->dev);
532}
533
534/**
535 * fc_rport_enter_plogi - Send Port Login (PLOGI) request to peer
536 * @rport: Fibre Channel remote port to send PLOGI to
537 *
538 * Locking Note: The rport lock is expected to be held before calling
539 * this routine.
540 */
541static void fc_rport_enter_plogi(struct fc_rport *rport)
542{
543 struct fc_rport_libfc_priv *rdata = rport->dd_data;
544 struct fc_lport *lport = rdata->local_port;
545 struct fc_frame *fp;
546
547 FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n",
548 rport->port_id, fc_rport_state(rport));
549
550 fc_rport_state_enter(rport, RPORT_ST_PLOGI);
551
552 rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
553 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
554 if (!fp) {
555 fc_rport_error(rport, fp);
556 return;
557 }
558 rdata->e_d_tov = lport->e_d_tov;
559
560 if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI,
561 fc_rport_plogi_resp, rport, lport->e_d_tov))
562 fc_rport_error(rport, fp);
563 else
564 get_device(&rport->dev);
565}
566
567/**
568 * fc_rport_prli_resp - Process Login (PRLI) response handler
569 * @sp: current sequence in the PRLI exchange
570 * @fp: response frame
571 * @rp_arg: Fibre Channel remote port
572 *
573 * Locking Note: This function will be called without the rport lock
574 * held, but it will lock, call an _enter_* function or fc_rport_error
575 * and then unlock the rport.
576 */
577static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
578 void *rp_arg)
579{
580 struct fc_rport *rport = rp_arg;
581 struct fc_rport_libfc_priv *rdata = rport->dd_data;
582 struct {
583 struct fc_els_prli prli;
584 struct fc_els_spp spp;
585 } *pp;
586 u32 roles = FC_RPORT_ROLE_UNKNOWN;
587 u32 fcp_parm = 0;
588 u8 op;
589
590 mutex_lock(&rdata->rp_mutex);
591
592 FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n",
593 rport->port_id);
594
595 if (rdata->rp_state != RPORT_ST_PRLI) {
596 FC_DBG("Received a PRLI response, but in state %s\n",
597 fc_rport_state(rport));
598 goto out;
599 }
600
601 if (IS_ERR(fp)) {
602 fc_rport_error(rport, fp);
603 goto err;
604 }
605
606 op = fc_frame_payload_op(fp);
607 if (op == ELS_LS_ACC) {
608 pp = fc_frame_payload_get(fp, sizeof(*pp));
609 if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
610 fcp_parm = ntohl(pp->spp.spp_params);
611 if (fcp_parm & FCP_SPPF_RETRY)
612 rdata->flags |= FC_RP_FLAGS_RETRY;
613 }
614
615 rport->supported_classes = FC_COS_CLASS3;
616 if (fcp_parm & FCP_SPPF_INIT_FCN)
617 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
618 if (fcp_parm & FCP_SPPF_TARG_FCN)
619 roles |= FC_RPORT_ROLE_FCP_TARGET;
620
621 rport->roles = roles;
622 fc_rport_enter_rtv(rport);
623
624 } else {
625 FC_DBG("Bad ELS response\n");
626 rdata->event = RPORT_EV_FAILED;
627 queue_work(rport_event_queue, &rdata->event_work);
628 }
629
630out:
631 fc_frame_free(fp);
632err:
633 mutex_unlock(&rdata->rp_mutex);
634 put_device(&rport->dev);
635}
636
637/**
638 * fc_rport_logo_resp - Logout (LOGO) response handler
639 * @sp: current sequence in the LOGO exchange
640 * @fp: response frame
641 * @rp_arg: Fibre Channel remote port
642 *
643 * Locking Note: This function will be called without the rport lock
644 * held, but it will lock, call an _enter_* function or fc_rport_error
645 * and then unlock the rport.
646 */
647static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
648 void *rp_arg)
649{
650 struct fc_rport *rport = rp_arg;
651 struct fc_rport_libfc_priv *rdata = rport->dd_data;
652 u8 op;
653
654 mutex_lock(&rdata->rp_mutex);
655
656 FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n",
657 rport->port_id);
658
659 if (IS_ERR(fp)) {
660 fc_rport_error(rport, fp);
661 goto err;
662 }
663
664 if (rdata->rp_state != RPORT_ST_LOGO) {
665 FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n",
666 fc_rport_state(rport));
667 goto out;
668 }
669
670 op = fc_frame_payload_op(fp);
671 if (op == ELS_LS_ACC) {
672 fc_rport_enter_rtv(rport);
673 } else {
674 FC_DBG("Bad ELS response\n");
675 rdata->event = RPORT_EV_LOGO;
676 queue_work(rport_event_queue, &rdata->event_work);
677 }
678
679out:
680 fc_frame_free(fp);
681err:
682 mutex_unlock(&rdata->rp_mutex);
683 put_device(&rport->dev);
684}
685
686/**
687 * fc_rport_enter_prli - Send Process Login (PRLI) request to peer
688 * @rport: Fibre Channel remote port to send PRLI to
689 *
690 * Locking Note: The rport lock is expected to be held before calling
691 * this routine.
692 */
693static void fc_rport_enter_prli(struct fc_rport *rport)
694{
695 struct fc_rport_libfc_priv *rdata = rport->dd_data;
696 struct fc_lport *lport = rdata->local_port;
697 struct {
698 struct fc_els_prli prli;
699 struct fc_els_spp spp;
700 } *pp;
701 struct fc_frame *fp;
702
703 FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n",
704 rport->port_id, fc_rport_state(rport));
705
706 fc_rport_state_enter(rport, RPORT_ST_PRLI);
707
708 fp = fc_frame_alloc(lport, sizeof(*pp));
709 if (!fp) {
710 fc_rport_error(rport, fp);
711 return;
712 }
713
714 if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI,
715 fc_rport_prli_resp, rport, lport->e_d_tov))
716 fc_rport_error(rport, fp);
717 else
718 get_device(&rport->dev);
719}
720
721/**
722 * fc_rport_rtv_resp - Request Timeout Value (RTV) response handler
723 * @sp: current sequence in the RTV exchange
724 * @fp: response frame
725 * @rp_arg: Fibre Channel remote port
726 *
727 * Many targets don't seem to support this.
728 *
729 * Locking Note: This function will be called without the rport lock
730 * held, but it will lock, call an _enter_* function or fc_rport_error
731 * and then unlock the rport.
732 */
733static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
734 void *rp_arg)
735{
736 struct fc_rport *rport = rp_arg;
737 struct fc_rport_libfc_priv *rdata = rport->dd_data;
738 u8 op;
739
740 mutex_lock(&rdata->rp_mutex);
741
742 FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n",
743 rport->port_id);
744
745 if (rdata->rp_state != RPORT_ST_RTV) {
746 FC_DBG("Received a RTV response, but in state %s\n",
747 fc_rport_state(rport));
748 goto out;
749 }
750
751 if (IS_ERR(fp)) {
752 fc_rport_error(rport, fp);
753 goto err;
754 }
755
756 op = fc_frame_payload_op(fp);
757 if (op == ELS_LS_ACC) {
758 struct fc_els_rtv_acc *rtv;
759 u32 toq;
760 u32 tov;
761
762 rtv = fc_frame_payload_get(fp, sizeof(*rtv));
763 if (rtv) {
764 toq = ntohl(rtv->rtv_toq);
765 tov = ntohl(rtv->rtv_r_a_tov);
766 if (tov == 0)
767 tov = 1;
768 rdata->r_a_tov = tov;
769 tov = ntohl(rtv->rtv_e_d_tov);
770 if (toq & FC_ELS_RTV_EDRES)
771 tov /= 1000000;
772 if (tov == 0)
773 tov = 1;
774 rdata->e_d_tov = tov;
775 }
776 }
777
778 fc_rport_enter_ready(rport);
779
780out:
781 fc_frame_free(fp);
782err:
783 mutex_unlock(&rdata->rp_mutex);
784 put_device(&rport->dev);
785}
786
787/**
788 * fc_rport_enter_rtv - Send Request Timeout Value (RTV) request to peer
789 * @rport: Fibre Channel remote port to send RTV to
790 *
791 * Locking Note: The rport lock is expected to be held before calling
792 * this routine.
793 */
794static void fc_rport_enter_rtv(struct fc_rport *rport)
795{
796 struct fc_frame *fp;
797 struct fc_rport_libfc_priv *rdata = rport->dd_data;
798 struct fc_lport *lport = rdata->local_port;
799
800 FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n",
801 rport->port_id, fc_rport_state(rport));
802
803 fc_rport_state_enter(rport, RPORT_ST_RTV);
804
805 fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
806 if (!fp) {
807 fc_rport_error(rport, fp);
808 return;
809 }
810
811 if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV,
812 fc_rport_rtv_resp, rport, lport->e_d_tov))
813 fc_rport_error(rport, fp);
814 else
815 get_device(&rport->dev);
816}
817
818/**
819 * fc_rport_enter_logo - Send Logout (LOGO) request to peer
820 * @rport: Fibre Channel remote port to send LOGO to
821 *
822 * Locking Note: The rport lock is expected to be held before calling
823 * this routine.
824 */
825static void fc_rport_enter_logo(struct fc_rport *rport)
826{
827 struct fc_rport_libfc_priv *rdata = rport->dd_data;
828 struct fc_lport *lport = rdata->local_port;
829 struct fc_frame *fp;
830
831 FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n",
832 rport->port_id, fc_rport_state(rport));
833
834 fc_rport_state_enter(rport, RPORT_ST_LOGO);
835
836 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
837 if (!fp) {
838 fc_rport_error(rport, fp);
839 return;
840 }
841
842 if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO,
843 fc_rport_logo_resp, rport, lport->e_d_tov))
844 fc_rport_error(rport, fp);
845 else
846 get_device(&rport->dev);
847}
848
849
850/**
851 * fc_rport_recv_req - Receive a request from an rport
852 * @sp: current sequence in the request exchange
853 * @fp: request frame
854 * @rport: Fibre Channel remote port
855 *
856 * Locking Note: Called without the rport lock held. This
857 * function will hold the rport lock, call an _enter_*
858 * function and then unlock the rport.
859 */
860void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
861 struct fc_rport *rport)
862{
863 struct fc_rport_libfc_priv *rdata = rport->dd_data;
864 struct fc_lport *lport = rdata->local_port;
865
866 struct fc_frame_header *fh;
867 struct fc_seq_els_data els_data;
868 u8 op;
869
870 mutex_lock(&rdata->rp_mutex);
871
872 els_data.fp = NULL;
873 els_data.explan = ELS_EXPL_NONE;
874 els_data.reason = ELS_RJT_NONE;
875
876 fh = fc_frame_header_get(fp);
877
878 if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
879 op = fc_frame_payload_op(fp);
880 switch (op) {
881 case ELS_PLOGI:
882 fc_rport_recv_plogi_req(rport, sp, fp);
883 break;
884 case ELS_PRLI:
885 fc_rport_recv_prli_req(rport, sp, fp);
886 break;
887 case ELS_PRLO:
888 fc_rport_recv_prlo_req(rport, sp, fp);
889 break;
890 case ELS_LOGO:
891 fc_rport_recv_logo_req(rport, sp, fp);
892 break;
893 case ELS_RRQ:
894 els_data.fp = fp;
895 lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
896 break;
897 case ELS_REC:
898 els_data.fp = fp;
899 lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
900 break;
901 default:
902 els_data.reason = ELS_RJT_UNSUP;
903 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
904 break;
905 }
906 }
907
908 mutex_unlock(&rdata->rp_mutex);
909}
910
911/**
912 * fc_rport_recv_plogi_req - Handle incoming Port Login (PLOGI) request
913 * @rport: Fibre Channel remote port that initiated PLOGI
914 * @sp: current sequence in the PLOGI exchange
915 * @rx_fp: PLOGI request frame
916 *
917 * Locking Note: The rport lock is expected to be held before calling
918 * this function.
919 */
920static void fc_rport_recv_plogi_req(struct fc_rport *rport,
921 struct fc_seq *sp, struct fc_frame *rx_fp)
922{
923 struct fc_rport_libfc_priv *rdata = rport->dd_data;
924 struct fc_lport *lport = rdata->local_port;
925 struct fc_frame *fp = rx_fp;
926 struct fc_exch *ep;
927 struct fc_frame_header *fh;
928 struct fc_els_flogi *pl;
929 struct fc_seq_els_data rjt_data;
930 u32 sid;
931 u64 wwpn;
932 u64 wwnn;
933 enum fc_els_rjt_reason reject = 0;
934 u32 f_ctl;
935 rjt_data.fp = NULL;
936
937 fh = fc_frame_header_get(fp);
938
939 FC_DEBUG_RPORT("Received PLOGI request from port (%6x) "
940 "while in state %s\n", ntoh24(fh->fh_s_id),
941 fc_rport_state(rport));
942
943 sid = ntoh24(fh->fh_s_id);
944 pl = fc_frame_payload_get(fp, sizeof(*pl));
945 if (!pl) {
946 FC_DBG("incoming PLOGI from %x too short\n", sid);
947 WARN_ON(1);
948 /* XXX TBD: send reject? */
949 fc_frame_free(fp);
950 return;
951 }
952 wwpn = get_unaligned_be64(&pl->fl_wwpn);
953 wwnn = get_unaligned_be64(&pl->fl_wwnn);
954
955 /*
956 * If the session was just created, possibly due to the incoming PLOGI,
957 * set the state appropriately and accept the PLOGI.
958 *
959 * If we had also sent a PLOGI, and if the received PLOGI is from a
960 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
961 * "command already in progress".
962 *
963 * XXX TBD: If the session was ready before, the PLOGI should result in
964 * all outstanding exchanges being reset.
965 */
966 switch (rdata->rp_state) {
967 case RPORT_ST_INIT:
968 FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT "
969 "- reject\n", sid, wwpn);
970 reject = ELS_RJT_UNSUP;
971 break;
972 case RPORT_ST_PLOGI:
973 FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n",
974 sid, rdata->rp_state);
975 if (wwpn < lport->wwpn)
976 reject = ELS_RJT_INPROG;
977 break;
978 case RPORT_ST_PRLI:
979 case RPORT_ST_READY:
980 FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d "
981 "- ignored for now\n", sid, rdata->rp_state);
982 /* XXX TBD - should reset */
983 break;
984 case RPORT_ST_NONE:
985 default:
986 FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected "
987 "state %d\n", sid, rdata->rp_state);
988 break;
989 }
990
991 if (reject) {
992 rjt_data.reason = reject;
993 rjt_data.explan = ELS_EXPL_NONE;
994 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
995 fc_frame_free(fp);
996 } else {
997 fp = fc_frame_alloc(lport, sizeof(*pl));
998 if (fp == NULL) {
999 fp = rx_fp;
1000 rjt_data.reason = ELS_RJT_UNAB;
1001 rjt_data.explan = ELS_EXPL_NONE;
1002 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1003 fc_frame_free(fp);
1004 } else {
1005 sp = lport->tt.seq_start_next(sp);
1006 WARN_ON(!sp);
1007 fc_rport_set_name(rport, wwpn, wwnn);
1008
1009 /*
1010 * Get session payload size from incoming PLOGI.
1011 */
1012 rport->maxframe_size =
1013 fc_plogi_get_maxframe(pl, lport->mfs);
1014 fc_frame_free(rx_fp);
1015 fc_plogi_fill(lport, fp, ELS_LS_ACC);
1016
1017 /*
1018 * Send LS_ACC. If this fails,
1019 * the originator should retry.
1020 */
1021 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1022 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1023 ep = fc_seq_exch(sp);
1024 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1025 FC_TYPE_ELS, f_ctl, 0);
1026 lport->tt.seq_send(lport, sp, fp);
1027 if (rdata->rp_state == RPORT_ST_PLOGI)
1028 fc_rport_enter_prli(rport);
1029 }
1030 }
1031}
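/*
 * PLOGI collision sketch (hypothetical WWPNs): if both ends send PLOGI
 * at once and the received PLOGI carries WWPN 0x20000000c9000001 while
 * ours is 0x20000000c9000002, then wwpn < lport->wwpn holds and the
 * code above answers with LS_RJT reason ELS_RJT_INPROG; the lower-WWPN
 * peer accepts our PLOGI instead, so exactly one login survives.
 */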
1032
1033/**
1034 * fc_rport_recv_prli_req - Handle incoming Process Login (PRLI) request
1035 * @rport: Fibre Channel remote port that initiated PRLI
1036 * @sp: current sequence in the PRLI exchange
1037 * @rx_fp: PRLI request frame
1038 *
1039 * Locking Note: The rport lock is expected to be held before calling
1040 * this function.
1041 */
1042static void fc_rport_recv_prli_req(struct fc_rport *rport,
1043 struct fc_seq *sp, struct fc_frame *rx_fp)
1044{
1045 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1046 struct fc_lport *lport = rdata->local_port;
1047 struct fc_exch *ep;
1048 struct fc_frame *fp;
1049 struct fc_frame_header *fh;
1050 struct {
1051 struct fc_els_prli prli;
1052 struct fc_els_spp spp;
1053 } *pp;
1054 struct fc_els_spp *rspp; /* request service param page */
1055 struct fc_els_spp *spp; /* response spp */
1056 unsigned int len;
1057 unsigned int plen;
1058 enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1059 enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1060 enum fc_els_spp_resp resp;
1061 struct fc_seq_els_data rjt_data;
1062 u32 f_ctl;
1063 u32 fcp_parm;
1064 u32 roles = FC_RPORT_ROLE_UNKNOWN;
1065 rjt_data.fp = NULL;
1066
1067 fh = fc_frame_header_get(rx_fp);
1068
1069 FC_DEBUG_RPORT("Received PRLI request from port (%6x) "
1070 "while in state %s\n", ntoh24(fh->fh_s_id),
1071 fc_rport_state(rport));
1072
1073 switch (rdata->rp_state) {
1074 case RPORT_ST_PRLI:
1075 case RPORT_ST_READY:
1076 reason = ELS_RJT_NONE;
1077 break;
1078 default:
1079 break;
1080 }
1081 len = fr_len(rx_fp) - sizeof(*fh);
1082 pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1083 if (pp == NULL) {
1084 reason = ELS_RJT_PROT;
1085 explan = ELS_EXPL_INV_LEN;
1086 } else {
1087 plen = ntohs(pp->prli.prli_len);
1088 if ((plen % 4) != 0 || plen > len) {
1089 reason = ELS_RJT_PROT;
1090 explan = ELS_EXPL_INV_LEN;
1091 } else if (plen < len) {
1092 len = plen;
1093 }
1094 plen = pp->prli.prli_spp_len;
1095 if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1096 plen > len || len < sizeof(*pp)) {
1097 reason = ELS_RJT_PROT;
1098 explan = ELS_EXPL_INV_LEN;
1099 }
1100 rspp = &pp->spp;
1101 }
1102 if (reason != ELS_RJT_NONE ||
1103 (fp = fc_frame_alloc(lport, len)) == NULL) {
1104 rjt_data.reason = reason;
1105 rjt_data.explan = explan;
1106 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1107 } else {
1108 sp = lport->tt.seq_start_next(sp);
1109 WARN_ON(!sp);
1110 pp = fc_frame_payload_get(fp, len);
1111 WARN_ON(!pp);
1112 memset(pp, 0, len);
1113 pp->prli.prli_cmd = ELS_LS_ACC;
1114 pp->prli.prli_spp_len = plen;
1115 pp->prli.prli_len = htons(len);
1116 len -= sizeof(struct fc_els_prli);
1117
1118 /*
1119 * Go through all the service parameter pages and build
1120 * response. If plen indicates longer SPP than standard,
1121 * use that. The entire response has been pre-cleared above.
1122 */
1123 spp = &pp->spp;
1124 while (len >= plen) {
1125 spp->spp_type = rspp->spp_type;
1126 spp->spp_type_ext = rspp->spp_type_ext;
1127 spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1128 resp = FC_SPP_RESP_ACK;
1129 if (rspp->spp_flags & FC_SPP_RPA_VAL)
1130 resp = FC_SPP_RESP_NO_PA;
1131 switch (rspp->spp_type) {
1132 case 0: /* common to all FC-4 types */
1133 break;
1134 case FC_TYPE_FCP:
1135 fcp_parm = ntohl(rspp->spp_params);
1136 if (fcp_parm & FCP_SPPF_RETRY)
1137 rdata->flags |= FC_RP_FLAGS_RETRY;
1138 rport->supported_classes = FC_COS_CLASS3;
1139 if (fcp_parm & FCP_SPPF_INIT_FCN)
1140 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1141 if (fcp_parm & FCP_SPPF_TARG_FCN)
1142 roles |= FC_RPORT_ROLE_FCP_TARGET;
1143 rport->roles = roles;
1144
1145 spp->spp_params =
1146 htonl(lport->service_params);
1147 break;
1148 default:
1149 resp = FC_SPP_RESP_INVL;
1150 break;
1151 }
1152 spp->spp_flags |= resp;
1153 len -= plen;
1154 rspp = (struct fc_els_spp *)((char *)rspp + plen);
1155 spp = (struct fc_els_spp *)((char *)spp + plen);
1156 }
1157
1158 /*
1159 * Send LS_ACC. If this fails, the originator should retry.
1160 */
1161 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1162 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1163 ep = fc_seq_exch(sp);
1164 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1165 FC_TYPE_ELS, f_ctl, 0);
1166 lport->tt.seq_send(lport, sp, fp);
1167
1168 /*
1169 * Get lock and re-check state.
1170 */
1171 switch (rdata->rp_state) {
1172 case RPORT_ST_PRLI:
1173 fc_rport_enter_ready(rport);
1174 break;
1175 case RPORT_ST_READY:
1176 break;
1177 default:
1178 break;
1179 }
1180 }
1181 fc_frame_free(rx_fp);
1182}
1183
1184/**
1185 * fc_rport_recv_prlo_req - Handle incoming Process Logout (PRLO) request
1186 * @rport: Fibre Channel remote port that initiated PRLO
1187 * @sp: current sequence in the PRLO exchange
1188 * @fp: PRLO request frame
1189 *
1190 * Locking Note: The rport lock is expected to be held before calling
1191 * this function.
1192 */
1193static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
1194 struct fc_frame *fp)
1195{
1196 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1197 struct fc_lport *lport = rdata->local_port;
1198
1199 struct fc_frame_header *fh;
1200 struct fc_seq_els_data rjt_data;
1201
1202 fh = fc_frame_header_get(fp);
1203
1204 FC_DEBUG_RPORT("Received PRLO request from port (%6x) "
1205 "while in state %s\n", ntoh24(fh->fh_s_id),
1206 fc_rport_state(rport));
1207
1208 rjt_data.fp = NULL;
1209 rjt_data.reason = ELS_RJT_UNAB;
1210 rjt_data.explan = ELS_EXPL_NONE;
1211 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1212 fc_frame_free(fp);
1213}
1214
1215/**
1216 * fc_rport_recv_logo_req - Handle incoming Logout (LOGO) request
1217 * @rport: Fibre Channel remote port that initiated LOGO
1218 * @sp: current sequence in the LOGO exchange
1219 * @fp: LOGO request frame
1220 *
1221 * Locking Note: The rport lock is expected to be held before calling
1222 * this function.
1223 */
1224static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
1225 struct fc_frame *fp)
1226{
1227 struct fc_frame_header *fh;
1228 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1229 struct fc_lport *lport = rdata->local_port;
1230
1231 fh = fc_frame_header_get(fp);
1232
1233 FC_DEBUG_RPORT("Received LOGO request from port (%6x) "
1234 "while in state %s\n", ntoh24(fh->fh_s_id),
1235 fc_rport_state(rport));
1236
1237 rdata->event = RPORT_EV_LOGO;
1238 queue_work(rport_event_queue, &rdata->event_work);
1239
1240 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
1241 fc_frame_free(fp);
1242}
1243
1244static void fc_rport_flush_queue(void)
1245{
1246 flush_workqueue(rport_event_queue);
1247}
1248
1249
1250int fc_rport_init(struct fc_lport *lport)
1251{
1252 if (!lport->tt.rport_login)
1253 lport->tt.rport_login = fc_rport_login;
1254
1255 if (!lport->tt.rport_logoff)
1256 lport->tt.rport_logoff = fc_rport_logoff;
1257
1258 if (!lport->tt.rport_recv_req)
1259 lport->tt.rport_recv_req = fc_rport_recv_req;
1260
1261 if (!lport->tt.rport_flush_queue)
1262 lport->tt.rport_flush_queue = fc_rport_flush_queue;
1263
1264 return 0;
1265}
1266EXPORT_SYMBOL(fc_rport_init);
1267
1268int fc_setup_rport(void)
1269{
1270 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1271 if (!rport_event_queue)
1272 return -ENOMEM;
1273 return 0;
1274}
1275EXPORT_SYMBOL(fc_setup_rport);
1276
1277void fc_destroy_rport(void)
1278{
1279 destroy_workqueue(rport_event_queue);
1280}
1281EXPORT_SYMBOL(fc_destroy_rport);
1282
1283void fc_rport_terminate_io(struct fc_rport *rport)
1284{
1285 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1286 struct fc_lport *lport = rdata->local_port;
1287
1288 lport->tt.exch_mgr_reset(lport->emp, 0, rport->port_id);
1289 lport->tt.exch_mgr_reset(lport->emp, rport->port_id, 0);
1290}
1291EXPORT_SYMBOL(fc_rport_terminate_io);
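As a usage note: fc_setup_rport()/fc_destroy_rport() pair with module init/exit, alongside the exchange-manager setup declared in libfc.h. A minimal sketch of that wiring, assuming a consumer module (my_libfc_init/my_libfc_exit are illustrative names, not part of this patch):

static int __init my_libfc_init(void)
{
	int rc;

	rc = fc_setup_exch_mgr();	/* set up exchange manager state */
	if (rc)
		return rc;

	rc = fc_setup_rport();		/* create the rport event workqueue */
	if (rc)
		fc_destroy_exch_mgr();
	return rc;
}

static void __exit my_libfc_exit(void)
{
	fc_destroy_rport();		/* flush and destroy the workqueue */
	fc_destroy_exch_mgr();
}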
diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h
new file mode 100644
index 000000000000..6300f556bce5
--- /dev/null
+++ b/include/scsi/fc_encode.h
@@ -0,0 +1,309 @@
1/*
2 * Copyright(c) 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#ifndef _FC_ENCODE_H_
21#define _FC_ENCODE_H_
22#include <asm/unaligned.h>
23
24struct fc_ns_rft {
25 struct fc_ns_fid fid; /* port ID object */
26 struct fc_ns_fts fts; /* FC4-types object */
27};
28
29struct fc_ct_req {
30 struct fc_ct_hdr hdr;
31 union {
32 struct fc_ns_gid_ft gid;
33 struct fc_ns_rn_id rn;
34 struct fc_ns_rft rft;
35 } payload;
36};
37
38/**
39 * fc_fill_fc_hdr - fill FC header fields in the specified fc_frame
40 */
41static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl,
42 u32 did, u32 sid, enum fc_fh_type type,
43 u32 f_ctl, u32 parm_offset)
44{
45 struct fc_frame_header *fh;
46
47 fh = fc_frame_header_get(fp);
48 WARN_ON(r_ctl == 0);
49 fh->fh_r_ctl = r_ctl;
50 hton24(fh->fh_d_id, did);
51 hton24(fh->fh_s_id, sid);
52 fh->fh_type = type;
53 hton24(fh->fh_f_ctl, f_ctl);
54 fh->fh_cs_ctl = 0;
55 fh->fh_df_ctl = 0;
56 fh->fh_parm_offset = htonl(parm_offset);
57}
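For illustration, a request path would stamp the header after filling the payload; a sketch (the F_CTL bits shown are the usual single-sequence request set, and did is whatever destination FC_ID the caller resolved):

static void my_stamp_els_req(struct fc_lport *lport, struct fc_frame *fp,
			     u32 did)
{
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, fc_host_port_id(lport->host),
		       FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
}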
58
59/**
60 * fc_ct_hdr_fill - fill the CT header and reset the CT payload
61 * Returns a pointer to the CT request.
62 */
63static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp,
64 unsigned int op, size_t req_size)
65{
66 struct fc_ct_req *ct;
67 size_t ct_plen;
68
69 ct_plen = sizeof(struct fc_ct_hdr) + req_size;
70 ct = fc_frame_payload_get(fp, ct_plen);
71 memset(ct, 0, ct_plen);
72 ct->hdr.ct_rev = FC_CT_REV;
73 ct->hdr.ct_fs_type = FC_FST_DIR;
74 ct->hdr.ct_fs_subtype = FC_NS_SUBTYPE;
75 ct->hdr.ct_cmd = htons((u16) op);
76 return ct;
77}
78
79/**
80 * fc_ct_fill - Fill in a name service request frame
81 */
82static inline int fc_ct_fill(struct fc_lport *lport, struct fc_frame *fp,
83 unsigned int op, enum fc_rctl *r_ctl, u32 *did,
84 enum fc_fh_type *fh_type)
85{
86 struct fc_ct_req *ct;
87
88 switch (op) {
89 case FC_NS_GPN_FT:
90 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_gid_ft));
91 ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
92 break;
93
94 case FC_NS_RFT_ID:
95 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft));
96 hton24(ct->payload.rft.fid.fp_fid,
97 fc_host_port_id(lport->host));
98 ct->payload.rft.fts = lport->fcts;
99 break;
100
101 case FC_NS_RPN_ID:
102 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id));
103 hton24(ct->payload.rn.fr_fid.fp_fid,
104 fc_host_port_id(lport->host));
106 put_unaligned_be64(lport->wwpn, &ct->payload.rn.fr_wwn);
107 break;
108
109 default:
110		FC_DBG("Invalid op code %x\n", op);
111 return -EINVAL;
112 }
113 *r_ctl = FC_RCTL_DD_UNSOL_CTL;
114 *did = FC_FID_DIR_SERV;
115 *fh_type = FC_TYPE_CT;
116 return 0;
117}
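As a usage sketch, a name-server query lets fc_ct_fill() choose the R_CTL, D_ID, and frame type, then stamps the header with fc_fill_fc_hdr() (my_fill_gpn_ft is a hypothetical caller; the frame is assumed to be freshly allocated):

static int my_fill_gpn_ft(struct fc_lport *lport, struct fc_frame *fp)
{
	enum fc_rctl r_ctl;
	enum fc_fh_type fh_type;
	u32 did;
	int rc;

	rc = fc_ct_fill(lport, fp, FC_NS_GPN_FT, &r_ctl, &did, &fh_type);
	if (rc)
		return rc;
	fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	return 0;
}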
118
119/**
120 * fc_plogi_fill - Fill in plogi request frame
121 */
122static inline void fc_plogi_fill(struct fc_lport *lport, struct fc_frame *fp,
123 unsigned int op)
124{
125 struct fc_els_flogi *plogi;
126 struct fc_els_csp *csp;
127 struct fc_els_cssp *cp;
128
129 plogi = fc_frame_payload_get(fp, sizeof(*plogi));
130 memset(plogi, 0, sizeof(*plogi));
131 plogi->fl_cmd = (u8) op;
132 put_unaligned_be64(lport->wwpn, &plogi->fl_wwpn);
133 put_unaligned_be64(lport->wwnn, &plogi->fl_wwnn);
134
135 csp = &plogi->fl_csp;
136 csp->sp_hi_ver = 0x20;
137 csp->sp_lo_ver = 0x20;
138 csp->sp_bb_cred = htons(10); /* this gets set by gateway */
139 csp->sp_bb_data = htons((u16) lport->mfs);
140 cp = &plogi->fl_cssp[3 - 1]; /* class 3 parameters */
141 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
142 csp->sp_features = htons(FC_SP_FT_CIRO);
143 csp->sp_tot_seq = htons(255); /* seq. we accept */
144 csp->sp_rel_off = htons(0x1f);
145 csp->sp_e_d_tov = htonl(lport->e_d_tov);
146
147 cp->cp_rdfs = htons((u16) lport->mfs);
148 cp->cp_con_seq = htons(255);
149 cp->cp_open_seq = 1;
150}
151
152/**
153 * fc_flogi_fill - Fill in a flogi request frame.
154 */
155static inline void fc_flogi_fill(struct fc_lport *lport, struct fc_frame *fp)
156{
157 struct fc_els_csp *sp;
158 struct fc_els_cssp *cp;
159 struct fc_els_flogi *flogi;
160
161 flogi = fc_frame_payload_get(fp, sizeof(*flogi));
162 memset(flogi, 0, sizeof(*flogi));
163 flogi->fl_cmd = (u8) ELS_FLOGI;
164 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
165 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
166 sp = &flogi->fl_csp;
167 sp->sp_hi_ver = 0x20;
168 sp->sp_lo_ver = 0x20;
169 sp->sp_bb_cred = htons(10); /* this gets set by gateway */
170 sp->sp_bb_data = htons((u16) lport->mfs);
171 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
172 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
173}
174
175/**
176 * fc_logo_fill - Fill in a logo request frame.
177 */
178static inline void fc_logo_fill(struct fc_lport *lport, struct fc_frame *fp)
179{
180 struct fc_els_logo *logo;
181
182 logo = fc_frame_payload_get(fp, sizeof(*logo));
183 memset(logo, 0, sizeof(*logo));
184 logo->fl_cmd = ELS_LOGO;
185 hton24(logo->fl_n_port_id, fc_host_port_id(lport->host));
186 logo->fl_n_port_wwn = htonll(lport->wwpn);
187}
188
189/**
190 * fc_rtv_fill - Fill in RTV (read timeout value) request frame.
191 */
192static inline void fc_rtv_fill(struct fc_lport *lport, struct fc_frame *fp)
193{
194 struct fc_els_rtv *rtv;
195
196 rtv = fc_frame_payload_get(fp, sizeof(*rtv));
197 memset(rtv, 0, sizeof(*rtv));
198 rtv->rtv_cmd = ELS_RTV;
199}
200
201/**
202 * fc_rec_fill - Fill in rec request frame
203 */
204static inline void fc_rec_fill(struct fc_lport *lport, struct fc_frame *fp)
205{
206 struct fc_els_rec *rec;
207 struct fc_exch *ep = fc_seq_exch(fr_seq(fp));
208
209 rec = fc_frame_payload_get(fp, sizeof(*rec));
210 memset(rec, 0, sizeof(*rec));
211 rec->rec_cmd = ELS_REC;
212 hton24(rec->rec_s_id, fc_host_port_id(lport->host));
213 rec->rec_ox_id = htons(ep->oxid);
214 rec->rec_rx_id = htons(ep->rxid);
215}
216
217/**
218 * fc_prli_fill - Fill in prli request frame
219 */
220static inline void fc_prli_fill(struct fc_lport *lport, struct fc_frame *fp)
221{
222 struct {
223 struct fc_els_prli prli;
224 struct fc_els_spp spp;
225 } *pp;
226
227 pp = fc_frame_payload_get(fp, sizeof(*pp));
228 memset(pp, 0, sizeof(*pp));
229 pp->prli.prli_cmd = ELS_PRLI;
230 pp->prli.prli_spp_len = sizeof(struct fc_els_spp);
231 pp->prli.prli_len = htons(sizeof(*pp));
232 pp->spp.spp_type = FC_TYPE_FCP;
233 pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
234 pp->spp.spp_params = htonl(lport->service_params);
235}
236
237/**
238 * fc_scr_fill - Fill in a scr request frame.
239 */
240static inline void fc_scr_fill(struct fc_lport *lport, struct fc_frame *fp)
241{
242 struct fc_els_scr *scr;
243
244 scr = fc_frame_payload_get(fp, sizeof(*scr));
245 memset(scr, 0, sizeof(*scr));
246 scr->scr_cmd = ELS_SCR;
247 scr->scr_reg_func = ELS_SCRF_FULL;
248}
249
250/**
251 * fc_els_fill - Fill in an ELS request frame
252 */
253static inline int fc_els_fill(struct fc_lport *lport, struct fc_rport *rport,
254 struct fc_frame *fp, unsigned int op,
255 enum fc_rctl *r_ctl, u32 *did, enum fc_fh_type *fh_type)
256{
257 switch (op) {
258 case ELS_PLOGI:
259 fc_plogi_fill(lport, fp, ELS_PLOGI);
260 *did = rport->port_id;
261 break;
262
263 case ELS_FLOGI:
264 fc_flogi_fill(lport, fp);
265 *did = FC_FID_FLOGI;
266 break;
267
268 case ELS_LOGO:
269 fc_logo_fill(lport, fp);
270 *did = FC_FID_FLOGI;
271 /*
272		 * if rport is valid then this is
273		 * a port LOGO, so set did to
274		 * the rport ID.
275 */
276 if (rport)
277 *did = rport->port_id;
278 break;
279
280 case ELS_RTV:
281 fc_rtv_fill(lport, fp);
282 *did = rport->port_id;
283 break;
284
285 case ELS_REC:
286 fc_rec_fill(lport, fp);
287 *did = rport->port_id;
288 break;
289
290 case ELS_PRLI:
291 fc_prli_fill(lport, fp);
292 *did = rport->port_id;
293 break;
294
295 case ELS_SCR:
296 fc_scr_fill(lport, fp);
297 *did = FC_FID_FCTRL;
298 break;
299
300 default:
301		FC_DBG("Invalid op code %x\n", op);
302 return -EINVAL;
303 }
304
305 *r_ctl = FC_RCTL_ELS_REQ;
306 *fh_type = FC_TYPE_ELS;
307 return 0;
308}
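fc_els_fill() and fc_ct_fill() share the same out-parameters so a single elsct send path can dispatch on the opcode; a sketch of that dispatch (the assumption here is that ELS opcode values are disjoint from and numerically above the CT name-server opcodes, with ELS_LS_RJT as the lowest ELS value):

static int my_elsct_fill(struct fc_lport *lport, struct fc_rport *rport,
			 struct fc_frame *fp, unsigned int op,
			 enum fc_rctl *r_ctl, u32 *did,
			 enum fc_fh_type *fh_type)
{
	/* Assumption: ops >= ELS_LS_RJT are ELS, lower values are CT. */
	if (op >= ELS_LS_RJT)
		return fc_els_fill(lport, rport, fp, op, r_ctl, did, fh_type);
	return fc_ct_fill(lport, fp, op, r_ctl, did, fh_type);
}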
309#endif /* _FC_ENCODE_H_ */
diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h
new file mode 100644
index 000000000000..04d34a71355f
--- /dev/null
+++ b/include/scsi/fc_frame.h
@@ -0,0 +1,242 @@
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#ifndef _FC_FRAME_H_
21#define _FC_FRAME_H_
22
23#include <linux/scatterlist.h>
24#include <linux/skbuff.h>
25#include <scsi/scsi_cmnd.h>
26
27#include <scsi/fc/fc_fs.h>
28#include <scsi/fc/fc_fcp.h>
29#include <scsi/fc/fc_encaps.h>
30
31/*
32 * The fc_frame interface is used to pass frame data between functions.
33 * The frame includes the data buffer, length, and SOF / EOF delimiter types.
34 * A pointer to the port structure of the receiving port is also included.
35 */
36
37#define FC_FRAME_HEADROOM 32 /* headroom for VLAN + FCoE headers */
38#define FC_FRAME_TAILROOM 8 /* trailer space for FCoE */
39
40/*
41 * Information about an individual fibre channel frame received or to be sent.
42 * The buffer may be in up to 4 additional non-contiguous sections,
43 * but the linear section must hold the frame header.
44 */
45#define FC_FRAME_SG_LEN 4 /* scatter/gather list maximum length */
46
47#define fp_skb(fp) (&((fp)->skb))
48#define fr_hdr(fp) ((fp)->skb.data)
49#define fr_len(fp) ((fp)->skb.len)
50#define fr_cb(fp) ((struct fcoe_rcv_info *)&((fp)->skb.cb[0]))
51#define fr_dev(fp) (fr_cb(fp)->fr_dev)
52#define fr_seq(fp) (fr_cb(fp)->fr_seq)
53#define fr_sof(fp) (fr_cb(fp)->fr_sof)
54#define fr_eof(fp) (fr_cb(fp)->fr_eof)
55#define fr_flags(fp) (fr_cb(fp)->fr_flags)
56#define fr_max_payload(fp) (fr_cb(fp)->fr_max_payload)
57#define fr_cmd(fp) (fr_cb(fp)->fr_cmd)
58#define fr_dir(fp) (fr_cmd(fp)->sc_data_direction)
59#define fr_crc(fp) (fr_cb(fp)->fr_crc)
60
61struct fc_frame {
62 struct sk_buff skb;
63};
64
65struct fcoe_rcv_info {
66 struct packet_type *ptype;
67 struct fc_lport *fr_dev; /* transport layer private pointer */
68 struct fc_seq *fr_seq; /* for use with exchange manager */
69 struct scsi_cmnd *fr_cmd; /* for use of scsi command */
70 u32 fr_crc;
71 u16 fr_max_payload; /* max FC payload */
72 enum fc_sof fr_sof; /* start of frame delimiter */
73 enum fc_eof fr_eof; /* end of frame delimiter */
74 u8 fr_flags; /* flags - see below */
75};
76
77
78/*
79 * Get the fcoe_rcv_info pointer for an skb that's already been imported.
80 */
81static inline struct fcoe_rcv_info *fcoe_dev_from_skb(const struct sk_buff *skb)
82{
83 BUILD_BUG_ON(sizeof(struct fcoe_rcv_info) > sizeof(skb->cb));
84 return (struct fcoe_rcv_info *) skb->cb;
85}
86
87/*
88 * fr_flags.
89 */
90#define FCPHF_CRC_UNCHECKED 0x01 /* CRC not computed, still appended */
91
92/*
93 * Initialize a frame.
94 * We don't do a complete memset here for performance reasons.
95 * The caller must eventually set fr_hdr, fr_len, fr_sof, and fr_eof.
96 */
97static inline void fc_frame_init(struct fc_frame *fp)
98{
99 fr_dev(fp) = NULL;
100 fr_seq(fp) = NULL;
101 fr_flags(fp) = 0;
102}
103
104struct fc_frame *fc_frame_alloc_fill(struct fc_lport *, size_t payload_len);
105
106struct fc_frame *__fc_frame_alloc(size_t payload_len);
107
108/*
109 * Get frame for sending via port.
110 */
111static inline struct fc_frame *_fc_frame_alloc(struct fc_lport *dev,
112 size_t payload_len)
113{
114 return __fc_frame_alloc(payload_len);
115}
116
117/*
118 * Allocate fc_frame structure and buffer. Set the initial length to
119 * payload_size + sizeof (struct fc_frame_header).
120 */
121static inline struct fc_frame *fc_frame_alloc(struct fc_lport *dev, size_t len)
122{
123 struct fc_frame *fp;
124
125 /*
126 * Note: Since len will often be a constant multiple of 4,
127 * this check will usually be evaluated and eliminated at compile time.
128 */
129 if ((len % 4) != 0)
130 fp = fc_frame_alloc_fill(dev, len);
131 else
132 fp = _fc_frame_alloc(dev, len);
133 return fp;
134}
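A typical caller sizes the frame to its payload and then fetches the payload region with fc_frame_payload_get() (defined below); a sketch building an SCR request, assuming scsi/fc/fc_els.h for the SCR definitions (my_build_scr is illustrative):

static struct fc_frame *my_build_scr(struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_els_scr *scr;

	fp = fc_frame_alloc(lport, sizeof(*scr));	/* header + payload */
	if (!fp)
		return NULL;
	scr = fc_frame_payload_get(fp, sizeof(*scr));	/* after the header */
	memset(scr, 0, sizeof(*scr));
	scr->scr_cmd = ELS_SCR;
	return fp;
}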
135
136/*
137 * Free the fc_frame structure and buffer.
138 */
139static inline void fc_frame_free(struct fc_frame *fp)
140{
141 kfree_skb(fp_skb(fp));
142}
143
144static inline int fc_frame_is_linear(struct fc_frame *fp)
145{
146 return !skb_is_nonlinear(fp_skb(fp));
147}
148
149/*
150 * Get frame header from message in fc_frame structure.
151 * This hides a cast and provides a place to add some checking.
152 */
153static inline
154struct fc_frame_header *fc_frame_header_get(const struct fc_frame *fp)
155{
156 WARN_ON(fr_len(fp) < sizeof(struct fc_frame_header));
157 return (struct fc_frame_header *) fr_hdr(fp);
158}
159
160/*
161 * Get frame payload from message in fc_frame structure.
162 * This hides a cast and provides a place to add some checking.
163 * The len parameter is the minimum length for the payload portion.
164 * Returns NULL if the frame is too short.
165 *
166 * This assumes the interesting part of the payload is in the first part
167 * of the buffer for received data. This may not be appropriate to use for
168 * buffers being transmitted.
169 */
170static inline void *fc_frame_payload_get(const struct fc_frame *fp,
171 size_t len)
172{
173 void *pp = NULL;
174
175 if (fr_len(fp) >= sizeof(struct fc_frame_header) + len)
176 pp = fc_frame_header_get(fp) + 1;
177 return pp;
178}
179
180/*
181 * Get frame payload opcode (first byte) from message in fc_frame structure.
182 * This hides a cast and provides a place to add some checking. Return 0
183 * if the frame has no payload.
184 */
185static inline u8 fc_frame_payload_op(const struct fc_frame *fp)
186{
187 u8 *cp;
188
189 cp = fc_frame_payload_get(fp, sizeof(u8));
190 if (!cp)
191 return 0;
192 return *cp;
193
194}
195
196/*
197 * Get FC class from frame.
198 */
199static inline enum fc_class fc_frame_class(const struct fc_frame *fp)
200{
201 return fc_sof_class(fr_sof(fp));
202}
203
204/*
205 * Check the CRC in a frame.
206 * The CRC immediately follows the last data item *AFTER* the length.
207 * The return value is zero if the CRC matches.
208 */
209u32 fc_frame_crc_check(struct fc_frame *);
210
211static inline u8 fc_frame_rctl(const struct fc_frame *fp)
212{
213 return fc_frame_header_get(fp)->fh_r_ctl;
214}
215
216static inline bool fc_frame_is_cmd(const struct fc_frame *fp)
217{
218 return fc_frame_rctl(fp) == FC_RCTL_DD_UNSOL_CMD;
219}
220
221static inline bool fc_frame_is_read(const struct fc_frame *fp)
222{
223 if (fc_frame_is_cmd(fp) && fr_cmd(fp))
224 return fr_dir(fp) == DMA_FROM_DEVICE;
225 return false;
226}
227
228static inline bool fc_frame_is_write(const struct fc_frame *fp)
229{
230 if (fc_frame_is_cmd(fp) && fr_cmd(fp))
231 return fr_dir(fp) == DMA_TO_DEVICE;
232 return false;
233}
234
235/*
236 * Check for leaks.
237 * Print the frame header of any currently allocated frame, assuming there
238 * should be none at this point.
239 */
240void fc_frame_leak_check(void);
241
242#endif /* _FC_FRAME_H_ */
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
new file mode 100644
index 000000000000..9f2876397dda
--- /dev/null
+++ b/include/scsi/libfc.h
@@ -0,0 +1,938 @@
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#ifndef _LIBFC_H_
21#define _LIBFC_H_
22
23#include <linux/timer.h>
24#include <linux/if.h>
25
26#include <scsi/scsi_transport.h>
27#include <scsi/scsi_transport_fc.h>
28
29#include <scsi/fc/fc_fcp.h>
30#include <scsi/fc/fc_ns.h>
31#include <scsi/fc/fc_els.h>
32#include <scsi/fc/fc_gs.h>
33
34#include <scsi/fc_frame.h>
35
36#define LIBFC_DEBUG
37
38#ifdef LIBFC_DEBUG
39/* Log messages */
40#define FC_DBG(fmt, args...) \
41 do { \
42 printk(KERN_INFO "%s " fmt, __func__, ##args); \
43 } while (0)
44#else
45#define FC_DBG(fmt, args...)
46#endif
47
48/*
49 * libfc error codes
50 */
51#define FC_NO_ERR 0 /* no error */
52#define FC_EX_TIMEOUT 1 /* Exchange timeout */
53#define FC_EX_CLOSED 2 /* Exchange closed */
54
55/* some helpful macros */
56
57#define ntohll(x) be64_to_cpu(x)
58#define htonll(x) cpu_to_be64(x)
59
60#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
61
62#define hton24(p, v) do { \
63 p[0] = (((v) >> 16) & 0xFF); \
64 p[1] = (((v) >> 8) & 0xFF); \
65 p[2] = ((v) & 0xFF); \
66 } while (0)
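These 24-bit helpers exist because FC_IDs are three bytes on the wire (fh_s_id/fh_d_id in the frame header); a round-trip sketch:

static void my_fcid_roundtrip(void)
{
	u8 fcid[3];
	u32 did = FC_FID_DIR_SERV;	/* well-known directory-server FC_ID */

	hton24(fcid, did);		/* store as a big-endian 3-byte field */
	WARN_ON(ntoh24(fcid) != did);	/* recovers the same 24-bit value */
}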
67
68/*
69 * FC HBA status
70 */
71#define FC_PAUSE (1 << 1)
72#define FC_LINK_UP (1 << 0)
73
74enum fc_lport_state {
75 LPORT_ST_NONE = 0,
76 LPORT_ST_FLOGI,
77 LPORT_ST_DNS,
78 LPORT_ST_RPN_ID,
79 LPORT_ST_RFT_ID,
80 LPORT_ST_SCR,
81 LPORT_ST_READY,
82 LPORT_ST_LOGO,
83 LPORT_ST_RESET
84};
85
86enum fc_disc_event {
87 DISC_EV_NONE = 0,
88 DISC_EV_SUCCESS,
89 DISC_EV_FAILED
90};
91
92enum fc_rport_state {
93 RPORT_ST_NONE = 0,
94 RPORT_ST_INIT, /* initialized */
95 RPORT_ST_PLOGI, /* waiting for PLOGI completion */
96 RPORT_ST_PRLI, /* waiting for PRLI completion */
97 RPORT_ST_RTV, /* waiting for RTV completion */
98 RPORT_ST_READY, /* ready for use */
99 RPORT_ST_LOGO, /* port logout sent */
100};
101
102enum fc_rport_trans_state {
103 FC_PORTSTATE_ROGUE,
104 FC_PORTSTATE_REAL,
105};
106
107/**
108 * struct fc_disc_port - temporary discovery port to hold rport identifiers
109 * @lp: Fibre Channel host port instance
110 * @peers: node for list management during discovery and RSCN processing
111 * @ids: identifiers structure to pass to fc_remote_port_add()
112 * @rport_work: work struct for starting the rport state machine
113 */
114struct fc_disc_port {
115 struct fc_lport *lp;
116 struct list_head peers;
117 struct fc_rport_identifiers ids;
118 struct work_struct rport_work;
119};
120
121enum fc_rport_event {
122 RPORT_EV_NONE = 0,
123 RPORT_EV_CREATED,
124 RPORT_EV_FAILED,
125 RPORT_EV_STOP,
126 RPORT_EV_LOGO
127};
128
129struct fc_rport_operations {
130 void (*event_callback)(struct fc_lport *, struct fc_rport *,
131 enum fc_rport_event);
132};
133
134/**
135 * struct fc_rport_libfc_priv - libfc internal information about a remote port
136 * @local_port: Fibre Channel host port instance
137 * @rp_state: state tracks progress of PLOGI, PRLI, and RTV exchanges
138 * @flags: REC and RETRY supported flags
139 * @max_seq: maximum number of concurrent sequences
140 * @retries: retry count in current state
141 * @e_d_tov: error detect timeout value (in msec)
142 * @r_a_tov: resource allocation timeout value (in msec)
143 * @rp_mutex: mutex protects rport
144 * @retry_work: delayed work used to retry the current state's request
145 * @event_callback: Callback for rport READY, FAILED or LOGO
146 */
147struct fc_rport_libfc_priv {
148 struct fc_lport *local_port;
149 enum fc_rport_state rp_state;
150 u16 flags;
151 #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0)
152 #define FC_RP_FLAGS_RETRY (1 << 1)
153 u16 max_seq;
154 unsigned int retries;
155 unsigned int e_d_tov;
156 unsigned int r_a_tov;
157 enum fc_rport_trans_state trans_state;
158 struct mutex rp_mutex;
159 struct delayed_work retry_work;
160 enum fc_rport_event event;
161 struct fc_rport_operations *ops;
162 struct list_head peers;
163 struct work_struct event_work;
164};
165
166#define PRIV_TO_RPORT(x)						\
167	((struct fc_rport *)((void *)(x) - sizeof(struct fc_rport)))
168#define RPORT_TO_PRIV(x)						\
169	((struct fc_rport_libfc_priv *)((void *)(x) + sizeof(struct fc_rport)))
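Both macros rely on the layout libfc uses for rports: the fc_rport_libfc_priv is allocated immediately after the struct fc_rport, with dd_data pointing at it. A sketch of the equivalences:

static void my_rport_layout_check(struct fc_rport *rport)
{
	struct fc_rport_libfc_priv *rdata = rport->dd_data;

	/* Each conversion recovers the adjacent structure: */
	WARN_ON(PRIV_TO_RPORT(rdata) != rport);
	WARN_ON(RPORT_TO_PRIV(rport) != rdata);
}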
170
171struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *);
172
173static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn)
174{
175 rport->node_name = wwnn;
176 rport->port_name = wwpn;
177}
178
179/*
180 * fcoe stats structure
181 */
182struct fcoe_dev_stats {
183 u64 SecondsSinceLastReset;
184 u64 TxFrames;
185 u64 TxWords;
186 u64 RxFrames;
187 u64 RxWords;
188 u64 ErrorFrames;
189 u64 DumpedFrames;
190 u64 LinkFailureCount;
191 u64 LossOfSignalCount;
192 u64 InvalidTxWordCount;
193 u64 InvalidCRCCount;
194 u64 InputRequests;
195 u64 OutputRequests;
196 u64 ControlRequests;
197 u64 InputMegabytes;
198 u64 OutputMegabytes;
199};
200
201/*
202 * els data is used for passing ELS response specific
203 * data when sending an ELS response, mainly using information
204 * from the exchange and sequence in the EM layer.
205 */
206struct fc_seq_els_data {
207 struct fc_frame *fp;
208 enum fc_els_rjt_reason reason;
209 enum fc_els_rjt_explan explan;
210};
211
212/*
213 * FCP request structure, one for each scsi cmd request
214 */
215struct fc_fcp_pkt {
216 /*
217 * housekeeping stuff
218 */
219 struct fc_lport *lp; /* handle to hba struct */
220	u16		state;		/* scsi_pkt state */
221 u16 tgt_flags; /* target flags */
222 atomic_t ref_cnt; /* fcp pkt ref count */
223 spinlock_t scsi_pkt_lock; /* Must be taken before the host lock
224 * if both are held at the same time */
225 /*
226 * SCSI I/O related stuff
227 */
228 struct scsi_cmnd *cmd; /* scsi command pointer. set/clear
229 * under host lock */
230 struct list_head list; /* tracks queued commands. access under
231 * host lock */
232 /*
233 * timeout related stuff
234 */
235 struct timer_list timer; /* command timer */
236 struct completion tm_done;
237 int wait_for_comp;
238	unsigned long	start_time;	/* start time in jiffies */
239	unsigned long	end_time;	/* end time in jiffies */
240 unsigned long last_pkt_time; /* jiffies of last frame received */
241
242 /*
243 * scsi cmd and data transfer information
244 */
245 u32 data_len;
246 /*
247	 * transport related variables
248 */
249 struct fcp_cmnd cdb_cmd;
250 size_t xfer_len;
251 u32 xfer_contig_end; /* offset of end of contiguous xfer */
252 u16 max_payload; /* max payload size in bytes */
253
254 /*
255 * scsi/fcp return status
256 */
257 u32 io_status; /* SCSI result upper 24 bits */
258 u8 cdb_status;
259 u8 status_code; /* FCP I/O status */
260					/* bit 3: underrun; bit 2: overrun */
261 u8 scsi_comp_flags;
262	u32		req_flags;	/* bit 0: read; bit 1: write */
263	u32		scsi_resid;	/* residual length */
264
265 struct fc_rport *rport; /* remote port pointer */
266 struct fc_seq *seq_ptr; /* current sequence pointer */
267 /*
268 * Error Processing
269 */
270 u8 recov_retry; /* count of recovery retries */
271 struct fc_seq *recov_seq; /* sequence for REC or SRR */
272};
273
274/*
275 * Structure and function definitions for managing Fibre Channel Exchanges
276 * and Sequences
277 *
278 * fc_exch holds state for one exchange and links to its active sequence.
279 *
280 * fc_seq holds the state for an individual sequence.
281 */
282
283struct fc_exch_mgr;
284
285/*
286 * Sequence.
287 */
288struct fc_seq {
289 u8 id; /* seq ID */
290 u16 ssb_stat; /* status flags for sequence status block */
291 u16 cnt; /* frames sent so far on sequence */
292 u32 rec_data; /* FC-4 value for REC */
293};
294
295#define FC_EX_DONE (1 << 0) /* ep is completed */
296#define FC_EX_RST_CLEANUP (1 << 1) /* reset is forcing completion */
297
298/*
299 * Exchange.
300 *
301 * Locking notes: The ex_lock protects following items:
302 * state, esb_stat, f_ctl, seq.ssb_stat
303 * seq_id
304 * sequence allocation
305 */
306struct fc_exch {
307 struct fc_exch_mgr *em; /* exchange manager */
308 u32 state; /* internal driver state */
309 u16 xid; /* our exchange ID */
310 struct list_head ex_list; /* free or busy list linkage */
311 spinlock_t ex_lock; /* lock covering exchange state */
312 atomic_t ex_refcnt; /* reference counter */
313 struct delayed_work timeout_work; /* timer for upper level protocols */
314 struct fc_lport *lp; /* fc device instance */
315 u16 oxid; /* originator's exchange ID */
316 u16 rxid; /* responder's exchange ID */
317 u32 oid; /* originator's FCID */
318 u32 sid; /* source FCID */
319 u32 did; /* destination FCID */
320 u32 esb_stat; /* exchange status for ESB */
321 u32 r_a_tov; /* r_a_tov from rport (msec) */
322 u8 seq_id; /* next sequence ID to use */
323 u32 f_ctl; /* F_CTL flags for sequences */
324 u8 fh_type; /* frame type */
325 enum fc_class class; /* class of service */
326 struct fc_seq seq; /* single sequence */
327 /*
328 * Handler for responses to this current exchange.
329 */
330 void (*resp)(struct fc_seq *, struct fc_frame *, void *);
331 void (*destructor)(struct fc_seq *, void *);
332 /*
333 * arg is passed as void pointer to exchange
334 * resp and destructor handlers
335 */
336 void *arg;
337};
338#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
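Because each exchange embeds its single sequence, a response handler that holds only the struct fc_seq * can recover the owning exchange; a sketch matching the resp() signature used in the template below:

static void my_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	struct fc_exch *ep = fc_seq_exch(sp);	/* back to the exchange */

	FC_DBG("resp on exch oxid %4.4x rxid %4.4x\n", ep->oxid, ep->rxid);
}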
339
340struct libfc_function_template {
341
342 /**
343 * Mandatory Fields
344 *
345 * These handlers must be implemented by the LLD.
346 */
347
348 /*
349 * Interface to send a FC frame
350 */
351 int (*frame_send)(struct fc_lport *lp, struct fc_frame *fp);
352
353 /**
354 * Optional Fields
355 *
356 * The LLD may choose to implement any of the following handlers.
357 * If LLD doesn't specify hander and leaves its pointer NULL then
358 * the default libfc function will be used for that handler.
359 */
360
361 /**
362 * ELS/CT interfaces
363 */
364
365 /*
366 * elsct_send - sends ELS/CT frame
367 */
368 struct fc_seq *(*elsct_send)(struct fc_lport *lport,
369 struct fc_rport *rport,
370 struct fc_frame *fp,
371 unsigned int op,
372 void (*resp)(struct fc_seq *,
373 struct fc_frame *fp,
374 void *arg),
375 void *arg, u32 timer_msec);
376 /**
377	 * Exchange Manager interfaces
378 */
379
380 /*
381 * Send the FC frame payload using a new exchange and sequence.
382 *
383 * The frame pointer with some of the header's fields must be
384 * filled before calling exch_seq_send(), those fields are,
385 *
386 * - routing control
387 * - FC port did
388 * - FC port sid
389 * - FC header type
390 * - frame control
391 * - parameter or relative offset
392 *
393 * The exchange response handler is set in this routine to resp()
394 * function pointer. It can be called in two scenarios: if a timeout
395 * occurs or if a response frame is received for the exchange. The
396	 * fc_frame pointer in the response handler will also indicate a timeout
397	 * as an error using the IS_ERR related macros.
398 *
399 * The exchange destructor handler is also set in this routine.
400	 * The destructor handler is invoked by the EM layer when the exchange
401	 * is about to be freed; the caller can use it to free its
402	 * resources along with the exchange.
403 *
404 * The arg is passed back to resp and destructor handler.
405 *
406	 * The timeout value (in msec) for an exchange is set if a non-zero
407 * timer_msec argument is specified. The timer is canceled when
408 * it fires or when the exchange is done. The exchange timeout handler
409 * is registered by EM layer.
410 */
411 struct fc_seq *(*exch_seq_send)(struct fc_lport *lp,
412 struct fc_frame *fp,
413 void (*resp)(struct fc_seq *sp,
414 struct fc_frame *fp,
415 void *arg),
416 void (*destructor)(struct fc_seq *sp,
417 void *arg),
418 void *arg, unsigned int timer_msec);
419
420 /*
421 * send a frame using existing sequence and exchange.
422 */
423 int (*seq_send)(struct fc_lport *lp, struct fc_seq *sp,
424 struct fc_frame *fp);
425
426 /*
427	 * Send an ELS response using mainly information
428	 * from the exchange and sequence in the EM layer.
429 */
430 void (*seq_els_rsp_send)(struct fc_seq *sp, enum fc_els_cmd els_cmd,
431 struct fc_seq_els_data *els_data);
432
433 /*
434	 * Abort an exchange and sequence. Generally called because of an
435 * exchange timeout or an abort from the upper layer.
436 *
437	 * A timer_msec can be specified for the abort timeout. If a non-zero
438	 * timer_msec value is specified, the exchange resp handler
439	 * will be called with a timeout error if no response to the abort arrives.
440 */
441 int (*seq_exch_abort)(const struct fc_seq *req_sp,
442 unsigned int timer_msec);
443
444 /*
445 * Indicate that an exchange/sequence tuple is complete and the memory
446 * allocated for the related objects may be freed.
447 */
448 void (*exch_done)(struct fc_seq *sp);
449
450 /*
451	 * Assigns an EM and a free XID for a new exchange and then
452 * allocates a new exchange and sequence pair.
453 * The fp can be used to determine free XID.
454 */
455 struct fc_exch *(*exch_get)(struct fc_lport *lp, struct fc_frame *fp);
456
457 /*
458	 * Release a previously assigned XID obtained via the exch_get API.
459	 * The LLD may implement this if the XID is assigned by the LLD
460 * in exch_get().
461 */
462 void (*exch_put)(struct fc_lport *lp, struct fc_exch_mgr *mp,
463 u16 ex_id);
464
465 /*
466 * Start a new sequence on the same exchange/sequence tuple.
467 */
468 struct fc_seq *(*seq_start_next)(struct fc_seq *sp);
469
470 /*
471 * Reset an exchange manager, completing all sequences and exchanges.
472 * If s_id is non-zero, reset only exchanges originating from that FID.
473 * If d_id is non-zero, reset only exchanges sending to that FID.
474 */
475 void (*exch_mgr_reset)(struct fc_exch_mgr *,
476 u32 s_id, u32 d_id);
477
478 void (*rport_flush_queue)(void);
479 /**
480 * Local Port interfaces
481 */
482
483 /*
484 * Receive a frame to a local port.
485 */
486 void (*lport_recv)(struct fc_lport *lp, struct fc_seq *sp,
487 struct fc_frame *fp);
488
489 int (*lport_reset)(struct fc_lport *);
490
491 /**
492 * Remote Port interfaces
493 */
494
495 /*
496 * Initiates the RP state machine. It is called from the LP module.
497 * This function will issue the following commands to the N_Port
498 * identified by the FC ID provided.
499 *
500 * - PLOGI
501 * - PRLI
502 * - RTV
503 */
504 int (*rport_login)(struct fc_rport *rport);
505
506 /*
507 * Logoff, and remove the rport from the transport if
508 * it had been added. This will send a LOGO to the target.
509 */
510 int (*rport_logoff)(struct fc_rport *rport);
511
512 /*
513	 * Receive a request from a remote port.
514 */
515 void (*rport_recv_req)(struct fc_seq *, struct fc_frame *,
516 struct fc_rport *);
517
518 struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32);
519
520 /**
521 * FCP interfaces
522 */
523
524 /*
525	 * Send an FCP cmd from the fsp pkt.
526	 * Called with the SCSI host lock unlocked and irqs disabled.
527	 *
528	 * The resp handler is called when an FCP_RSP is received.
529 *
530 */
531 int (*fcp_cmd_send)(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
532 void (*resp)(struct fc_seq *, struct fc_frame *fp,
533 void *arg));
534
535 /*
536	 * Used at least during linkdown and reset
537 */
538 void (*fcp_cleanup)(struct fc_lport *lp);
539
540 /*
541 * Abort all I/O on a local port
542 */
543 void (*fcp_abort_io)(struct fc_lport *lp);
544
545 /**
546 * Discovery interfaces
547 */
548
549 void (*disc_recv_req)(struct fc_seq *,
550 struct fc_frame *, struct fc_lport *);
551
552 /*
553 * Start discovery for a local port.
554 */
555 void (*disc_start)(void (*disc_callback)(struct fc_lport *,
556 enum fc_disc_event),
557 struct fc_lport *);
558
559 /*
560 * Stop discovery for a given lport. This will remove
561 * all discovered rports
562 */
563 void (*disc_stop) (struct fc_lport *);
564
565 /*
566 * Stop discovery for a given lport. This will block
567 * until all discovered rports are deleted from the
568 * FC transport class
569 */
570 void (*disc_stop_final) (struct fc_lport *);
571};
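Putting the template together: only frame_send is mandatory, so an LLD can leave the rest NULL and let the per-layer init calls (declared later in this header) install the libfc defaults, exactly as fc_rport_init() does for the rport handlers. A minimal sketch, with my_frame_send standing in for the LLD's transmit handler:

static int my_frame_send(struct fc_lport *lp, struct fc_frame *fp);

static int my_lport_config(struct fc_lport *lp)
{
	lp->tt.frame_send = my_frame_send;	/* mandatory LLD handler */

	/* Each init fills any handlers still NULL with libfc defaults. */
	fc_exch_init(lp);
	fc_elsct_init(lp);
	fc_lport_init(lp);
	fc_rport_init(lp);
	fc_disc_init(lp);
	return fc_fcp_init(lp);
}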
572
573/* information used by the discovery layer */
574struct fc_disc {
575 unsigned char retry_count;
576 unsigned char delay;
577 unsigned char pending;
578 unsigned char requested;
579 unsigned short seq_count;
580 unsigned char buf_len;
581 enum fc_disc_event event;
582
583 void (*disc_callback)(struct fc_lport *,
584 enum fc_disc_event);
585
586 struct list_head rports;
587 struct fc_lport *lport;
588 struct mutex disc_mutex;
589 struct fc_gpn_ft_resp partial_buf; /* partial name buffer */
590 struct delayed_work disc_work;
591};
592
593struct fc_lport {
594 struct list_head list;
595
596 /* Associations */
597 struct Scsi_Host *host;
598 struct fc_exch_mgr *emp;
599 struct fc_rport *dns_rp;
600 struct fc_rport *ptp_rp;
601 void *scsi_priv;
602 struct fc_disc disc;
603
604 /* Operational Information */
605 struct libfc_function_template tt;
606 u16 link_status;
607 enum fc_lport_state state;
608 unsigned long boot_time;
609
610 struct fc_host_statistics host_stats;
611 struct fcoe_dev_stats *dev_stats[NR_CPUS];
612 u64 wwpn;
613 u64 wwnn;
614 u8 retry_count;
615
616 /* Capabilities */
617 u32 sg_supp:1; /* scatter gather supported */
618 u32 seq_offload:1; /* seq offload supported */
619 u32 crc_offload:1; /* crc offload supported */
620 u32 lro_enabled:1; /* large receive offload */
621 u32 mfs; /* max FC payload size */
622 unsigned int service_params;
623 unsigned int e_d_tov;
624 unsigned int r_a_tov;
625 u8 max_retry_count;
626 u16 link_speed;
627 u16 link_supported_speeds;
628 u16 lro_xid; /* max xid for fcoe lro */
629 struct fc_ns_fts fcts; /* FC-4 type masks */
630 struct fc_els_rnid_gen rnid_gen; /* RNID information */
631
632 /* Semaphores */
633 struct mutex lp_mutex;
634
635 /* Miscellaneous */
636 struct delayed_work retry_work;
637 struct delayed_work disc_work;
638};
639
640/**
641 * FC_LPORT HELPER FUNCTIONS
642 *****************************/
643static inline void *lport_priv(const struct fc_lport *lp)
644{
645 return (void *)(lp + 1);
646}
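lport_priv() assumes the LLD's private data sits directly after the struct fc_lport, which falls out naturally if the lport is carved from the Scsi_Host private area with extra room. A sketch of that layout (struct my_priv is a hypothetical LLD structure):

struct my_priv {			/* hypothetical LLD-private state */
	int example_field;
};

static struct fc_lport *my_alloc_lport(struct scsi_host_template *sht)
{
	struct Scsi_Host *shost;
	struct fc_lport *lp;

	shost = scsi_host_alloc(sht, sizeof(*lp) + sizeof(struct my_priv));
	if (!shost)
		return NULL;
	lp = shost_priv(shost);	/* the fc_lport occupies the hostdata */
	lp->host = shost;
	return lp;		/* lport_priv(lp) then yields the my_priv */
}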
647
648static inline int fc_lport_test_ready(struct fc_lport *lp)
649{
650 return lp->state == LPORT_ST_READY;
651}
652
653static inline void fc_set_wwnn(struct fc_lport *lp, u64 wwnn)
654{
655 lp->wwnn = wwnn;
656}
657
658static inline void fc_set_wwpn(struct fc_lport *lp, u64 wwpn)
659{
660	lp->wwpn = wwpn;
661}
662
663static inline void fc_lport_state_enter(struct fc_lport *lp,
664 enum fc_lport_state state)
665{
666 if (state != lp->state)
667 lp->retry_count = 0;
668 lp->state = state;
669}
670
671
672/**
673 * LOCAL PORT LAYER
674 *****************************/
675int fc_lport_init(struct fc_lport *lp);
676
677/*
678 * Destroy the specified local port by finding and freeing all
679 * fc_rports associated with it and then by freeing the fc_lport
680 * itself.
681 */
682int fc_lport_destroy(struct fc_lport *lp);
683
684/*
685 * Logout the specified local port from the fabric
686 */
687int fc_fabric_logoff(struct fc_lport *lp);
688
689/*
690 * Initiate the LP state machine. This handler will use fc_host_attr
691 * to store the FLOGI service parameters, so fc_host_attr must be
692 * initialized before calling this handler.
693 */
694int fc_fabric_login(struct fc_lport *lp);
695
696/*
697 * The link is up for the given local port.
698 */
699void fc_linkup(struct fc_lport *);
700
701/*
702 * Link is down for the given local port.
703 */
704void fc_linkdown(struct fc_lport *);
705
706/*
707 * Pause and unpause traffic.
708 */
709void fc_pause(struct fc_lport *);
710void fc_unpause(struct fc_lport *);
711
712/*
713 * Configure the local port.
714 */
715int fc_lport_config(struct fc_lport *);
716
717/*
718 * Reset the local port.
719 */
720int fc_lport_reset(struct fc_lport *);
721
722/*
723 * Set the mfs or reset
724 */
725int fc_set_mfs(struct fc_lport *lp, u32 mfs);
726
727
728/**
729 * REMOTE PORT LAYER
730 *****************************/
731int fc_rport_init(struct fc_lport *lp);
732void fc_rport_terminate_io(struct fc_rport *rp);
733
734/**
735 * DISCOVERY LAYER
736 *****************************/
737int fc_disc_init(struct fc_lport *lp);
738
739
740/**
741 * SCSI LAYER
742 *****************************/
743/*
744 * Initialize the SCSI block of libfc
745 */
746int fc_fcp_init(struct fc_lport *);
747
748/*
749 * This section provides an API which allows direct interaction
750 * with the SCSI-ml. Each of these functions satisfies a function
751 * pointer defined in Scsi_Host and therefore is always called
752 * directly from the SCSI-ml.
753 */
754int fc_queuecommand(struct scsi_cmnd *sc_cmd,
755 void (*done)(struct scsi_cmnd *));
756
757/*
758 * complete processing of a fcp packet
759 *
760 * This function may sleep if a fsp timer is pending.
761 * The host lock must not be held by caller.
762 */
763void fc_fcp_complete(struct fc_fcp_pkt *fsp);
764
765/*
766 * Send an ABTS frame to the target device. The sc_cmd argument
767 * is a pointer to the SCSI command to be aborted.
768 */
769int fc_eh_abort(struct scsi_cmnd *sc_cmd);
770
771/*
772 * Reset a LUN by sending the tm cmd to the target.
773 */
774int fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
775
776/*
777 * Reset the host adapter.
778 */
779int fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
780
781/*
782 * Check rport status.
783 */
784int fc_slave_alloc(struct scsi_device *sdev);
785
786/*
787 * Adjust the queue depth.
788 */
789int fc_change_queue_depth(struct scsi_device *sdev, int qdepth);
790
791/*
792 * Change the tag type.
793 */
794int fc_change_queue_type(struct scsi_device *sdev, int tag_type);
795
796/*
797 * Free memory pools used by the FCP layer.
798 */
799void fc_fcp_destroy(struct fc_lport *);
800
801/**
802 * ELS/CT interface
803 *****************************/
804/*
805 * Initializes ELS/CT interface
806 */
807int fc_elsct_init(struct fc_lport *lp);
808
809
810/**
811 * EXCHANGE MANAGER LAYER
812 *****************************/
813/*
814 * Initializes Exchange Manager related
815 * function pointers in struct libfc_function_template.
816 */
817int fc_exch_init(struct fc_lport *lp);
818
819/*
820 * Allocates an Exchange Manager (EM).
821 *
822 * The EM manages exchange allocation and free, and
823 * also allows exchange lookup for received
824 * frames.
825 *
826 * The class is used to initialize the FC class of
827 * exchanges allocated from the EM.
828 *
829 * min_xid and max_xid limit the exchange ID (XID)
830 * range used when assigning an XID to
831 * a new exchange.
832 * The LLD may choose to have multiple EMs,
833 * e.g. one EM instance per CPU receive thread in the LLD.
834 * The LLD can use exch_get() of struct libfc_function_template
835 * to specify the XID for a new exchange within
836 * a specified EM instance.
837 *
838 * An em_idx uniquely identifies an EM instance.
839 */
840struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
841 enum fc_class class,
842 u16 min_xid,
843 u16 max_xid);
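For example, an lport setup path might allocate a single EM over a modest XID range (a sketch; the range values are arbitrary):

static int my_em_setup(struct fc_lport *lp)
{
	/* Example: XIDs 1..2047, class 3 service. */
	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3, 1, 2047);
	if (!lp->emp)
		return -ENOMEM;
	return 0;
}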
844
845/*
846 * Free an exchange manager.
847 */
848void fc_exch_mgr_free(struct fc_exch_mgr *mp);
849
850/*
851 * Receive a frame on specified local port and exchange manager.
852 */
853void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
854 struct fc_frame *fp);
855
856/*
857 * This function is for exch_seq_send function pointer in
858 * struct libfc_function_template, see comment block on
859 * exch_seq_send for description of this function.
860 */
861struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
862 struct fc_frame *fp,
863 void (*resp)(struct fc_seq *sp,
864 struct fc_frame *fp,
865 void *arg),
866 void (*destructor)(struct fc_seq *sp,
867 void *arg),
868 void *arg, u32 timer_msec);
869
870/*
871 * send a frame using existing sequence and exchange.
872 */
873int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp);
874
875/*
876 * Send an ELS response using mainly information
877 * from the exchange and sequence in the EM layer.
878 */
879void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
880 struct fc_seq_els_data *els_data);
881
882/*
883 * This function is for seq_exch_abort function pointer in
884 * struct libfc_function_template, see comment block on
885 * seq_exch_abort for description of this function.
886 */
887int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec);
888
889/*
890 * Indicate that an exchange/sequence tuple is complete and the memory
891 * allocated for the related objects may be freed.
892 */
893void fc_exch_done(struct fc_seq *sp);
894
895/*
896 * Assigns an EM and an XID for a frame and then allocates
897 * a new exchange and sequence pair.
898 * The fp can be used to determine free XID.
899 */
900struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp);
901
902/*
903 * Allocate a new exchange and sequence pair.
904 * If ex_id is zero, the next free exchange ID
905 * from the specified exchange manager mp will be assigned.
906 */
907struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
908 struct fc_frame *fp, u16 ex_id);
909/*
910 * Start a new sequence on the same exchange as the supplied sequence.
911 */
912struct fc_seq *fc_seq_start_next(struct fc_seq *sp);
913
914/*
915 * Reset an exchange manager, completing all sequences and exchanges.
916 * If s_id is non-zero, reset only exchanges originating from that FID.
917 * If d_id is non-zero, reset only exchanges sending to that FID.
918 */
919void fc_exch_mgr_reset(struct fc_exch_mgr *, u32 s_id, u32 d_id);
920
921/*
922 * Functions for fc_functions_template
923 */
924void fc_get_host_speed(struct Scsi_Host *shost);
925void fc_get_host_port_type(struct Scsi_Host *shost);
926void fc_get_host_port_state(struct Scsi_Host *shost);
927void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout);
928struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *);
929
930/*
931 * module setup functions.
932 */
933int fc_setup_exch_mgr(void);
934void fc_destroy_exch_mgr(void);
935int fc_setup_rport(void);
936void fc_destroy_rport(void);
937
938#endif /* _LIBFC_H_ */