author     Joe Eykholt <jeykholt@cisco.com>          2009-08-25 17:00:50 -0400
committer  James Bottomley <James.Bottomley@suse.de> 2009-09-10 13:07:41 -0400
commit     9fb9d32831fd687e427ec5b147bb690f468b99a0 (patch)
tree       c3b6c29cb94040718ea2fe00daac05abf10db714 /drivers/scsi/libfc
parent     922aa210bcad4b34a7bb98ec9d318b7e59e7a5ca (diff)
[SCSI] libfc: make fc_rport_priv the primary rport interface.
The rport and discovery modules deal with remote ports
before fc_remote_port_add() can be done, because the
full set of rport identifiers is not known at early stages.
In preparation for splitting the fc_rport/fc_rport_priv allocation,
make fc_rport_priv the primary interface for the remote port and
discovery engines.
The FCP / SCSI layers still deal with fc_rport and
fc_rport_libfc_priv, however.
Signed-off-by: Joe Eykholt <jeykholt@cisco.com>
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
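Editorial note on the conversion pattern applied throughout the hunks below: the rport and discovery state machines stop passing the transport-visible struct fc_rport around and instead pass the libfc-private struct fc_rport_priv, converting back to the transport object only where it is still needed (via the PRIV_TO_RPORT()/RPORT_TO_PRIV() macros and rport->dd_data visible in the diff). The following sketch is illustrative only: the struct layouts are trimmed and the back-pointer field is a stand-in for the real macro-based conversion, not the libfc definition.

```c
/*
 * Illustrative sketch only -- not the libfc definitions.  It shows the
 * before/after calling convention this patch introduces: the SCSI
 * transport's struct fc_rport keeps the libfc-private state in ->dd_data,
 * and after this patch the private structure is the handle that the rport
 * and discovery code pass around.  Field lists are trimmed; the ->rport
 * back-pointer is a hypothetical stand-in for libfc's PRIV_TO_RPORT().
 */
struct fc_rport {                       /* transport-visible remote port */
	unsigned int	port_id;
	void		*dd_data;       /* -> struct fc_rport_priv */
};

struct fc_rport_priv {                  /* libfc-private remote port state */
	struct fc_rport	*rport;         /* hypothetical back-pointer */
	unsigned int	rp_state;
	unsigned int	retries;
};

/* Before: the public rport is the handle, private state hangs off dd_data. */
static unsigned int rport_state_old(struct fc_rport *rport)
{
	struct fc_rport_priv *rdata = rport->dd_data;

	return rdata->rp_state;
}

/* After (this patch): fc_rport_priv is primary; convert back on demand. */
static unsigned int rport_state_new(struct fc_rport_priv *rdata)
{
	struct fc_rport *rport = rdata->rport; /* stand-in for PRIV_TO_RPORT() */

	(void)rport->port_id;   /* transport fields still reachable when needed */
	return rdata->rp_state;
}
```

In the patch itself the FCP/SCSI path keeps using struct fc_rport (see the fc_fcp.c hunk, which passes rport->dd_data into elsct_send), while the discovery and rport state machines below operate purely on struct fc_rport_priv.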
Diffstat (limited to 'drivers/scsi/libfc')
-rw-r--r--  drivers/scsi/libfc/fc_disc.c  |  95
-rw-r--r--  drivers/scsi/libfc/fc_elsct.c |   4
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c   |   2
-rw-r--r--  drivers/scsi/libfc/fc_lport.c |  26
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 364
5 files changed, 226 insertions, 265 deletions
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index ecc625c20520..448ffc388656 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -49,7 +49,6 @@ static void fc_disc_gpn_ft_req(struct fc_disc *);
 static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
 static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
			      struct fc_rport_identifiers *);
-static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
 static void fc_disc_done(struct fc_disc *);
 static void fc_disc_timeout(struct work_struct *);
 static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
@@ -60,27 +59,19 @@ static void fc_disc_restart(struct fc_disc *);
  * @lport: Fibre Channel host port instance
  * @port_id: remote port port_id to match
  */
-struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
-				      u32 port_id)
+struct fc_rport_priv *fc_disc_lookup_rport(const struct fc_lport *lport,
+					   u32 port_id)
 {
	const struct fc_disc *disc = &lport->disc;
-	struct fc_rport *rport, *found = NULL;
+	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
-	int disc_found = 0;
 
	list_for_each_entry(rdata, &disc->rports, peers) {
		rport = PRIV_TO_RPORT(rdata);
-		if (rport->port_id == port_id) {
-			disc_found = 1;
-			found = rport;
-			break;
-		}
+		if (rport->port_id == port_id)
+			return rdata;
	}
-
-	if (!disc_found)
-		found = NULL;
-
-	return found;
+	return NULL;
 }
 
 /**
@@ -93,21 +84,18 @@ struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
 void fc_disc_stop_rports(struct fc_disc *disc)
 {
	struct fc_lport *lport;
-	struct fc_rport *rport;
	struct fc_rport_priv *rdata, *next;
 
	lport = disc->lport;
 
	mutex_lock(&disc->disc_mutex);
	list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
-		rport = PRIV_TO_RPORT(rdata);
		list_del(&rdata->peers);
-		lport->tt.rport_logoff(rport);
+		lport->tt.rport_logoff(rdata);
	}
 
	list_for_each_entry_safe(rdata, next, &disc->rogue_rports, peers) {
-		rport = PRIV_TO_RPORT(rdata);
-		lport->tt.rport_logoff(rport);
+		lport->tt.rport_logoff(rdata);
	}
 
	mutex_unlock(&disc->disc_mutex);
@@ -116,18 +104,18 @@ void fc_disc_stop_rports(struct fc_disc *disc)
 /**
  * fc_disc_rport_callback() - Event handler for rport events
  * @lport: The lport which is receiving the event
- * @rport: The rport which the event has occured on
+ * @rdata: private remote port data
  * @event: The event that occured
  *
  * Locking Note: The rport lock should not be held when calling
  * this function.
  */
 static void fc_disc_rport_callback(struct fc_lport *lport,
-				   struct fc_rport *rport,
+				   struct fc_rport_priv *rdata,
				   enum fc_rport_event event)
 {
-	struct fc_rport_priv *rdata = rport->dd_data;
	struct fc_disc *disc = &lport->disc;
+	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
 
	FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event,
		    rport->port_id);
@@ -169,7 +157,6 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
				  struct fc_disc *disc)
 {
	struct fc_lport *lport;
-	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
	struct fc_els_rscn *rp;
	struct fc_els_rscn_page *pp;
@@ -249,11 +236,10 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
		    redisc, lport->state, disc->pending);
	list_for_each_entry_safe(dp, next, &disc_ports, peers) {
		list_del(&dp->peers);
-		rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
-		if (rport) {
-			rdata = rport->dd_data;
+		rdata = lport->tt.rport_lookup(lport, dp->ids.port_id);
+		if (rdata) {
			list_del(&rdata->peers);
-			lport->tt.rport_logoff(rport);
+			lport->tt.rport_logoff(rdata);
		}
		fc_disc_single(disc, dp);
	}
@@ -308,16 +294,14 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
  */
 static void fc_disc_restart(struct fc_disc *disc)
 {
-	struct fc_rport *rport;
	struct fc_rport_priv *rdata, *next;
	struct fc_lport *lport = disc->lport;
 
	FC_DISC_DBG(disc, "Restarting discovery\n");
 
	list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
-		rport = PRIV_TO_RPORT(rdata);
		list_del(&rdata->peers);
-		lport->tt.rport_logoff(rport);
+		lport->tt.rport_logoff(rdata);
	}
 
	disc->requested = 1;
@@ -335,6 +319,7 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
						enum fc_disc_event),
			  struct fc_lport *lport)
 {
+	struct fc_rport_priv *rdata;
	struct fc_rport *rport;
	struct fc_rport_identifiers ids;
	struct fc_disc *disc = &lport->disc;
@@ -362,8 +347,9 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
	 * Handle point-to-point mode as a simple discovery
	 * of the remote port. Yucky, yucky, yuck, yuck!
	 */
-	rport = disc->lport->ptp_rp;
-	if (rport) {
+	rdata = disc->lport->ptp_rp;
+	if (rdata) {
+		rport = PRIV_TO_RPORT(rdata);
		ids.port_id = rport->port_id;
		ids.port_name = rport->port_name;
		ids.node_name = rport->node_name;
@@ -418,7 +404,9 @@ static int fc_disc_new_target(struct fc_disc *disc,
			 * assigned the same FCID. This should be rare.
			 * Delete the old one and fall thru to re-create.
			 */
-			fc_disc_del_target(disc, rport);
+			rdata = rport->dd_data;
+			list_del(&rdata->peers);
+			lport->tt.rport_logoff(rdata);
			rport = NULL;
		}
	}
@@ -426,38 +414,27 @@ static int fc_disc_new_target(struct fc_disc *disc,
	    ids->port_id != fc_host_port_id(lport->host) &&
	    ids->port_name != lport->wwpn) {
		if (!rport) {
-			rport = lport->tt.rport_lookup(lport, ids->port_id);
+			rdata = lport->tt.rport_lookup(lport, ids->port_id);
			if (!rport) {
-				rport = lport->tt.rport_create(lport, ids);
+				rdata = lport->tt.rport_create(lport, ids);
			}
-			if (!rport)
+			if (!rdata)
				error = -ENOMEM;
+			else
+				rport = PRIV_TO_RPORT(rdata);
		}
		if (rport) {
			rdata = rport->dd_data;
			rdata->ops = &fc_disc_rport_ops;
			rdata->rp_state = RPORT_ST_INIT;
			list_add_tail(&rdata->peers, &disc->rogue_rports);
-			lport->tt.rport_login(rport);
+			lport->tt.rport_login(rdata);
		}
	}
	return error;
 }
 
 /**
- * fc_disc_del_target() - Delete a target
- * @disc: FC discovery context
- * @rport: The remote port to be removed
- */
-static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
-{
-	struct fc_lport *lport = disc->lport;
-	struct fc_rport_priv *rdata = rport->dd_data;
-	list_del(&rdata->peers);
-	lport->tt.rport_logoff(rport);
-}
-
-/**
  * fc_disc_done() - Discovery has been completed
  * @disc: FC discovery context
  * Locking Note: This function expects that the disc mutex is locked before
@@ -573,7 +550,6 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
	size_t tlen;
	int error = 0;
	struct fc_rport_identifiers ids;
-	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
 
	lport = disc->lport;
@@ -622,14 +598,13 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
 
		if (ids.port_id != fc_host_port_id(lport->host) &&
		    ids.port_name != lport->wwpn) {
-			rport = lport->tt.rport_create(lport, &ids);
-			if (rport) {
-				rdata = rport->dd_data;
+			rdata = lport->tt.rport_create(lport, &ids);
+			if (rdata) {
				rdata->ops = &fc_disc_rport_ops;
				rdata->local_port = lport;
				list_add_tail(&rdata->peers,
					      &disc->rogue_rports);
-				lport->tt.rport_login(rport);
+				lport->tt.rport_login(rdata);
			} else
				printk(KERN_WARNING "libfc: Failed to allocate "
				       "memory for the newly discovered port "
@@ -766,7 +741,6 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
 static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
 {
	struct fc_lport *lport;
-	struct fc_rport *new_rport;
	struct fc_rport_priv *rdata;
 
	lport = disc->lport;
@@ -774,13 +748,12 @@ static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
	if (dp->ids.port_id == fc_host_port_id(lport->host))
		goto out;
 
-	new_rport = lport->tt.rport_create(lport, &dp->ids);
-	if (new_rport) {
-		rdata = new_rport->dd_data;
+	rdata = lport->tt.rport_create(lport, &dp->ids);
+	if (rdata) {
		rdata->ops = &fc_disc_rport_ops;
		kfree(dp);
		list_add_tail(&rdata->peers, &disc->rogue_rports);
-		lport->tt.rport_login(new_rport);
+		lport->tt.rport_login(rdata);
	}
	return;
 out:
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index 5878b34bff18..2b8a3bbc0399 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -32,7 +32,7 @@
  * fc_elsct_send - sends ELS/CT frame
  */
 static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
-				    struct fc_rport *rport,
+				    struct fc_rport_priv *rdata,
				    struct fc_frame *fp,
				    unsigned int op,
				    void (*resp)(struct fc_seq *,
@@ -47,7 +47,7 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
 
	/* ELS requests */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS))
-		rc = fc_els_fill(lport, rport, fp, op, &r_ctl, &did, &fh_type);
+		rc = fc_els_fill(lport, rdata, fp, op, &r_ctl, &did, &fh_type);
	else
		/* CT requests */
		rc = fc_ct_fill(lport, fp, op, &r_ctl, &did, &fh_type);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 7d5ffcbbf39b..a622096eb315 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1308,7 +1308,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
		       fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
-	if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp,
+	if (lp->tt.elsct_send(lp, rport->dd_data, fp, ELS_REC, fc_fcp_rec_resp,
			      fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
		fc_fcp_pkt_hold(fsp);	/* hold while REC outstanding */
		return;
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index a78161cf1811..3c15abd35ffa 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -133,16 +133,18 @@ static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
 /**
  * fc_lport_rport_callback() - Event handler for rport events
  * @lport: The lport which is receiving the event
- * @rport: The rport which the event has occured on
+ * @rdata: private remote port data
  * @event: The event that occured
  *
  * Locking Note: The rport lock should not be held when calling
  * this function.
  */
 static void fc_lport_rport_callback(struct fc_lport *lport,
-				    struct fc_rport *rport,
+				    struct fc_rport_priv *rdata,
				    enum fc_rport_event event)
 {
+	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
+
	FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
		     rport->port_id);
 
@@ -151,7 +153,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
		if (rport->port_id == FC_FID_DIR_SERV) {
			mutex_lock(&lport->lp_mutex);
			if (lport->state == LPORT_ST_DNS) {
-				lport->dns_rp = rport;
+				lport->dns_rp = rdata;
				fc_lport_enter_rpn_id(lport);
			} else {
				FC_LPORT_DBG(lport, "Received an CREATED event "
@@ -160,7 +162,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
					     "in the DNS state, it's in the "
					     "%d state", rport->port_id,
					     lport->state);
-				lport->tt.rport_logoff(rport);
+				lport->tt.rport_logoff(rdata);
			}
			mutex_unlock(&lport->lp_mutex);
		} else
@@ -832,7 +834,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
 {
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
-	struct fc_rport *rport;
+	struct fc_rport_priv *rdata;
	u32 s_id;
	u32 d_id;
	struct fc_seq_els_data rjt_data;
@@ -888,9 +890,9 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
		s_id = ntoh24(fh->fh_s_id);
		d_id = ntoh24(fh->fh_d_id);
 
-		rport = lport->tt.rport_lookup(lport, s_id);
-		if (rport)
-			lport->tt.rport_recv_req(sp, fp, rport);
+		rdata = lport->tt.rport_lookup(lport, s_id);
+		if (rdata)
+			lport->tt.rport_recv_req(sp, fp, rdata);
		else {
			rjt_data.fp = NULL;
			rjt_data.reason = ELS_RJT_UNAB;
@@ -1304,7 +1306,6 @@ static struct fc_rport_operations fc_lport_rport_ops = {
  */
 static void fc_lport_enter_dns(struct fc_lport *lport)
 {
-	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
	struct fc_rport_identifiers ids;
 
@@ -1318,13 +1319,12 @@ static void fc_lport_enter_dns(struct fc_lport *lport)
 
	fc_lport_state_enter(lport, LPORT_ST_DNS);
 
-	rport = lport->tt.rport_create(lport, &ids);
-	if (!rport)
+	rdata = lport->tt.rport_create(lport, &ids);
+	if (!rdata)
		goto err;
 
-	rdata = rport->dd_data;
	rdata->ops = &fc_lport_rport_ops;
-	lport->tt.rport_login(rport);
+	lport->tt.rport_login(rdata);
	return;
 
 err:
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 2fbc94aaf343..13d3d758fb0e 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
@@ -57,23 +57,23 @@ | |||
57 | 57 | ||
58 | struct workqueue_struct *rport_event_queue; | 58 | struct workqueue_struct *rport_event_queue; |
59 | 59 | ||
60 | static void fc_rport_enter_plogi(struct fc_rport *); | 60 | static void fc_rport_enter_plogi(struct fc_rport_priv *); |
61 | static void fc_rport_enter_prli(struct fc_rport *); | 61 | static void fc_rport_enter_prli(struct fc_rport_priv *); |
62 | static void fc_rport_enter_rtv(struct fc_rport *); | 62 | static void fc_rport_enter_rtv(struct fc_rport_priv *); |
63 | static void fc_rport_enter_ready(struct fc_rport *); | 63 | static void fc_rport_enter_ready(struct fc_rport_priv *); |
64 | static void fc_rport_enter_logo(struct fc_rport *); | 64 | static void fc_rport_enter_logo(struct fc_rport_priv *); |
65 | 65 | ||
66 | static void fc_rport_recv_plogi_req(struct fc_rport *, | 66 | static void fc_rport_recv_plogi_req(struct fc_rport_priv *, |
67 | struct fc_seq *, struct fc_frame *); | 67 | struct fc_seq *, struct fc_frame *); |
68 | static void fc_rport_recv_prli_req(struct fc_rport *, | 68 | static void fc_rport_recv_prli_req(struct fc_rport_priv *, |
69 | struct fc_seq *, struct fc_frame *); | 69 | struct fc_seq *, struct fc_frame *); |
70 | static void fc_rport_recv_prlo_req(struct fc_rport *, | 70 | static void fc_rport_recv_prlo_req(struct fc_rport_priv *, |
71 | struct fc_seq *, struct fc_frame *); | 71 | struct fc_seq *, struct fc_frame *); |
72 | static void fc_rport_recv_logo_req(struct fc_rport *, | 72 | static void fc_rport_recv_logo_req(struct fc_rport_priv *, |
73 | struct fc_seq *, struct fc_frame *); | 73 | struct fc_seq *, struct fc_frame *); |
74 | static void fc_rport_timeout(struct work_struct *); | 74 | static void fc_rport_timeout(struct work_struct *); |
75 | static void fc_rport_error(struct fc_rport *, struct fc_frame *); | 75 | static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *); |
76 | static void fc_rport_error_retry(struct fc_rport *, struct fc_frame *); | 76 | static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *); |
77 | static void fc_rport_work(struct work_struct *); | 77 | static void fc_rport_work(struct work_struct *); |
78 | 78 | ||
79 | static const char *fc_rport_state_names[] = { | 79 | static const char *fc_rport_state_names[] = { |
@@ -89,12 +89,14 @@ static const char *fc_rport_state_names[] = { | |||
89 | static void fc_rport_rogue_destroy(struct device *dev) | 89 | static void fc_rport_rogue_destroy(struct device *dev) |
90 | { | 90 | { |
91 | struct fc_rport *rport = dev_to_rport(dev); | 91 | struct fc_rport *rport = dev_to_rport(dev); |
92 | FC_RPORT_DBG(rport, "Destroying rogue rport\n"); | 92 | struct fc_rport_priv *rdata = RPORT_TO_PRIV(rport); |
93 | |||
94 | FC_RPORT_DBG(rdata, "Destroying rogue rport\n"); | ||
93 | kfree(rport); | 95 | kfree(rport); |
94 | } | 96 | } |
95 | 97 | ||
96 | struct fc_rport *fc_rport_rogue_create(struct fc_lport *lport, | 98 | struct fc_rport_priv *fc_rport_rogue_create(struct fc_lport *lport, |
97 | struct fc_rport_identifiers *ids) | 99 | struct fc_rport_identifiers *ids) |
98 | { | 100 | { |
99 | struct fc_rport *rport; | 101 | struct fc_rport *rport; |
100 | struct fc_rport_priv *rdata; | 102 | struct fc_rport_priv *rdata; |
@@ -135,17 +137,16 @@ struct fc_rport *fc_rport_rogue_create(struct fc_lport *lport, | |||
135 | */ | 137 | */ |
136 | INIT_LIST_HEAD(&rdata->peers); | 138 | INIT_LIST_HEAD(&rdata->peers); |
137 | 139 | ||
138 | return rport; | 140 | return rdata; |
139 | } | 141 | } |
140 | 142 | ||
141 | /** | 143 | /** |
142 | * fc_rport_state() - return a string for the state the rport is in | 144 | * fc_rport_state() - return a string for the state the rport is in |
143 | * @rport: The rport whose state we want to get a string for | 145 | * @rdata: remote port private data |
144 | */ | 146 | */ |
145 | static const char *fc_rport_state(struct fc_rport *rport) | 147 | static const char *fc_rport_state(struct fc_rport_priv *rdata) |
146 | { | 148 | { |
147 | const char *cp; | 149 | const char *cp; |
148 | struct fc_rport_priv *rdata = rport->dd_data; | ||
149 | 150 | ||
150 | cp = fc_rport_state_names[rdata->rp_state]; | 151 | cp = fc_rport_state_names[rdata->rp_state]; |
151 | if (!cp) | 152 | if (!cp) |
@@ -192,15 +193,14 @@ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, | |||
192 | 193 | ||
193 | /** | 194 | /** |
194 | * fc_rport_state_enter() - Change the rport's state | 195 | * fc_rport_state_enter() - Change the rport's state |
195 | * @rport: The rport whose state should change | 196 | * @rdata: The rport whose state should change |
196 | * @new: The new state of the rport | 197 | * @new: The new state of the rport |
197 | * | 198 | * |
198 | * Locking Note: Called with the rport lock held | 199 | * Locking Note: Called with the rport lock held |
199 | */ | 200 | */ |
200 | static void fc_rport_state_enter(struct fc_rport *rport, | 201 | static void fc_rport_state_enter(struct fc_rport_priv *rdata, |
201 | enum fc_rport_state new) | 202 | enum fc_rport_state new) |
202 | { | 203 | { |
203 | struct fc_rport_priv *rdata = rport->dd_data; | ||
204 | if (rdata->rp_state != new) | 204 | if (rdata->rp_state != new) |
205 | rdata->retries = 0; | 205 | rdata->retries = 0; |
206 | rdata->rp_state = new; | 206 | rdata->rp_state = new; |
@@ -255,7 +255,7 @@ static void fc_rport_work(struct work_struct *work) | |||
255 | INIT_LIST_HEAD(&new_rdata->peers); | 255 | INIT_LIST_HEAD(&new_rdata->peers); |
256 | INIT_WORK(&new_rdata->event_work, fc_rport_work); | 256 | INIT_WORK(&new_rdata->event_work, fc_rport_work); |
257 | 257 | ||
258 | fc_rport_state_enter(new_rport, RPORT_ST_READY); | 258 | fc_rport_state_enter(new_rdata, RPORT_ST_READY); |
259 | } else { | 259 | } else { |
260 | printk(KERN_WARNING "libfc: Failed to allocate " | 260 | printk(KERN_WARNING "libfc: Failed to allocate " |
261 | " memory for rport (%6x)\n", ids.port_id); | 261 | " memory for rport (%6x)\n", ids.port_id); |
@@ -263,20 +263,20 @@ static void fc_rport_work(struct work_struct *work) | |||
263 | } | 263 | } |
264 | if (rport->port_id != FC_FID_DIR_SERV) | 264 | if (rport->port_id != FC_FID_DIR_SERV) |
265 | if (rport_ops->event_callback) | 265 | if (rport_ops->event_callback) |
266 | rport_ops->event_callback(lport, rport, | 266 | rport_ops->event_callback(lport, rdata, |
267 | RPORT_EV_FAILED); | 267 | RPORT_EV_FAILED); |
268 | put_device(&rport->dev); | 268 | put_device(&rport->dev); |
269 | rport = new_rport; | 269 | rport = new_rport; |
270 | rdata = new_rport->dd_data; | 270 | rdata = new_rport->dd_data; |
271 | if (rport_ops->event_callback) | 271 | if (rport_ops->event_callback) |
272 | rport_ops->event_callback(lport, rport, event); | 272 | rport_ops->event_callback(lport, rdata, event); |
273 | } else if ((event == RPORT_EV_FAILED) || | 273 | } else if ((event == RPORT_EV_FAILED) || |
274 | (event == RPORT_EV_LOGO) || | 274 | (event == RPORT_EV_LOGO) || |
275 | (event == RPORT_EV_STOP)) { | 275 | (event == RPORT_EV_STOP)) { |
276 | trans_state = rdata->trans_state; | 276 | trans_state = rdata->trans_state; |
277 | mutex_unlock(&rdata->rp_mutex); | 277 | mutex_unlock(&rdata->rp_mutex); |
278 | if (rport_ops->event_callback) | 278 | if (rport_ops->event_callback) |
279 | rport_ops->event_callback(lport, rport, event); | 279 | rport_ops->event_callback(lport, rdata, event); |
280 | cancel_delayed_work_sync(&rdata->retry_work); | 280 | cancel_delayed_work_sync(&rdata->retry_work); |
281 | if (trans_state == FC_PORTSTATE_ROGUE) | 281 | if (trans_state == FC_PORTSTATE_ROGUE) |
282 | put_device(&rport->dev); | 282 | put_device(&rport->dev); |
@@ -292,21 +292,19 @@ static void fc_rport_work(struct work_struct *work) | |||
292 | 292 | ||
293 | /** | 293 | /** |
294 | * fc_rport_login() - Start the remote port login state machine | 294 | * fc_rport_login() - Start the remote port login state machine |
295 | * @rport: Fibre Channel remote port | 295 | * @rdata: private remote port |
296 | * | 296 | * |
297 | * Locking Note: Called without the rport lock held. This | 297 | * Locking Note: Called without the rport lock held. This |
298 | * function will hold the rport lock, call an _enter_* | 298 | * function will hold the rport lock, call an _enter_* |
299 | * function and then unlock the rport. | 299 | * function and then unlock the rport. |
300 | */ | 300 | */ |
301 | int fc_rport_login(struct fc_rport *rport) | 301 | int fc_rport_login(struct fc_rport_priv *rdata) |
302 | { | 302 | { |
303 | struct fc_rport_priv *rdata = rport->dd_data; | ||
304 | |||
305 | mutex_lock(&rdata->rp_mutex); | 303 | mutex_lock(&rdata->rp_mutex); |
306 | 304 | ||
307 | FC_RPORT_DBG(rport, "Login to port\n"); | 305 | FC_RPORT_DBG(rdata, "Login to port\n"); |
308 | 306 | ||
309 | fc_rport_enter_plogi(rport); | 307 | fc_rport_enter_plogi(rdata); |
310 | 308 | ||
311 | mutex_unlock(&rdata->rp_mutex); | 309 | mutex_unlock(&rdata->rp_mutex); |
312 | 310 | ||
@@ -315,7 +313,7 @@ int fc_rport_login(struct fc_rport *rport) | |||
315 | 313 | ||
316 | /** | 314 | /** |
317 | * fc_rport_enter_delete() - schedule a remote port to be deleted. | 315 | * fc_rport_enter_delete() - schedule a remote port to be deleted. |
318 | * @rport: Fibre Channel remote port | 316 | * @rdata: private remote port |
319 | * @event: event to report as the reason for deletion | 317 | * @event: event to report as the reason for deletion |
320 | * | 318 | * |
321 | * Locking Note: Called with the rport lock held. | 319 | * Locking Note: Called with the rport lock held. |
@@ -327,17 +325,15 @@ int fc_rport_login(struct fc_rport *rport) | |||
327 | * Since we have the mutex, even if fc_rport_work() is already started, | 325 | * Since we have the mutex, even if fc_rport_work() is already started, |
328 | * it'll see the new event. | 326 | * it'll see the new event. |
329 | */ | 327 | */ |
330 | static void fc_rport_enter_delete(struct fc_rport *rport, | 328 | static void fc_rport_enter_delete(struct fc_rport_priv *rdata, |
331 | enum fc_rport_event event) | 329 | enum fc_rport_event event) |
332 | { | 330 | { |
333 | struct fc_rport_priv *rdata = rport->dd_data; | ||
334 | |||
335 | if (rdata->rp_state == RPORT_ST_DELETE) | 331 | if (rdata->rp_state == RPORT_ST_DELETE) |
336 | return; | 332 | return; |
337 | 333 | ||
338 | FC_RPORT_DBG(rport, "Delete port\n"); | 334 | FC_RPORT_DBG(rdata, "Delete port\n"); |
339 | 335 | ||
340 | fc_rport_state_enter(rport, RPORT_ST_DELETE); | 336 | fc_rport_state_enter(rdata, RPORT_ST_DELETE); |
341 | 337 | ||
342 | if (rdata->event == RPORT_EV_NONE) | 338 | if (rdata->event == RPORT_EV_NONE) |
343 | queue_work(rport_event_queue, &rdata->event_work); | 339 | queue_work(rport_event_queue, &rdata->event_work); |
@@ -346,33 +342,31 @@ static void fc_rport_enter_delete(struct fc_rport *rport, | |||
346 | 342 | ||
347 | /** | 343 | /** |
348 | * fc_rport_logoff() - Logoff and remove an rport | 344 | * fc_rport_logoff() - Logoff and remove an rport |
349 | * @rport: Fibre Channel remote port to be removed | 345 | * @rdata: private remote port |
350 | * | 346 | * |
351 | * Locking Note: Called without the rport lock held. This | 347 | * Locking Note: Called without the rport lock held. This |
352 | * function will hold the rport lock, call an _enter_* | 348 | * function will hold the rport lock, call an _enter_* |
353 | * function and then unlock the rport. | 349 | * function and then unlock the rport. |
354 | */ | 350 | */ |
355 | int fc_rport_logoff(struct fc_rport *rport) | 351 | int fc_rport_logoff(struct fc_rport_priv *rdata) |
356 | { | 352 | { |
357 | struct fc_rport_priv *rdata = rport->dd_data; | ||
358 | |||
359 | mutex_lock(&rdata->rp_mutex); | 353 | mutex_lock(&rdata->rp_mutex); |
360 | 354 | ||
361 | FC_RPORT_DBG(rport, "Remove port\n"); | 355 | FC_RPORT_DBG(rdata, "Remove port\n"); |
362 | 356 | ||
363 | if (rdata->rp_state == RPORT_ST_DELETE) { | 357 | if (rdata->rp_state == RPORT_ST_DELETE) { |
364 | FC_RPORT_DBG(rport, "Port in Delete state, not removing\n"); | 358 | FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n"); |
365 | mutex_unlock(&rdata->rp_mutex); | 359 | mutex_unlock(&rdata->rp_mutex); |
366 | goto out; | 360 | goto out; |
367 | } | 361 | } |
368 | 362 | ||
369 | fc_rport_enter_logo(rport); | 363 | fc_rport_enter_logo(rdata); |
370 | 364 | ||
371 | /* | 365 | /* |
372 | * Change the state to Delete so that we discard | 366 | * Change the state to Delete so that we discard |
373 | * the response. | 367 | * the response. |
374 | */ | 368 | */ |
375 | fc_rport_enter_delete(rport, RPORT_EV_STOP); | 369 | fc_rport_enter_delete(rdata, RPORT_EV_STOP); |
376 | mutex_unlock(&rdata->rp_mutex); | 370 | mutex_unlock(&rdata->rp_mutex); |
377 | 371 | ||
378 | out: | 372 | out: |
@@ -381,18 +375,16 @@ out: | |||
381 | 375 | ||
382 | /** | 376 | /** |
383 | * fc_rport_enter_ready() - The rport is ready | 377 | * fc_rport_enter_ready() - The rport is ready |
384 | * @rport: Fibre Channel remote port that is ready | 378 | * @rdata: private remote port |
385 | * | 379 | * |
386 | * Locking Note: The rport lock is expected to be held before calling | 380 | * Locking Note: The rport lock is expected to be held before calling |
387 | * this routine. | 381 | * this routine. |
388 | */ | 382 | */ |
389 | static void fc_rport_enter_ready(struct fc_rport *rport) | 383 | static void fc_rport_enter_ready(struct fc_rport_priv *rdata) |
390 | { | 384 | { |
391 | struct fc_rport_priv *rdata = rport->dd_data; | 385 | fc_rport_state_enter(rdata, RPORT_ST_READY); |
392 | |||
393 | fc_rport_state_enter(rport, RPORT_ST_READY); | ||
394 | 386 | ||
395 | FC_RPORT_DBG(rport, "Port is Ready\n"); | 387 | FC_RPORT_DBG(rdata, "Port is Ready\n"); |
396 | 388 | ||
397 | if (rdata->event == RPORT_EV_NONE) | 389 | if (rdata->event == RPORT_EV_NONE) |
398 | queue_work(rport_event_queue, &rdata->event_work); | 390 | queue_work(rport_event_queue, &rdata->event_work); |
@@ -411,22 +403,21 @@ static void fc_rport_timeout(struct work_struct *work) | |||
411 | { | 403 | { |
412 | struct fc_rport_priv *rdata = | 404 | struct fc_rport_priv *rdata = |
413 | container_of(work, struct fc_rport_priv, retry_work.work); | 405 | container_of(work, struct fc_rport_priv, retry_work.work); |
414 | struct fc_rport *rport = PRIV_TO_RPORT(rdata); | ||
415 | 406 | ||
416 | mutex_lock(&rdata->rp_mutex); | 407 | mutex_lock(&rdata->rp_mutex); |
417 | 408 | ||
418 | switch (rdata->rp_state) { | 409 | switch (rdata->rp_state) { |
419 | case RPORT_ST_PLOGI: | 410 | case RPORT_ST_PLOGI: |
420 | fc_rport_enter_plogi(rport); | 411 | fc_rport_enter_plogi(rdata); |
421 | break; | 412 | break; |
422 | case RPORT_ST_PRLI: | 413 | case RPORT_ST_PRLI: |
423 | fc_rport_enter_prli(rport); | 414 | fc_rport_enter_prli(rdata); |
424 | break; | 415 | break; |
425 | case RPORT_ST_RTV: | 416 | case RPORT_ST_RTV: |
426 | fc_rport_enter_rtv(rport); | 417 | fc_rport_enter_rtv(rdata); |
427 | break; | 418 | break; |
428 | case RPORT_ST_LOGO: | 419 | case RPORT_ST_LOGO: |
429 | fc_rport_enter_logo(rport); | 420 | fc_rport_enter_logo(rdata); |
430 | break; | 421 | break; |
431 | case RPORT_ST_READY: | 422 | case RPORT_ST_READY: |
432 | case RPORT_ST_INIT: | 423 | case RPORT_ST_INIT: |
@@ -439,27 +430,25 @@ static void fc_rport_timeout(struct work_struct *work) | |||
439 | 430 | ||
440 | /** | 431 | /** |
441 | * fc_rport_error() - Error handler, called once retries have been exhausted | 432 | * fc_rport_error() - Error handler, called once retries have been exhausted |
442 | * @rport: The fc_rport object | 433 | * @rdata: private remote port |
443 | * @fp: The frame pointer | 434 | * @fp: The frame pointer |
444 | * | 435 | * |
445 | * Locking Note: The rport lock is expected to be held before | 436 | * Locking Note: The rport lock is expected to be held before |
446 | * calling this routine | 437 | * calling this routine |
447 | */ | 438 | */ |
448 | static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) | 439 | static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) |
449 | { | 440 | { |
450 | struct fc_rport_priv *rdata = rport->dd_data; | 441 | FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n", |
451 | 442 | PTR_ERR(fp), fc_rport_state(rdata), rdata->retries); | |
452 | FC_RPORT_DBG(rport, "Error %ld in state %s, retries %d\n", | ||
453 | PTR_ERR(fp), fc_rport_state(rport), rdata->retries); | ||
454 | 443 | ||
455 | switch (rdata->rp_state) { | 444 | switch (rdata->rp_state) { |
456 | case RPORT_ST_PLOGI: | 445 | case RPORT_ST_PLOGI: |
457 | case RPORT_ST_PRLI: | 446 | case RPORT_ST_PRLI: |
458 | case RPORT_ST_LOGO: | 447 | case RPORT_ST_LOGO: |
459 | fc_rport_enter_delete(rport, RPORT_EV_FAILED); | 448 | fc_rport_enter_delete(rdata, RPORT_EV_FAILED); |
460 | break; | 449 | break; |
461 | case RPORT_ST_RTV: | 450 | case RPORT_ST_RTV: |
462 | fc_rport_enter_ready(rport); | 451 | fc_rport_enter_ready(rdata); |
463 | break; | 452 | break; |
464 | case RPORT_ST_DELETE: | 453 | case RPORT_ST_DELETE: |
465 | case RPORT_ST_READY: | 454 | case RPORT_ST_READY: |
@@ -470,7 +459,7 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) | |||
470 | 459 | ||
471 | /** | 460 | /** |
472 | * fc_rport_error_retry() - Error handler when retries are desired | 461 | * fc_rport_error_retry() - Error handler when retries are desired |
473 | * @rport: The fc_rport object | 462 | * @rdata: private remote port data |
474 | * @fp: The frame pointer | 463 | * @fp: The frame pointer |
475 | * | 464 | * |
476 | * If the error was an exchange timeout retry immediately, | 465 | * If the error was an exchange timeout retry immediately, |
@@ -479,18 +468,18 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) | |||
479 | * Locking Note: The rport lock is expected to be held before | 468 | * Locking Note: The rport lock is expected to be held before |
480 | * calling this routine | 469 | * calling this routine |
481 | */ | 470 | */ |
482 | static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp) | 471 | static void fc_rport_error_retry(struct fc_rport_priv *rdata, |
472 | struct fc_frame *fp) | ||
483 | { | 473 | { |
484 | struct fc_rport_priv *rdata = rport->dd_data; | ||
485 | unsigned long delay = FC_DEF_E_D_TOV; | 474 | unsigned long delay = FC_DEF_E_D_TOV; |
486 | 475 | ||
487 | /* make sure this isn't an FC_EX_CLOSED error, never retry those */ | 476 | /* make sure this isn't an FC_EX_CLOSED error, never retry those */ |
488 | if (PTR_ERR(fp) == -FC_EX_CLOSED) | 477 | if (PTR_ERR(fp) == -FC_EX_CLOSED) |
489 | return fc_rport_error(rport, fp); | 478 | return fc_rport_error(rdata, fp); |
490 | 479 | ||
491 | if (rdata->retries < rdata->local_port->max_rport_retry_count) { | 480 | if (rdata->retries < rdata->local_port->max_rport_retry_count) { |
492 | FC_RPORT_DBG(rport, "Error %ld in state %s, retrying\n", | 481 | FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n", |
493 | PTR_ERR(fp), fc_rport_state(rport)); | 482 | PTR_ERR(fp), fc_rport_state(rdata)); |
494 | rdata->retries++; | 483 | rdata->retries++; |
495 | /* no additional delay on exchange timeouts */ | 484 | /* no additional delay on exchange timeouts */ |
496 | if (PTR_ERR(fp) == -FC_EX_TIMEOUT) | 485 | if (PTR_ERR(fp) == -FC_EX_TIMEOUT) |
@@ -499,24 +488,24 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp) | |||
499 | return; | 488 | return; |
500 | } | 489 | } |
501 | 490 | ||
502 | return fc_rport_error(rport, fp); | 491 | return fc_rport_error(rdata, fp); |
503 | } | 492 | } |
504 | 493 | ||
505 | /** | 494 | /** |
506 | * fc_rport_plogi_recv_resp() - Handle incoming ELS PLOGI response | 495 | * fc_rport_plogi_recv_resp() - Handle incoming ELS PLOGI response |
507 | * @sp: current sequence in the PLOGI exchange | 496 | * @sp: current sequence in the PLOGI exchange |
508 | * @fp: response frame | 497 | * @fp: response frame |
509 | * @rp_arg: Fibre Channel remote port | 498 | * @rdata_arg: private remote port data |
510 | * | 499 | * |
511 | * Locking Note: This function will be called without the rport lock | 500 | * Locking Note: This function will be called without the rport lock |
512 | * held, but it will lock, call an _enter_* function or fc_rport_error | 501 | * held, but it will lock, call an _enter_* function or fc_rport_error |
513 | * and then unlock the rport. | 502 | * and then unlock the rport. |
514 | */ | 503 | */ |
515 | static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, | 504 | static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, |
516 | void *rp_arg) | 505 | void *rdata_arg) |
517 | { | 506 | { |
518 | struct fc_rport *rport = rp_arg; | 507 | struct fc_rport_priv *rdata = rdata_arg; |
519 | struct fc_rport_priv *rdata = rport->dd_data; | 508 | struct fc_rport *rport = PRIV_TO_RPORT(rdata); |
520 | struct fc_lport *lport = rdata->local_port; | 509 | struct fc_lport *lport = rdata->local_port; |
521 | struct fc_els_flogi *plp = NULL; | 510 | struct fc_els_flogi *plp = NULL; |
522 | unsigned int tov; | 511 | unsigned int tov; |
@@ -526,18 +515,18 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
526 | 515 | ||
527 | mutex_lock(&rdata->rp_mutex); | 516 | mutex_lock(&rdata->rp_mutex); |
528 | 517 | ||
529 | FC_RPORT_DBG(rport, "Received a PLOGI response\n"); | 518 | FC_RPORT_DBG(rdata, "Received a PLOGI response\n"); |
530 | 519 | ||
531 | if (rdata->rp_state != RPORT_ST_PLOGI) { | 520 | if (rdata->rp_state != RPORT_ST_PLOGI) { |
532 | FC_RPORT_DBG(rport, "Received a PLOGI response, but in state " | 521 | FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state " |
533 | "%s\n", fc_rport_state(rport)); | 522 | "%s\n", fc_rport_state(rdata)); |
534 | if (IS_ERR(fp)) | 523 | if (IS_ERR(fp)) |
535 | goto err; | 524 | goto err; |
536 | goto out; | 525 | goto out; |
537 | } | 526 | } |
538 | 527 | ||
539 | if (IS_ERR(fp)) { | 528 | if (IS_ERR(fp)) { |
540 | fc_rport_error_retry(rport, fp); | 529 | fc_rport_error_retry(rdata, fp); |
541 | goto err; | 530 | goto err; |
542 | } | 531 | } |
543 | 532 | ||
@@ -565,11 +554,11 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
565 | * we skip PRLI and RTV and go straight to READY. | 554 | * we skip PRLI and RTV and go straight to READY. |
566 | */ | 555 | */ |
567 | if (rport->port_id >= FC_FID_DOM_MGR) | 556 | if (rport->port_id >= FC_FID_DOM_MGR) |
568 | fc_rport_enter_ready(rport); | 557 | fc_rport_enter_ready(rdata); |
569 | else | 558 | else |
570 | fc_rport_enter_prli(rport); | 559 | fc_rport_enter_prli(rdata); |
571 | } else | 560 | } else |
572 | fc_rport_error_retry(rport, fp); | 561 | fc_rport_error_retry(rdata, fp); |
573 | 562 | ||
574 | out: | 563 | out: |
575 | fc_frame_free(fp); | 564 | fc_frame_free(fp); |
@@ -580,33 +569,33 @@ err: | |||
580 | 569 | ||
581 | /** | 570 | /** |
582 | * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer | 571 | * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer |
583 | * @rport: Fibre Channel remote port to send PLOGI to | 572 | * @rdata: private remote port data |
584 | * | 573 | * |
585 | * Locking Note: The rport lock is expected to be held before calling | 574 | * Locking Note: The rport lock is expected to be held before calling |
586 | * this routine. | 575 | * this routine. |
587 | */ | 576 | */ |
588 | static void fc_rport_enter_plogi(struct fc_rport *rport) | 577 | static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) |
589 | { | 578 | { |
590 | struct fc_rport_priv *rdata = rport->dd_data; | ||
591 | struct fc_lport *lport = rdata->local_port; | 579 | struct fc_lport *lport = rdata->local_port; |
580 | struct fc_rport *rport = PRIV_TO_RPORT(rdata); | ||
592 | struct fc_frame *fp; | 581 | struct fc_frame *fp; |
593 | 582 | ||
594 | FC_RPORT_DBG(rport, "Port entered PLOGI state from %s state\n", | 583 | FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n", |
595 | fc_rport_state(rport)); | 584 | fc_rport_state(rdata)); |
596 | 585 | ||
597 | fc_rport_state_enter(rport, RPORT_ST_PLOGI); | 586 | fc_rport_state_enter(rdata, RPORT_ST_PLOGI); |
598 | 587 | ||
599 | rport->maxframe_size = FC_MIN_MAX_PAYLOAD; | 588 | rport->maxframe_size = FC_MIN_MAX_PAYLOAD; |
600 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); | 589 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); |
601 | if (!fp) { | 590 | if (!fp) { |
602 | fc_rport_error_retry(rport, fp); | 591 | fc_rport_error_retry(rdata, fp); |
603 | return; | 592 | return; |
604 | } | 593 | } |
605 | rdata->e_d_tov = lport->e_d_tov; | 594 | rdata->e_d_tov = lport->e_d_tov; |
606 | 595 | ||
607 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI, | 596 | if (!lport->tt.elsct_send(lport, rdata, fp, ELS_PLOGI, |
608 | fc_rport_plogi_resp, rport, lport->e_d_tov)) | 597 | fc_rport_plogi_resp, rdata, lport->e_d_tov)) |
609 | fc_rport_error_retry(rport, fp); | 598 | fc_rport_error_retry(rdata, fp); |
610 | else | 599 | else |
611 | get_device(&rport->dev); | 600 | get_device(&rport->dev); |
612 | } | 601 | } |
@@ -615,17 +604,17 @@ static void fc_rport_enter_plogi(struct fc_rport *rport) | |||
615 | * fc_rport_prli_resp() - Process Login (PRLI) response handler | 604 | * fc_rport_prli_resp() - Process Login (PRLI) response handler |
616 | * @sp: current sequence in the PRLI exchange | 605 | * @sp: current sequence in the PRLI exchange |
617 | * @fp: response frame | 606 | * @fp: response frame |
618 | * @rp_arg: Fibre Channel remote port | 607 | * @rdata_arg: private remote port data |
619 | * | 608 | * |
620 | * Locking Note: This function will be called without the rport lock | 609 | * Locking Note: This function will be called without the rport lock |
621 | * held, but it will lock, call an _enter_* function or fc_rport_error | 610 | * held, but it will lock, call an _enter_* function or fc_rport_error |
622 | * and then unlock the rport. | 611 | * and then unlock the rport. |
623 | */ | 612 | */ |
624 | static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, | 613 | static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, |
625 | void *rp_arg) | 614 | void *rdata_arg) |
626 | { | 615 | { |
627 | struct fc_rport *rport = rp_arg; | 616 | struct fc_rport_priv *rdata = rdata_arg; |
628 | struct fc_rport_priv *rdata = rport->dd_data; | 617 | struct fc_rport *rport = PRIV_TO_RPORT(rdata); |
629 | struct { | 618 | struct { |
630 | struct fc_els_prli prli; | 619 | struct fc_els_prli prli; |
631 | struct fc_els_spp spp; | 620 | struct fc_els_spp spp; |
@@ -636,18 +625,18 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
636 | 625 | ||
637 | mutex_lock(&rdata->rp_mutex); | 626 | mutex_lock(&rdata->rp_mutex); |
638 | 627 | ||
639 | FC_RPORT_DBG(rport, "Received a PRLI response\n"); | 628 | FC_RPORT_DBG(rdata, "Received a PRLI response\n"); |
640 | 629 | ||
641 | if (rdata->rp_state != RPORT_ST_PRLI) { | 630 | if (rdata->rp_state != RPORT_ST_PRLI) { |
642 | FC_RPORT_DBG(rport, "Received a PRLI response, but in state " | 631 | FC_RPORT_DBG(rdata, "Received a PRLI response, but in state " |
643 | "%s\n", fc_rport_state(rport)); | 632 | "%s\n", fc_rport_state(rdata)); |
644 | if (IS_ERR(fp)) | 633 | if (IS_ERR(fp)) |
645 | goto err; | 634 | goto err; |
646 | goto out; | 635 | goto out; |
647 | } | 636 | } |
648 | 637 | ||
649 | if (IS_ERR(fp)) { | 638 | if (IS_ERR(fp)) { |
650 | fc_rport_error_retry(rport, fp); | 639 | fc_rport_error_retry(rdata, fp); |
651 | goto err; | 640 | goto err; |
652 | } | 641 | } |
653 | 642 | ||
@@ -667,11 +656,11 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
667 | roles |= FC_RPORT_ROLE_FCP_TARGET; | 656 | roles |= FC_RPORT_ROLE_FCP_TARGET; |
668 | 657 | ||
669 | rport->roles = roles; | 658 | rport->roles = roles; |
670 | fc_rport_enter_rtv(rport); | 659 | fc_rport_enter_rtv(rdata); |
671 | 660 | ||
672 | } else { | 661 | } else { |
673 | FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n"); | 662 | FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n"); |
674 | fc_rport_enter_delete(rport, RPORT_EV_FAILED); | 663 | fc_rport_enter_delete(rdata, RPORT_EV_FAILED); |
675 | } | 664 | } |
676 | 665 | ||
677 | out: | 666 | out: |
@@ -685,42 +674,42 @@ err: | |||
685 | * fc_rport_logo_resp() - Logout (LOGO) response handler | 674 | * fc_rport_logo_resp() - Logout (LOGO) response handler |
686 | * @sp: current sequence in the LOGO exchange | 675 | * @sp: current sequence in the LOGO exchange |
687 | * @fp: response frame | 676 | * @fp: response frame |
688 | * @rp_arg: Fibre Channel remote port | 677 | * @rdata_arg: private remote port data |
689 | * | 678 | * |
690 | * Locking Note: This function will be called without the rport lock | 679 | * Locking Note: This function will be called without the rport lock |
691 | * held, but it will lock, call an _enter_* function or fc_rport_error | 680 | * held, but it will lock, call an _enter_* function or fc_rport_error |
692 | * and then unlock the rport. | 681 | * and then unlock the rport. |
693 | */ | 682 | */ |
694 | static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, | 683 | static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, |
695 | void *rp_arg) | 684 | void *rdata_arg) |
696 | { | 685 | { |
697 | struct fc_rport *rport = rp_arg; | 686 | struct fc_rport_priv *rdata = rdata_arg; |
698 | struct fc_rport_priv *rdata = rport->dd_data; | 687 | struct fc_rport *rport = PRIV_TO_RPORT(rdata); |
699 | u8 op; | 688 | u8 op; |
700 | 689 | ||
701 | mutex_lock(&rdata->rp_mutex); | 690 | mutex_lock(&rdata->rp_mutex); |
702 | 691 | ||
703 | FC_RPORT_DBG(rport, "Received a LOGO response\n"); | 692 | FC_RPORT_DBG(rdata, "Received a LOGO response\n"); |
704 | 693 | ||
705 | if (rdata->rp_state != RPORT_ST_LOGO) { | 694 | if (rdata->rp_state != RPORT_ST_LOGO) { |
706 | FC_RPORT_DBG(rport, "Received a LOGO response, but in state " | 695 | FC_RPORT_DBG(rdata, "Received a LOGO response, but in state " |
707 | "%s\n", fc_rport_state(rport)); | 696 | "%s\n", fc_rport_state(rdata)); |
708 | if (IS_ERR(fp)) | 697 | if (IS_ERR(fp)) |
709 | goto err; | 698 | goto err; |
710 | goto out; | 699 | goto out; |
711 | } | 700 | } |
712 | 701 | ||
713 | if (IS_ERR(fp)) { | 702 | if (IS_ERR(fp)) { |
714 | fc_rport_error_retry(rport, fp); | 703 | fc_rport_error_retry(rdata, fp); |
715 | goto err; | 704 | goto err; |
716 | } | 705 | } |
717 | 706 | ||
718 | op = fc_frame_payload_op(fp); | 707 | op = fc_frame_payload_op(fp); |
719 | if (op == ELS_LS_ACC) { | 708 | if (op == ELS_LS_ACC) { |
720 | fc_rport_enter_rtv(rport); | 709 | fc_rport_enter_rtv(rdata); |
721 | } else { | 710 | } else { |
722 | FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n"); | 711 | FC_RPORT_DBG(rdata, "Bad ELS response for LOGO command\n"); |
723 | fc_rport_enter_delete(rport, RPORT_EV_LOGO); | 712 | fc_rport_enter_delete(rdata, RPORT_EV_LOGO); |
724 | } | 713 | } |
725 | 714 | ||
726 | out: | 715 | out: |
@@ -732,14 +721,14 @@ err: | |||
732 | 721 | ||
733 | /** | 722 | /** |
734 | * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer | 723 | * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer |
735 | * @rport: Fibre Channel remote port to send PRLI to | 724 | * @rdata: private remote port data |
736 | * | 725 | * |
737 | * Locking Note: The rport lock is expected to be held before calling | 726 | * Locking Note: The rport lock is expected to be held before calling |
738 | * this routine. | 727 | * this routine. |
739 | */ | 728 | */ |
740 | static void fc_rport_enter_prli(struct fc_rport *rport) | 729 | static void fc_rport_enter_prli(struct fc_rport_priv *rdata) |
741 | { | 730 | { |
742 | struct fc_rport_priv *rdata = rport->dd_data; | 731 | struct fc_rport *rport = PRIV_TO_RPORT(rdata); |
743 | struct fc_lport *lport = rdata->local_port; | 732 | struct fc_lport *lport = rdata->local_port; |
744 | struct { | 733 | struct { |
745 | struct fc_els_prli prli; | 734 | struct fc_els_prli prli; |
@@ -747,20 +736,20 @@ static void fc_rport_enter_prli(struct fc_rport *rport) | |||
747 | } *pp; | 736 | } *pp; |
748 | struct fc_frame *fp; | 737 | struct fc_frame *fp; |
749 | 738 | ||
750 | FC_RPORT_DBG(rport, "Port entered PRLI state from %s state\n", | 739 | FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n", |
751 | fc_rport_state(rport)); | 740 | fc_rport_state(rdata)); |
752 | 741 | ||
753 | fc_rport_state_enter(rport, RPORT_ST_PRLI); | 742 | fc_rport_state_enter(rdata, RPORT_ST_PRLI); |
754 | 743 | ||
755 | fp = fc_frame_alloc(lport, sizeof(*pp)); | 744 | fp = fc_frame_alloc(lport, sizeof(*pp)); |
756 | if (!fp) { | 745 | if (!fp) { |
757 | fc_rport_error_retry(rport, fp); | 746 | fc_rport_error_retry(rdata, fp); |
758 | return; | 747 | return; |
759 | } | 748 | } |
760 | 749 | ||
761 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI, | 750 | if (!lport->tt.elsct_send(lport, rdata, fp, ELS_PRLI, |
762 | fc_rport_prli_resp, rport, lport->e_d_tov)) | 751 | fc_rport_prli_resp, rdata, lport->e_d_tov)) |
763 | fc_rport_error_retry(rport, fp); | 752 | fc_rport_error_retry(rdata, fp); |
764 | else | 753 | else |
765 | get_device(&rport->dev); | 754 | get_device(&rport->dev); |
766 | } | 755 | } |
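fc_rport_enter_prli() now receives the fc_rport_priv and only reaches back to the transport rport via PRIV_TO_RPORT() where it still needs it (the get_device() reference). The sketch below models the co-allocation idea such a macro presumably relies on, i.e. the private area sitting directly behind the rport in a single allocation; the types and macro name are stand-ins, not the libfc definitions:

#include <stdio.h>
#include <stdlib.h>

struct model_rport { unsigned int port_id; };
struct model_rport_priv { int rp_state; };

/* Assumed layout: the priv data is placed immediately after the rport,
 * so a priv pointer can be walked back to the rport it belongs to. */
#define MODEL_PRIV_TO_RPORT(p) \
	((struct model_rport *)((char *)(p) - sizeof(struct model_rport)))

int main(void)
{
	struct model_rport *rport;
	struct model_rport_priv *rdata;

	/* one allocation holding both the rport and its private area */
	rport = calloc(1, sizeof(*rport) + sizeof(*rdata));
	if (!rport)
		return 1;
	rdata = (struct model_rport_priv *)(rport + 1);

	rport->port_id = 0x0a0b0c;
	printf("port_id reached from priv: %06x\n",
	       MODEL_PRIV_TO_RPORT(rdata)->port_id);
	free(rport);
	return 0;
}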
@@ -769,7 +758,7 @@ static void fc_rport_enter_prli(struct fc_rport *rport) | |||
769 | * fc_rport_els_rtv_resp() - Request Timeout Value response handler | 758 | * fc_rport_els_rtv_resp() - Request Timeout Value response handler |
770 | * @sp: current sequence in the RTV exchange | 759 | * @sp: current sequence in the RTV exchange |
771 | * @fp: response frame | 760 | * @fp: response frame |
772 | * @rp_arg: Fibre Channel remote port | 761 | * @rdata_arg: private remote port data |
773 | * | 762 | * |
774 | * Many targets don't seem to support this. | 763 | * Many targets don't seem to support this. |
775 | * | 764 | * |
@@ -778,26 +767,26 @@ static void fc_rport_enter_prli(struct fc_rport *rport) | |||
778 | * and then unlock the rport. | 767 | * and then unlock the rport. |
779 | */ | 768 | */ |
780 | static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, | 769 | static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, |
781 | void *rp_arg) | 770 | void *rdata_arg) |
782 | { | 771 | { |
783 | struct fc_rport *rport = rp_arg; | 772 | struct fc_rport_priv *rdata = rdata_arg; |
784 | struct fc_rport_priv *rdata = rport->dd_data; | 773 | struct fc_rport *rport = PRIV_TO_RPORT(rdata); |
785 | u8 op; | 774 | u8 op; |
786 | 775 | ||
787 | mutex_lock(&rdata->rp_mutex); | 776 | mutex_lock(&rdata->rp_mutex); |
788 | 777 | ||
789 | FC_RPORT_DBG(rport, "Received a RTV response\n"); | 778 | FC_RPORT_DBG(rdata, "Received a RTV response\n"); |
790 | 779 | ||
791 | if (rdata->rp_state != RPORT_ST_RTV) { | 780 | if (rdata->rp_state != RPORT_ST_RTV) { |
792 | FC_RPORT_DBG(rport, "Received a RTV response, but in state " | 781 | FC_RPORT_DBG(rdata, "Received a RTV response, but in state " |
793 | "%s\n", fc_rport_state(rport)); | 782 | "%s\n", fc_rport_state(rdata)); |
794 | if (IS_ERR(fp)) | 783 | if (IS_ERR(fp)) |
795 | goto err; | 784 | goto err; |
796 | goto out; | 785 | goto out; |
797 | } | 786 | } |
798 | 787 | ||
799 | if (IS_ERR(fp)) { | 788 | if (IS_ERR(fp)) { |
800 | fc_rport_error(rport, fp); | 789 | fc_rport_error(rdata, fp); |
801 | goto err; | 790 | goto err; |
802 | } | 791 | } |
803 | 792 | ||
@@ -823,7 +812,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
823 | } | 812 | } |
824 | } | 813 | } |
825 | 814 | ||
826 | fc_rport_enter_ready(rport); | 815 | fc_rport_enter_ready(rdata); |
827 | 816 | ||
828 | out: | 817 | out: |
829 | fc_frame_free(fp); | 818 | fc_frame_free(fp); |
@@ -834,62 +823,62 @@ err: | |||
834 | 823 | ||
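Both fc_rport_error_retry() and fc_rport_error() are called in the handlers above but defined outside these hunks; judging by the names and call sites, the former attempts a bounded retry before falling back to the latter. A toy model of that split, with an assumed (made-up) per-port retry budget:

#include <stdio.h>

/* Toy model, not the libfc logic: an assumed retry budget decides whether
 * a failed exchange is retried or escalated to an error event. */
struct model_rdata {
	int retries;
	int max_retries;
};

static void model_rport_error(struct model_rdata *rdata)
{
	printf("giving up after %d retries\n", rdata->retries);
}

static void model_rport_error_retry(struct model_rdata *rdata)
{
	if (rdata->retries < rdata->max_retries) {
		rdata->retries++;
		printf("retry %d of %d\n", rdata->retries, rdata->max_retries);
		return;			/* the real code re-arms a retry timer */
	}
	model_rport_error(rdata);	/* budget exhausted: hard failure */
}

int main(void)
{
	struct model_rdata rdata = { .retries = 0, .max_retries = 3 };

	for (int i = 0; i < 4; i++)
		model_rport_error_retry(&rdata);
	return 0;
}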
835 | /** | 824 | /** |
836 | * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer | 825 | * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer |
837 | * @rport: Fibre Channel remote port to send RTV to | 826 | * @rdata: private remote port data |
838 | * | 827 | * |
839 | * Locking Note: The rport lock is expected to be held before calling | 828 | * Locking Note: The rport lock is expected to be held before calling |
840 | * this routine. | 829 | * this routine. |
841 | */ | 830 | */ |
842 | static void fc_rport_enter_rtv(struct fc_rport *rport) | 831 | static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) |
843 | { | 832 | { |
844 | struct fc_frame *fp; | 833 | struct fc_frame *fp; |
845 | struct fc_rport_priv *rdata = rport->dd_data; | ||
846 | struct fc_lport *lport = rdata->local_port; | 834 | struct fc_lport *lport = rdata->local_port; |
835 | struct fc_rport *rport = PRIV_TO_RPORT(rdata); | ||
847 | 836 | ||
848 | FC_RPORT_DBG(rport, "Port entered RTV state from %s state\n", | 837 | FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n", |
849 | fc_rport_state(rport)); | 838 | fc_rport_state(rdata)); |
850 | 839 | ||
851 | fc_rport_state_enter(rport, RPORT_ST_RTV); | 840 | fc_rport_state_enter(rdata, RPORT_ST_RTV); |
852 | 841 | ||
853 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv)); | 842 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv)); |
854 | if (!fp) { | 843 | if (!fp) { |
855 | fc_rport_error_retry(rport, fp); | 844 | fc_rport_error_retry(rdata, fp); |
856 | return; | 845 | return; |
857 | } | 846 | } |
858 | 847 | ||
859 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV, | 848 | if (!lport->tt.elsct_send(lport, rdata, fp, ELS_RTV, |
860 | fc_rport_rtv_resp, rport, lport->e_d_tov)) | 849 | fc_rport_rtv_resp, rdata, lport->e_d_tov)) |
861 | fc_rport_error_retry(rport, fp); | 850 | fc_rport_error_retry(rdata, fp); |
862 | else | 851 | else |
863 | get_device(&rport->dev); | 852 | get_device(&rport->dev); |
864 | } | 853 | } |
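fc_rport_enter_prli() and fc_rport_enter_rtv() still take get_device(&rport->dev) before handing the frame to elsct_send(), and the (elided) err:/out: unwind in the completion handlers is expected to drop that reference again with put_device(). The toy model below illustrates why the extra reference is held across the asynchronous completion; the plain counter is a stand-in assumption for the refcounted struct device embedded in fc_rport:

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for the device refcount embedded in fc_rport. */
struct model_rport {
	int refcount;
};

static void model_get_device(struct model_rport *rport) { rport->refcount++; }
static void model_put_device(struct model_rport *rport) { rport->refcount--; }

/* The completion runs later; the extra reference keeps the rport alive
 * until the completion has run and dropped it again. */
static void model_completion(struct model_rport *rport)
{
	printf("completion ran, refcount=%d\n", rport->refcount);
	model_put_device(rport);		/* the err:/out: unwind */
}

int main(void)
{
	struct model_rport rport = { .refcount = 1 };

	model_get_device(&rport);		/* before the async send */
	model_completion(&rport);		/* would normally run later */
	assert(rport.refcount == 1);
	printf("back to the owner's reference only\n");
	return 0;
}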
865 | 854 | ||
866 | /** | 855 | /** |
867 | * fc_rport_enter_logo() - Send Logout (LOGO) request to peer | 856 | * fc_rport_enter_logo() - Send Logout (LOGO) request to peer |
868 | * @rport: Fibre Channel remote port to send LOGO to | 857 | * @rdata: private remote port data |
869 | * | 858 | * |
870 | * Locking Note: The rport lock is expected to be held before calling | 859 | * Locking Note: The rport lock is expected to be held before calling |
871 | * this routine. | 860 | * this routine. |
872 | */ | 861 | */ |
873 | static void fc_rport_enter_logo(struct fc_rport *rport) | 862 | static void fc_rport_enter_logo(struct fc_rport_priv *rdata) |
874 | { | 863 | { |
875 | struct fc_rport_priv *rdata = rport->dd_data; | ||
876 | struct fc_lport *lport = rdata->local_port; | 864 | struct fc_lport *lport = rdata->local_port; |
865 | struct fc_rport *rport = PRIV_TO_RPORT(rdata); | ||
877 | struct fc_frame *fp; | 866 | struct fc_frame *fp; |
878 | 867 | ||
879 | FC_RPORT_DBG(rport, "Port entered LOGO state from %s state\n", | 868 | FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n", |
880 | fc_rport_state(rport)); | 869 | fc_rport_state(rdata)); |
881 | 870 | ||
882 | fc_rport_state_enter(rport, RPORT_ST_LOGO); | 871 | fc_rport_state_enter(rdata, RPORT_ST_LOGO); |
883 | 872 | ||
884 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo)); | 873 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo)); |
885 | if (!fp) { | 874 | if (!fp) { |
886 | fc_rport_error_retry(rport, fp); | 875 | fc_rport_error_retry(rdata, fp); |
887 | return; | 876 | return; |
888 | } | 877 | } |
889 | 878 | ||
890 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO, | 879 | if (!lport->tt.elsct_send(lport, rdata, fp, ELS_LOGO, |
891 | fc_rport_logo_resp, rport, lport->e_d_tov)) | 880 | fc_rport_logo_resp, rdata, lport->e_d_tov)) |
892 | fc_rport_error_retry(rport, fp); | 881 | fc_rport_error_retry(rdata, fp); |
893 | else | 882 | else |
894 | get_device(&rport->dev); | 883 | get_device(&rport->dev); |
895 | } | 884 | } |
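Taken together, the enter_* routines walk the remote port through its login state machine: PLOGI, then PRLI, then RTV, then READY, with LOGO and DELETE as the teardown states. The compact model below encodes that progression; the transition table is inferred from the calls visible in this patch rather than copied from a libfc header:

#include <stdio.h>

enum model_state { ST_INIT, ST_PLOGI, ST_PRLI, ST_RTV, ST_READY,
		   ST_LOGO, ST_DELETE };

static const char *model_state_name(enum model_state s)
{
	static const char *names[] = {
		"Init", "PLOGI", "PRLI", "RTV", "Ready", "LOGO", "Delete",
	};
	return names[s];
}

/* Happy-path login progression as seen in the enter_* calls above. */
static enum model_state model_next(enum model_state s)
{
	switch (s) {
	case ST_INIT:	return ST_PLOGI;	/* send port login */
	case ST_PLOGI:	return ST_PRLI;		/* send process login */
	case ST_PRLI:	return ST_RTV;		/* request timeout values */
	case ST_RTV:	return ST_READY;	/* usable by FCP/SCSI */
	default:	return ST_DELETE;	/* LOGO/errors tear it down */
	}
}

int main(void)
{
	enum model_state s = ST_INIT;

	while (s != ST_READY) {
		printf("%s -> ", model_state_name(s));
		s = model_next(s);
	}
	printf("%s\n", model_state_name(s));
	return 0;
}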
@@ -899,16 +888,15 @@ static void fc_rport_enter_logo(struct fc_rport *rport) | |||
899 | * fc_rport_recv_req() - Receive a request from a rport | 888 | * fc_rport_recv_req() - Receive a request from a rport |
900 | * @sp: current sequence in the PLOGI exchange | 889 | * @sp: current sequence in the PLOGI exchange |
901 | * @fp: response frame | 890 | * @fp: response frame |
902 | * @rp_arg: Fibre Channel remote port | 891 | * @rdata_arg: private remote port data |
903 | * | 892 | * |
904 | * Locking Note: Called without the rport lock held. This | 893 | * Locking Note: Called without the rport lock held. This |
905 | * function will hold the rport lock, call an _enter_* | 894 | * function will hold the rport lock, call an _enter_* |
906 | * function and then unlock the rport. | 895 | * function and then unlock the rport. |
907 | */ | 896 | */ |
908 | void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, | 897 | void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, |
909 | struct fc_rport *rport) | 898 | struct fc_rport_priv *rdata) |
910 | { | 899 | { |
911 | struct fc_rport_priv *rdata = rport->dd_data; | ||
912 | struct fc_lport *lport = rdata->local_port; | 900 | struct fc_lport *lport = rdata->local_port; |
913 | 901 | ||
914 | struct fc_frame_header *fh; | 902 | struct fc_frame_header *fh; |
@@ -927,16 +915,16 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, | |||
927 | op = fc_frame_payload_op(fp); | 915 | op = fc_frame_payload_op(fp); |
928 | switch (op) { | 916 | switch (op) { |
929 | case ELS_PLOGI: | 917 | case ELS_PLOGI: |
930 | fc_rport_recv_plogi_req(rport, sp, fp); | 918 | fc_rport_recv_plogi_req(rdata, sp, fp); |
931 | break; | 919 | break; |
932 | case ELS_PRLI: | 920 | case ELS_PRLI: |
933 | fc_rport_recv_prli_req(rport, sp, fp); | 921 | fc_rport_recv_prli_req(rdata, sp, fp); |
934 | break; | 922 | break; |
935 | case ELS_PRLO: | 923 | case ELS_PRLO: |
936 | fc_rport_recv_prlo_req(rport, sp, fp); | 924 | fc_rport_recv_prlo_req(rdata, sp, fp); |
937 | break; | 925 | break; |
938 | case ELS_LOGO: | 926 | case ELS_LOGO: |
939 | fc_rport_recv_logo_req(rport, sp, fp); | 927 | fc_rport_recv_logo_req(rdata, sp, fp); |
940 | break; | 928 | break; |
941 | case ELS_RRQ: | 929 | case ELS_RRQ: |
942 | els_data.fp = fp; | 930 | els_data.fp = fp; |
@@ -958,17 +946,17 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, | |||
958 | 946 | ||
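fc_rport_recv_req() dispatches on the ELS opcode and, after this patch, hands rdata straight to the per-request handlers. A minimal model of that dispatch follows; the opcode constants are invented for the model and are not the FC-LS command codes:

#include <stdio.h>

/* Model opcodes only; the real ELS command codes live in fc_els.h. */
enum model_els { M_PLOGI, M_PRLI, M_PRLO, M_LOGO, M_OTHER };

struct model_rdata { const char *name; };

static void model_recv_plogi(struct model_rdata *r) { printf("%s: PLOGI\n", r->name); }
static void model_recv_prli(struct model_rdata *r)  { printf("%s: PRLI\n", r->name); }
static void model_recv_prlo(struct model_rdata *r)  { printf("%s: PRLO\n", r->name); }
static void model_recv_logo(struct model_rdata *r)  { printf("%s: LOGO\n", r->name); }

/* The request handlers take the priv struct directly, as in this patch. */
static void model_recv_req(enum model_els op, struct model_rdata *rdata)
{
	switch (op) {
	case M_PLOGI:	model_recv_plogi(rdata);	break;
	case M_PRLI:	model_recv_prli(rdata);		break;
	case M_PRLO:	model_recv_prlo(rdata);		break;
	case M_LOGO:	model_recv_logo(rdata);		break;
	default:	printf("%s: other/reject\n", rdata->name); break;
	}
}

int main(void)
{
	struct model_rdata rdata = { .name = "rport 0a0b0c" };

	model_recv_req(M_PLOGI, &rdata);
	model_recv_req(M_LOGO, &rdata);
	return 0;
}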
959 | /** | 947 | /** |
960 | * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request | 948 | * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request |
961 | * @rport: Fibre Channel remote port that initiated PLOGI | 949 | * @rdata: private remote port data |
962 | * @sp: current sequence in the PLOGI exchange | 950 | * @sp: current sequence in the PLOGI exchange |
963 | * @fp: PLOGI request frame | 951 | * @fp: PLOGI request frame |
964 | * | 952 | * |
965 | * Locking Note: The rport lock is expected to be held before calling | 953 | * Locking Note: The rport lock is expected to be held before calling |

966 | * this function. | 954 | * this function. |
967 | */ | 955 | */ |
968 | static void fc_rport_recv_plogi_req(struct fc_rport *rport, | 956 | static void fc_rport_recv_plogi_req(struct fc_rport_priv *rdata, |
969 | struct fc_seq *sp, struct fc_frame *rx_fp) | 957 | struct fc_seq *sp, struct fc_frame *rx_fp) |
970 | { | 958 | { |
971 | struct fc_rport_priv *rdata = rport->dd_data; | 959 | struct fc_rport *rport = PRIV_TO_RPORT(rdata); |
972 | struct fc_lport *lport = rdata->local_port; | 960 | struct fc_lport *lport = rdata->local_port; |
973 | struct fc_frame *fp = rx_fp; | 961 | struct fc_frame *fp = rx_fp; |
974 | struct fc_exch *ep; | 962 | struct fc_exch *ep; |
@@ -984,13 +972,13 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, | |||
984 | 972 | ||
985 | fh = fc_frame_header_get(fp); | 973 | fh = fc_frame_header_get(fp); |
986 | 974 | ||
987 | FC_RPORT_DBG(rport, "Received PLOGI request while in state %s\n", | 975 | FC_RPORT_DBG(rdata, "Received PLOGI request while in state %s\n", |
988 | fc_rport_state(rport)); | 976 | fc_rport_state(rdata)); |
989 | 977 | ||
990 | sid = ntoh24(fh->fh_s_id); | 978 | sid = ntoh24(fh->fh_s_id); |
991 | pl = fc_frame_payload_get(fp, sizeof(*pl)); | 979 | pl = fc_frame_payload_get(fp, sizeof(*pl)); |
992 | if (!pl) { | 980 | if (!pl) { |
993 | FC_RPORT_DBG(rport, "Received PLOGI too short\n"); | 981 | FC_RPORT_DBG(rdata, "Received PLOGI too short\n"); |
994 | WARN_ON(1); | 982 | WARN_ON(1); |
995 | /* XXX TBD: send reject? */ | 983 | /* XXX TBD: send reject? */ |
996 | fc_frame_free(fp); | 984 | fc_frame_free(fp); |
@@ -1012,25 +1000,25 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, | |||
1012 | */ | 1000 | */ |
1013 | switch (rdata->rp_state) { | 1001 | switch (rdata->rp_state) { |
1014 | case RPORT_ST_INIT: | 1002 | case RPORT_ST_INIT: |
1015 | FC_RPORT_DBG(rport, "Received PLOGI, wwpn %llx state INIT " | 1003 | FC_RPORT_DBG(rdata, "Received PLOGI, wwpn %llx state INIT " |
1016 | "- reject\n", (unsigned long long)wwpn); | 1004 | "- reject\n", (unsigned long long)wwpn); |
1017 | reject = ELS_RJT_UNSUP; | 1005 | reject = ELS_RJT_UNSUP; |
1018 | break; | 1006 | break; |
1019 | case RPORT_ST_PLOGI: | 1007 | case RPORT_ST_PLOGI: |
1020 | FC_RPORT_DBG(rport, "Received PLOGI in PLOGI state %d\n", | 1008 | FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state %d\n", |
1021 | rdata->rp_state); | 1009 | rdata->rp_state); |
1022 | if (wwpn < lport->wwpn) | 1010 | if (wwpn < lport->wwpn) |
1023 | reject = ELS_RJT_INPROG; | 1011 | reject = ELS_RJT_INPROG; |
1024 | break; | 1012 | break; |
1025 | case RPORT_ST_PRLI: | 1013 | case RPORT_ST_PRLI: |
1026 | case RPORT_ST_READY: | 1014 | case RPORT_ST_READY: |
1027 | FC_RPORT_DBG(rport, "Received PLOGI in logged-in state %d " | 1015 | FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d " |
1028 | "- ignored for now\n", rdata->rp_state); | 1016 | "- ignored for now\n", rdata->rp_state); |
1029 | /* XXX TBD - should reset */ | 1017 | /* XXX TBD - should reset */ |
1030 | break; | 1018 | break; |
1031 | case RPORT_ST_DELETE: | 1019 | case RPORT_ST_DELETE: |
1032 | default: | 1020 | default: |
1033 | FC_RPORT_DBG(rport, "Received PLOGI in unexpected " | 1021 | FC_RPORT_DBG(rdata, "Received PLOGI in unexpected " |
1034 | "state %d\n", rdata->rp_state); | 1022 | "state %d\n", rdata->rp_state); |
1035 | fc_frame_free(fp); | 1023 | fc_frame_free(fp); |
1036 | return; | 1024 | return; |
@@ -1074,24 +1062,24 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, | |||
1074 | FC_TYPE_ELS, f_ctl, 0); | 1062 | FC_TYPE_ELS, f_ctl, 0); |
1075 | lport->tt.seq_send(lport, sp, fp); | 1063 | lport->tt.seq_send(lport, sp, fp); |
1076 | if (rdata->rp_state == RPORT_ST_PLOGI) | 1064 | if (rdata->rp_state == RPORT_ST_PLOGI) |
1077 | fc_rport_enter_prli(rport); | 1065 | fc_rport_enter_prli(rdata); |
1078 | } | 1066 | } |
1079 | } | 1067 | } |
1080 | } | 1068 | } |
1081 | 1069 | ||
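The RPORT_ST_PLOGI case above implements the tie-break for crossed PLOGIs: while our own PLOGI is outstanding, the incoming one is rejected as "in progress" only if the peer's WWPN is lower than ours, so exactly one side backs off. A small model of that rule, with made-up WWPN values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Crossed-PLOGI tie-break: reject the incoming PLOGI only when the peer's
 * WWPN is lower than the local one (model of the check above). */
static bool model_reject_crossed_plogi(uint64_t local_wwpn, uint64_t peer_wwpn)
{
	return peer_wwpn < local_wwpn;
}

int main(void)
{
	uint64_t a = 0x2000000000000001ULL;	/* example WWPNs, not real ones */
	uint64_t b = 0x2000000000000002ULL;

	printf("port a rejects b's PLOGI: %s\n",
	       model_reject_crossed_plogi(a, b) ? "yes" : "no");
	printf("port b rejects a's PLOGI: %s\n",
	       model_reject_crossed_plogi(b, a) ? "yes" : "no");
	return 0;
}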
1082 | /** | 1070 | /** |
1083 | * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request | 1071 | * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request |
1084 | * @rport: Fibre Channel remote port that initiated PRLI | 1072 | * @rdata: private remote port data |
1085 | * @sp: current sequence in the PRLI exchange | 1073 | * @sp: current sequence in the PRLI exchange |
1086 | * @fp: PRLI request frame | 1074 | * @fp: PRLI request frame |
1087 | * | 1075 | * |
1088 | * Locking Note: The rport lock is expected to be held before calling | 1076 | * Locking Note: The rport lock is expected to be held before calling |
1089 | * this function. | 1077 | * this function. |
1090 | */ | 1078 | */ |
1091 | static void fc_rport_recv_prli_req(struct fc_rport *rport, | 1079 | static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, |
1092 | struct fc_seq *sp, struct fc_frame *rx_fp) | 1080 | struct fc_seq *sp, struct fc_frame *rx_fp) |
1093 | { | 1081 | { |
1094 | struct fc_rport_priv *rdata = rport->dd_data; | 1082 | struct fc_rport *rport = PRIV_TO_RPORT(rdata); |
1095 | struct fc_lport *lport = rdata->local_port; | 1083 | struct fc_lport *lport = rdata->local_port; |
1096 | struct fc_exch *ep; | 1084 | struct fc_exch *ep; |
1097 | struct fc_frame *fp; | 1085 | struct fc_frame *fp; |
@@ -1115,8 +1103,8 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport, | |||
1115 | 1103 | ||
1116 | fh = fc_frame_header_get(rx_fp); | 1104 | fh = fc_frame_header_get(rx_fp); |
1117 | 1105 | ||
1118 | FC_RPORT_DBG(rport, "Received PRLI request while in state %s\n", | 1106 | FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n", |
1119 | fc_rport_state(rport)); | 1107 | fc_rport_state(rdata)); |
1120 | 1108 | ||
1121 | switch (rdata->rp_state) { | 1109 | switch (rdata->rp_state) { |
1122 | case RPORT_ST_PRLI: | 1110 | case RPORT_ST_PRLI: |
@@ -1220,7 +1208,7 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport, | |||
1220 | */ | 1208 | */ |
1221 | switch (rdata->rp_state) { | 1209 | switch (rdata->rp_state) { |
1222 | case RPORT_ST_PRLI: | 1210 | case RPORT_ST_PRLI: |
1223 | fc_rport_enter_ready(rport); | 1211 | fc_rport_enter_ready(rdata); |
1224 | break; | 1212 | break; |
1225 | case RPORT_ST_READY: | 1213 | case RPORT_ST_READY: |
1226 | break; | 1214 | break; |
@@ -1233,17 +1221,17 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport, | |||
1233 | 1221 | ||
1234 | /** | 1222 | /** |
1235 | * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request | 1223 | * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request |
1236 | * @rport: Fibre Channel remote port that initiated PRLO | 1224 | * @rdata: private remote port data |
1237 | * @sp: current sequence in the PRLO exchange | 1225 | * @sp: current sequence in the PRLO exchange |
1238 | * @fp: PRLO request frame | 1226 | * @fp: PRLO request frame |
1239 | * | 1227 | * |
1240 | * Locking Note: The rport lock is exected to be held before calling | 1228 | * Locking Note: The rport lock is exected to be held before calling |
1241 | * this function. | 1229 | * this function. |
1242 | */ | 1230 | */ |
1243 | static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp, | 1231 | static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, |
1232 | struct fc_seq *sp, | ||
1244 | struct fc_frame *fp) | 1233 | struct fc_frame *fp) |
1245 | { | 1234 | { |
1246 | struct fc_rport_priv *rdata = rport->dd_data; | ||
1247 | struct fc_lport *lport = rdata->local_port; | 1235 | struct fc_lport *lport = rdata->local_port; |
1248 | 1236 | ||
1249 | struct fc_frame_header *fh; | 1237 | struct fc_frame_header *fh; |
@@ -1251,8 +1239,8 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp, | |||
1251 | 1239 | ||
1252 | fh = fc_frame_header_get(fp); | 1240 | fh = fc_frame_header_get(fp); |
1253 | 1241 | ||
1254 | FC_RPORT_DBG(rport, "Received PRLO request while in state %s\n", | 1242 | FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n", |
1255 | fc_rport_state(rport)); | 1243 | fc_rport_state(rdata)); |
1256 | 1244 | ||
1257 | if (rdata->rp_state == RPORT_ST_DELETE) { | 1245 | if (rdata->rp_state == RPORT_ST_DELETE) { |
1258 | fc_frame_free(fp); | 1246 | fc_frame_free(fp); |
@@ -1268,24 +1256,24 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp, | |||
1268 | 1256 | ||
1269 | /** | 1257 | /** |
1270 | * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request | 1258 | * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request |
1271 | * @rport: Fibre Channel remote port that initiated LOGO | 1259 | * @rdata: private remote port data |
1272 | * @sp: current sequence in the LOGO exchange | 1260 | * @sp: current sequence in the LOGO exchange |
1273 | * @fp: LOGO request frame | 1261 | * @fp: LOGO request frame |
1274 | * | 1262 | * |
1275 | * Locking Note: The rport lock is expected to be held before calling | 1263 | * Locking Note: The rport lock is expected to be held before calling |
1276 | * this function. | 1264 | * this function. |
1277 | */ | 1265 | */ |
1278 | static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp, | 1266 | static void fc_rport_recv_logo_req(struct fc_rport_priv *rdata, |
1267 | struct fc_seq *sp, | ||
1279 | struct fc_frame *fp) | 1268 | struct fc_frame *fp) |
1280 | { | 1269 | { |
1281 | struct fc_frame_header *fh; | 1270 | struct fc_frame_header *fh; |
1282 | struct fc_rport_priv *rdata = rport->dd_data; | ||
1283 | struct fc_lport *lport = rdata->local_port; | 1271 | struct fc_lport *lport = rdata->local_port; |
1284 | 1272 | ||
1285 | fh = fc_frame_header_get(fp); | 1273 | fh = fc_frame_header_get(fp); |
1286 | 1274 | ||
1287 | FC_RPORT_DBG(rport, "Received LOGO request while in state %s\n", | 1275 | FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", |
1288 | fc_rport_state(rport)); | 1276 | fc_rport_state(rdata)); |
1289 | 1277 | ||
1290 | if (rdata->rp_state == RPORT_ST_DELETE) { | 1278 | if (rdata->rp_state == RPORT_ST_DELETE) { |
1291 | fc_frame_free(fp); | 1279 | fc_frame_free(fp); |
@@ -1293,7 +1281,7 @@ static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp, | |||
1293 | } | 1281 | } |
1294 | 1282 | ||
1295 | rdata->event = RPORT_EV_LOGO; | 1283 | rdata->event = RPORT_EV_LOGO; |
1296 | fc_rport_state_enter(rport, RPORT_ST_DELETE); | 1284 | fc_rport_state_enter(rdata, RPORT_ST_DELETE); |
1297 | queue_work(rport_event_queue, &rdata->event_work); | 1285 | queue_work(rport_event_queue, &rdata->event_work); |
1298 | 1286 | ||
1299 | lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); | 1287 | lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); |
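The incoming LOGO is acknowledged immediately with LS_ACC, while the actual teardown is only recorded (event plus DELETE state) and queued for the rport event worker to run outside the request path. Below is a simplified, single-threaded model of that deferral; the explicit "run the pending work" call stands in for the workqueue the real code uses:

#include <stdio.h>

enum model_event { EV_NONE, EV_LOGO };
enum model_state { ST_READY, ST_DELETE };

struct model_rdata {
	enum model_event event;
	enum model_state rp_state;
	int work_pending;
};

/* Request path: record what happened, mark DELETE, defer the teardown. */
static void model_recv_logo_req(struct model_rdata *rdata)
{
	rdata->event = EV_LOGO;
	rdata->rp_state = ST_DELETE;
	rdata->work_pending = 1;	/* the real code calls queue_work() */
	printf("LOGO acked, teardown queued\n");
}

/* Worker context: performs the teardown the request path deferred. */
static void model_event_work(struct model_rdata *rdata)
{
	if (rdata->work_pending && rdata->event == EV_LOGO) {
		rdata->work_pending = 0;
		printf("worker: removing remote port\n");
	}
}

int main(void)
{
	struct model_rdata rdata = { EV_NONE, ST_READY, 0 };

	model_recv_logo_req(&rdata);
	model_event_work(&rdata);	/* would normally run from a workqueue */
	return 0;
}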