Diffstat (limited to 'drivers/scsi/lpfc/lpfc_hbadisc.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  2262
1 file changed, 1401 insertions, 861 deletions
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 61caa8d379e..f2f4639eab5 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -36,6 +36,8 @@ | |||
36 | #include "lpfc.h" | 36 | #include "lpfc.h" |
37 | #include "lpfc_logmsg.h" | 37 | #include "lpfc_logmsg.h" |
38 | #include "lpfc_crtn.h" | 38 | #include "lpfc_crtn.h" |
39 | #include "lpfc_vport.h" | ||
40 | #include "lpfc_debugfs.h" | ||
39 | 41 | ||
40 | /* AlpaArray for assignment of scsid for scan-down and bind_method */ | 42 | /* AlpaArray for assignment of scsid for scan-down and bind_method */ |
41 | static uint8_t lpfcAlpaArray[] = { | 43 | static uint8_t lpfcAlpaArray[] = { |
@@ -54,7 +56,7 @@ static uint8_t lpfcAlpaArray[] = { | |||
54 | 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01 | 56 | 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01 |
55 | }; | 57 | }; |
56 | 58 | ||
57 | static void lpfc_disc_timeout_handler(struct lpfc_hba *); | 59 | static void lpfc_disc_timeout_handler(struct lpfc_vport *); |
58 | 60 | ||
59 | void | 61 | void |
60 | lpfc_terminate_rport_io(struct fc_rport *rport) | 62 | lpfc_terminate_rport_io(struct fc_rport *rport) |
@@ -74,14 +76,16 @@ lpfc_terminate_rport_io(struct fc_rport *rport) | |||
74 | return; | 76 | return; |
75 | } | 77 | } |
76 | 78 | ||
77 | phba = ndlp->nlp_phba; | 79 | phba = ndlp->vport->phba; |
80 | |||
81 | lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT, | ||
82 | "rport terminate: sid:x%x did:x%x flg:x%x", | ||
83 | ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); | ||
78 | 84 | ||
79 | spin_lock_irq(phba->host->host_lock); | ||
80 | if (ndlp->nlp_sid != NLP_NO_SID) { | 85 | if (ndlp->nlp_sid != NLP_NO_SID) { |
81 | lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], | 86 | lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], |
82 | ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); | 87 | ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); |
83 | } | 88 | } |
84 | spin_unlock_irq(phba->host->host_lock); | ||
85 | 89 | ||
86 | return; | 90 | return; |
87 | } | 91 | } |
@@ -94,105 +98,213 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) | |||
94 | { | 98 | { |
95 | struct lpfc_rport_data *rdata; | 99 | struct lpfc_rport_data *rdata; |
96 | struct lpfc_nodelist * ndlp; | 100 | struct lpfc_nodelist * ndlp; |
97 | uint8_t *name; | 101 | struct lpfc_vport *vport; |
98 | int warn_on = 0; | 102 | struct lpfc_hba *phba; |
99 | struct lpfc_hba *phba; | 103 | struct completion devloss_compl; |
104 | struct lpfc_work_evt *evtp; | ||
100 | 105 | ||
101 | rdata = rport->dd_data; | 106 | rdata = rport->dd_data; |
102 | ndlp = rdata->pnode; | 107 | ndlp = rdata->pnode; |
103 | 108 | ||
104 | if (!ndlp) { | 109 | if (!ndlp) { |
105 | if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) | 110 | if (rport->scsi_target_id != -1) { |
106 | printk(KERN_ERR "Cannot find remote node" | 111 | printk(KERN_ERR "Cannot find remote node" |
107 | " for rport in dev_loss_tmo_callbk x%x\n", | 112 | " for rport in dev_loss_tmo_callbk x%x\n", |
108 | rport->port_id); | 113 | rport->port_id); |
114 | } | ||
109 | return; | 115 | return; |
110 | } | 116 | } |
111 | 117 | ||
112 | if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) | 118 | vport = ndlp->vport; |
119 | phba = vport->phba; | ||
120 | |||
121 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, | ||
122 | "rport devlosscb: sid:x%x did:x%x flg:x%x", | ||
123 | ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); | ||
124 | |||
125 | init_completion(&devloss_compl); | ||
126 | evtp = &ndlp->dev_loss_evt; | ||
127 | |||
128 | if (!list_empty(&evtp->evt_listp)) | ||
129 | return; | ||
130 | |||
131 | spin_lock_irq(&phba->hbalock); | ||
132 | evtp->evt_arg1 = ndlp; | ||
133 | evtp->evt_arg2 = &devloss_compl; | ||
134 | evtp->evt = LPFC_EVT_DEV_LOSS; | ||
135 | list_add_tail(&evtp->evt_listp, &phba->work_list); | ||
136 | if (phba->work_wait) | ||
137 | wake_up(phba->work_wait); | ||
138 | |||
139 | spin_unlock_irq(&phba->hbalock); | ||
140 | |||
141 | wait_for_completion(&devloss_compl); | ||
142 | |||
143 | return; | ||
144 | } | ||
145 | |||
146 | /* | ||
147 | * This function is called from the worker thread when dev_loss_tmo | ||
148 | * expire. | ||
149 | */ | ||
150 | void | ||
151 | lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) | ||
152 | { | ||
153 | struct lpfc_rport_data *rdata; | ||
154 | struct fc_rport *rport; | ||
155 | struct lpfc_vport *vport; | ||
156 | struct lpfc_hba *phba; | ||
157 | uint8_t *name; | ||
158 | int warn_on = 0; | ||
159 | |||
160 | rport = ndlp->rport; | ||
161 | |||
162 | if (!rport) | ||
163 | return; | ||
164 | |||
165 | rdata = rport->dd_data; | ||
166 | name = (uint8_t *) &ndlp->nlp_portname; | ||
167 | vport = ndlp->vport; | ||
168 | phba = vport->phba; | ||
169 | |||
170 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, | ||
171 | "rport devlosstmo:did:x%x type:x%x id:x%x", | ||
172 | ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); | ||
173 | |||
174 | if (!(vport->load_flag & FC_UNLOADING) && | ||
175 | ndlp->nlp_state == NLP_STE_MAPPED_NODE) | ||
113 | return; | 176 | return; |
114 | 177 | ||
115 | name = (uint8_t *)&ndlp->nlp_portname; | 178 | if (ndlp->nlp_type & NLP_FABRIC) { |
116 | phba = ndlp->nlp_phba; | 179 | int put_node; |
180 | int put_rport; | ||
117 | 181 | ||
118 | spin_lock_irq(phba->host->host_lock); | 182 | /* We will clean up these Nodes in linkup */ |
183 | put_node = rdata->pnode != NULL; | ||
184 | put_rport = ndlp->rport != NULL; | ||
185 | rdata->pnode = NULL; | ||
186 | ndlp->rport = NULL; | ||
187 | if (put_node) | ||
188 | lpfc_nlp_put(ndlp); | ||
189 | if (put_rport) | ||
190 | put_device(&rport->dev); | ||
191 | return; | ||
192 | } | ||
119 | 193 | ||
120 | if (ndlp->nlp_sid != NLP_NO_SID) { | 194 | if (ndlp->nlp_sid != NLP_NO_SID) { |
121 | warn_on = 1; | 195 | warn_on = 1; |
122 | /* flush the target */ | 196 | /* flush the target */ |
123 | lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], | 197 | lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], |
124 | ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); | 198 | ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); |
125 | } | 199 | } |
126 | if (phba->fc_flag & FC_UNLOADING) | 200 | if (vport->load_flag & FC_UNLOADING) |
127 | warn_on = 0; | 201 | warn_on = 0; |
128 | 202 | ||
129 | spin_unlock_irq(phba->host->host_lock); | ||
130 | |||
131 | if (warn_on) { | 203 | if (warn_on) { |
132 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | 204 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
133 | "%d:0203 Devloss timeout on " | 205 | "%d (%d):0203 Devloss timeout on " |
134 | "WWPN %x:%x:%x:%x:%x:%x:%x:%x " | 206 | "WWPN %x:%x:%x:%x:%x:%x:%x:%x " |
135 | "NPort x%x Data: x%x x%x x%x\n", | 207 | "NPort x%x Data: x%x x%x x%x\n", |
136 | phba->brd_no, | 208 | phba->brd_no, vport->vpi, |
137 | *name, *(name+1), *(name+2), *(name+3), | 209 | *name, *(name+1), *(name+2), *(name+3), |
138 | *(name+4), *(name+5), *(name+6), *(name+7), | 210 | *(name+4), *(name+5), *(name+6), *(name+7), |
139 | ndlp->nlp_DID, ndlp->nlp_flag, | 211 | ndlp->nlp_DID, ndlp->nlp_flag, |
140 | ndlp->nlp_state, ndlp->nlp_rpi); | 212 | ndlp->nlp_state, ndlp->nlp_rpi); |
141 | } else { | 213 | } else { |
142 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, | 214 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, |
143 | "%d:0204 Devloss timeout on " | 215 | "%d (%d):0204 Devloss timeout on " |
144 | "WWPN %x:%x:%x:%x:%x:%x:%x:%x " | 216 | "WWPN %x:%x:%x:%x:%x:%x:%x:%x " |
145 | "NPort x%x Data: x%x x%x x%x\n", | 217 | "NPort x%x Data: x%x x%x x%x\n", |
146 | phba->brd_no, | 218 | phba->brd_no, vport->vpi, |
147 | *name, *(name+1), *(name+2), *(name+3), | 219 | *name, *(name+1), *(name+2), *(name+3), |
148 | *(name+4), *(name+5), *(name+6), *(name+7), | 220 | *(name+4), *(name+5), *(name+6), *(name+7), |
149 | ndlp->nlp_DID, ndlp->nlp_flag, | 221 | ndlp->nlp_DID, ndlp->nlp_flag, |
150 | ndlp->nlp_state, ndlp->nlp_rpi); | 222 | ndlp->nlp_state, ndlp->nlp_rpi); |
151 | } | 223 | } |
152 | 224 | ||
153 | if (!(phba->fc_flag & FC_UNLOADING) && | 225 | if (!(vport->load_flag & FC_UNLOADING) && |
154 | !(ndlp->nlp_flag & NLP_DELAY_TMO) && | 226 | !(ndlp->nlp_flag & NLP_DELAY_TMO) && |
155 | !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && | 227 | !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && |
156 | (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) | 228 | (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) |
157 | lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM); | 229 | lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); |
158 | else { | 230 | else { |
231 | int put_node; | ||
232 | int put_rport; | ||
233 | |||
234 | put_node = rdata->pnode != NULL; | ||
235 | put_rport = ndlp->rport != NULL; | ||
159 | rdata->pnode = NULL; | 236 | rdata->pnode = NULL; |
160 | ndlp->rport = NULL; | 237 | ndlp->rport = NULL; |
161 | lpfc_nlp_put(ndlp); | 238 | if (put_node) |
162 | put_device(&rport->dev); | 239 | lpfc_nlp_put(ndlp); |
240 | if (put_rport) | ||
241 | put_device(&rport->dev); | ||
163 | } | 242 | } |
243 | } | ||
244 | |||
164 | 245 | ||
246 | void | ||
247 | lpfc_worker_wake_up(struct lpfc_hba *phba) | ||
248 | { | ||
249 | wake_up(phba->work_wait); | ||
165 | return; | 250 | return; |
166 | } | 251 | } |
167 | 252 | ||
168 | static void | 253 | static void |
169 | lpfc_work_list_done(struct lpfc_hba * phba) | 254 | lpfc_work_list_done(struct lpfc_hba *phba) |
170 | { | 255 | { |
171 | struct lpfc_work_evt *evtp = NULL; | 256 | struct lpfc_work_evt *evtp = NULL; |
172 | struct lpfc_nodelist *ndlp; | 257 | struct lpfc_nodelist *ndlp; |
258 | struct lpfc_vport *vport; | ||
173 | int free_evt; | 259 | int free_evt; |
174 | 260 | ||
175 | spin_lock_irq(phba->host->host_lock); | 261 | spin_lock_irq(&phba->hbalock); |
176 | while(!list_empty(&phba->work_list)) { | 262 | while (!list_empty(&phba->work_list)) { |
177 | list_remove_head((&phba->work_list), evtp, typeof(*evtp), | 263 | list_remove_head((&phba->work_list), evtp, typeof(*evtp), |
178 | evt_listp); | 264 | evt_listp); |
179 | spin_unlock_irq(phba->host->host_lock); | 265 | spin_unlock_irq(&phba->hbalock); |
180 | free_evt = 1; | 266 | free_evt = 1; |
181 | switch (evtp->evt) { | 267 | switch (evtp->evt) { |
268 | case LPFC_EVT_DEV_LOSS_DELAY: | ||
269 | free_evt = 0; /* evt is part of ndlp */ | ||
270 | ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); | ||
271 | vport = ndlp->vport; | ||
272 | if (!vport) | ||
273 | break; | ||
274 | |||
275 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, | ||
276 | "rport devlossdly:did:x%x flg:x%x", | ||
277 | ndlp->nlp_DID, ndlp->nlp_flag, 0); | ||
278 | |||
279 | if (!(vport->load_flag & FC_UNLOADING) && | ||
280 | !(ndlp->nlp_flag & NLP_DELAY_TMO) && | ||
281 | !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { | ||
282 | lpfc_disc_state_machine(vport, ndlp, NULL, | ||
283 | NLP_EVT_DEVICE_RM); | ||
284 | } | ||
285 | break; | ||
182 | case LPFC_EVT_ELS_RETRY: | 286 | case LPFC_EVT_ELS_RETRY: |
183 | ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); | 287 | ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); |
184 | lpfc_els_retry_delay_handler(ndlp); | 288 | lpfc_els_retry_delay_handler(ndlp); |
289 | free_evt = 0; /* evt is part of ndlp */ | ||
290 | break; | ||
291 | case LPFC_EVT_DEV_LOSS: | ||
292 | ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); | ||
293 | lpfc_nlp_get(ndlp); | ||
294 | lpfc_dev_loss_tmo_handler(ndlp); | ||
185 | free_evt = 0; | 295 | free_evt = 0; |
296 | complete((struct completion *)(evtp->evt_arg2)); | ||
297 | lpfc_nlp_put(ndlp); | ||
186 | break; | 298 | break; |
187 | case LPFC_EVT_ONLINE: | 299 | case LPFC_EVT_ONLINE: |
188 | if (phba->hba_state < LPFC_LINK_DOWN) | 300 | if (phba->link_state < LPFC_LINK_DOWN) |
189 | *(int *)(evtp->evt_arg1) = lpfc_online(phba); | 301 | *(int *) (evtp->evt_arg1) = lpfc_online(phba); |
190 | else | 302 | else |
191 | *(int *)(evtp->evt_arg1) = 0; | 303 | *(int *) (evtp->evt_arg1) = 0; |
192 | complete((struct completion *)(evtp->evt_arg2)); | 304 | complete((struct completion *)(evtp->evt_arg2)); |
193 | break; | 305 | break; |
194 | case LPFC_EVT_OFFLINE_PREP: | 306 | case LPFC_EVT_OFFLINE_PREP: |
195 | if (phba->hba_state >= LPFC_LINK_DOWN) | 307 | if (phba->link_state >= LPFC_LINK_DOWN) |
196 | lpfc_offline_prep(phba); | 308 | lpfc_offline_prep(phba); |
197 | *(int *)(evtp->evt_arg1) = 0; | 309 | *(int *)(evtp->evt_arg1) = 0; |
198 | complete((struct completion *)(evtp->evt_arg2)); | 310 | complete((struct completion *)(evtp->evt_arg2)); |
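The hunk above moves dev_loss handling onto the driver's worker thread: lpfc_dev_loss_tmo_callbk() queues an LPFC_EVT_DEV_LOSS event whose storage is embedded in the ndlp, wakes the worker, and then blocks on an on-stack completion until lpfc_dev_loss_tmo_handler() has run in process context. Below is a minimal sketch of that hand-off; the demo_* names are illustrative stand-ins for the driver's types, and it assumes the embedded list head was initialized when the node was set up.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/wait.h>

struct demo_evt {                       /* storage embedded in the node */
	struct list_head evt_listp;     /* INIT_LIST_HEAD()'d at node setup */
	void *arg1;
	struct completion *done;
};

static LIST_HEAD(demo_work_list);
static DEFINE_SPINLOCK(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);

/* Worker side: runs in process context, then releases the waiter. */
static void demo_handle_dev_loss(struct demo_evt *evtp)
{
	/* ... the slow dev_loss work happens here ... */
	complete(evtp->done);
}

/* Callback side: queue the event and sleep until the worker is done. */
static void demo_post_dev_loss_and_wait(struct demo_evt *evtp, void *node)
{
	DECLARE_COMPLETION_ONSTACK(done);

	if (!list_empty(&evtp->evt_listp))      /* already queued, bail out */
		return;

	spin_lock_irq(&demo_lock);
	evtp->arg1 = node;
	evtp->done = &done;
	list_add_tail(&evtp->evt_listp, &demo_work_list);
	wake_up(&demo_waitq);                   /* kick the worker thread */
	spin_unlock_irq(&demo_lock);

	wait_for_completion(&done);
}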
@@ -218,33 +330,31 @@ lpfc_work_list_done(struct lpfc_hba * phba) | |||
218 | case LPFC_EVT_KILL: | 330 | case LPFC_EVT_KILL: |
219 | lpfc_offline(phba); | 331 | lpfc_offline(phba); |
220 | *(int *)(evtp->evt_arg1) | 332 | *(int *)(evtp->evt_arg1) |
221 | = (phba->stopped) ? 0 : lpfc_sli_brdkill(phba); | 333 | = (phba->pport->stopped) |
334 | ? 0 : lpfc_sli_brdkill(phba); | ||
222 | lpfc_unblock_mgmt_io(phba); | 335 | lpfc_unblock_mgmt_io(phba); |
223 | complete((struct completion *)(evtp->evt_arg2)); | 336 | complete((struct completion *)(evtp->evt_arg2)); |
224 | break; | 337 | break; |
225 | } | 338 | } |
226 | if (free_evt) | 339 | if (free_evt) |
227 | kfree(evtp); | 340 | kfree(evtp); |
228 | spin_lock_irq(phba->host->host_lock); | 341 | spin_lock_irq(&phba->hbalock); |
229 | } | 342 | } |
230 | spin_unlock_irq(phba->host->host_lock); | 343 | spin_unlock_irq(&phba->hbalock); |
231 | 344 | ||
232 | } | 345 | } |
233 | 346 | ||
234 | static void | 347 | void |
235 | lpfc_work_done(struct lpfc_hba * phba) | 348 | lpfc_work_done(struct lpfc_hba *phba) |
236 | { | 349 | { |
237 | struct lpfc_sli_ring *pring; | 350 | struct lpfc_sli_ring *pring; |
238 | int i; | 351 | uint32_t ha_copy, status, control, work_port_events; |
239 | uint32_t ha_copy; | 352 | struct lpfc_vport *vport; |
240 | uint32_t control; | ||
241 | uint32_t work_hba_events; | ||
242 | 353 | ||
243 | spin_lock_irq(phba->host->host_lock); | 354 | spin_lock_irq(&phba->hbalock); |
244 | ha_copy = phba->work_ha; | 355 | ha_copy = phba->work_ha; |
245 | phba->work_ha = 0; | 356 | phba->work_ha = 0; |
246 | work_hba_events=phba->work_hba_events; | 357 | spin_unlock_irq(&phba->hbalock); |
247 | spin_unlock_irq(phba->host->host_lock); | ||
248 | 358 | ||
249 | if (ha_copy & HA_ERATT) | 359 | if (ha_copy & HA_ERATT) |
250 | lpfc_handle_eratt(phba); | 360 | lpfc_handle_eratt(phba); |
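lpfc_work_list_done() above drains that work list with the usual lock-drop-relock idiom: hbalock only protects the list itself, so it is released while each event handler runs and re-taken before the next list_remove_head(). A sketch of the same loop, reusing the demo_* pieces from the previous sketch; whether the popped event is freed depends on where it was allocated, which is what the free_evt flag above tracks.

static void demo_work_list_done(void)
{
	struct demo_evt *evtp;

	spin_lock_irq(&demo_lock);
	while (!list_empty(&demo_work_list)) {
		evtp = list_first_entry(&demo_work_list,
					struct demo_evt, evt_listp);
		list_del_init(&evtp->evt_listp);
		spin_unlock_irq(&demo_lock);    /* handler may sleep */

		demo_handle_dev_loss(evtp);
		/* events embedded in a node are NOT freed here; a
		 * heap-allocated event would be kfree()'d at this point */

		spin_lock_irq(&demo_lock);
	}
	spin_unlock_irq(&demo_lock);
}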
@@ -255,66 +365,111 @@ lpfc_work_done(struct lpfc_hba * phba) | |||
255 | if (ha_copy & HA_LATT) | 365 | if (ha_copy & HA_LATT) |
256 | lpfc_handle_latt(phba); | 366 | lpfc_handle_latt(phba); |
257 | 367 | ||
258 | if (work_hba_events & WORKER_DISC_TMO) | 368 | spin_lock_irq(&phba->hbalock); |
259 | lpfc_disc_timeout_handler(phba); | 369 | list_for_each_entry(vport, &phba->port_list, listentry) { |
260 | 370 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | |
261 | if (work_hba_events & WORKER_ELS_TMO) | 371 | |
262 | lpfc_els_timeout_handler(phba); | 372 | if (!scsi_host_get(shost)) { |
263 | 373 | continue; | |
264 | if (work_hba_events & WORKER_MBOX_TMO) | 374 | } |
265 | lpfc_mbox_timeout_handler(phba); | 375 | spin_unlock_irq(&phba->hbalock); |
266 | 376 | work_port_events = vport->work_port_events; | |
267 | if (work_hba_events & WORKER_FDMI_TMO) | 377 | |
268 | lpfc_fdmi_tmo_handler(phba); | 378 | if (work_port_events & WORKER_DISC_TMO) |
269 | 379 | lpfc_disc_timeout_handler(vport); | |
270 | spin_lock_irq(phba->host->host_lock); | 380 | |
271 | phba->work_hba_events &= ~work_hba_events; | 381 | if (work_port_events & WORKER_ELS_TMO) |
272 | spin_unlock_irq(phba->host->host_lock); | 382 | lpfc_els_timeout_handler(vport); |
273 | 383 | ||
274 | for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) { | 384 | if (work_port_events & WORKER_HB_TMO) |
275 | pring = &phba->sli.ring[i]; | 385 | lpfc_hb_timeout_handler(phba); |
276 | if ((ha_copy & HA_RXATT) | 386 | |
277 | || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { | 387 | if (work_port_events & WORKER_MBOX_TMO) |
278 | if (pring->flag & LPFC_STOP_IOCB_MASK) { | 388 | lpfc_mbox_timeout_handler(phba); |
279 | pring->flag |= LPFC_DEFERRED_RING_EVENT; | 389 | |
280 | } else { | 390 | if (work_port_events & WORKER_FABRIC_BLOCK_TMO) |
281 | lpfc_sli_handle_slow_ring_event(phba, pring, | 391 | lpfc_unblock_fabric_iocbs(phba); |
282 | (ha_copy & | 392 | |
283 | HA_RXMASK)); | 393 | if (work_port_events & WORKER_FDMI_TMO) |
284 | pring->flag &= ~LPFC_DEFERRED_RING_EVENT; | 394 | lpfc_fdmi_timeout_handler(vport); |
285 | } | 395 | |
286 | /* | 396 | if (work_port_events & WORKER_RAMP_DOWN_QUEUE) |
287 | * Turn on Ring interrupts | 397 | lpfc_ramp_down_queue_handler(phba); |
288 | */ | 398 | |
289 | spin_lock_irq(phba->host->host_lock); | 399 | if (work_port_events & WORKER_RAMP_UP_QUEUE) |
290 | control = readl(phba->HCregaddr); | 400 | lpfc_ramp_up_queue_handler(phba); |
291 | control |= (HC_R0INT_ENA << i); | 401 | |
402 | spin_lock_irq(&vport->work_port_lock); | ||
403 | vport->work_port_events &= ~work_port_events; | ||
404 | spin_unlock_irq(&vport->work_port_lock); | ||
405 | scsi_host_put(shost); | ||
406 | spin_lock_irq(&phba->hbalock); | ||
407 | } | ||
408 | spin_unlock_irq(&phba->hbalock); | ||
409 | |||
410 | pring = &phba->sli.ring[LPFC_ELS_RING]; | ||
411 | status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); | ||
412 | status >>= (4*LPFC_ELS_RING); | ||
413 | if ((status & HA_RXMASK) | ||
414 | || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { | ||
415 | if (pring->flag & LPFC_STOP_IOCB_MASK) { | ||
416 | pring->flag |= LPFC_DEFERRED_RING_EVENT; | ||
417 | } else { | ||
418 | lpfc_sli_handle_slow_ring_event(phba, pring, | ||
419 | (status & | ||
420 | HA_RXMASK)); | ||
421 | pring->flag &= ~LPFC_DEFERRED_RING_EVENT; | ||
422 | } | ||
423 | /* | ||
424 | * Turn on Ring interrupts | ||
425 | */ | ||
426 | spin_lock_irq(&phba->hbalock); | ||
427 | control = readl(phba->HCregaddr); | ||
428 | if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { | ||
429 | control |= (HC_R0INT_ENA << LPFC_ELS_RING); | ||
292 | writel(control, phba->HCregaddr); | 430 | writel(control, phba->HCregaddr); |
293 | readl(phba->HCregaddr); /* flush */ | 431 | readl(phba->HCregaddr); /* flush */ |
294 | spin_unlock_irq(phba->host->host_lock); | ||
295 | } | 432 | } |
433 | spin_unlock_irq(&phba->hbalock); | ||
296 | } | 434 | } |
297 | 435 | lpfc_work_list_done(phba); | |
298 | lpfc_work_list_done (phba); | ||
299 | |||
300 | } | 436 | } |
301 | 437 | ||
302 | static int | 438 | static int |
303 | check_work_wait_done(struct lpfc_hba *phba) { | 439 | check_work_wait_done(struct lpfc_hba *phba) |
440 | { | ||
441 | struct lpfc_vport *vport; | ||
442 | struct lpfc_sli_ring *pring; | ||
443 | int rc = 0; | ||
444 | |||
445 | spin_lock_irq(&phba->hbalock); | ||
446 | list_for_each_entry(vport, &phba->port_list, listentry) { | ||
447 | if (vport->work_port_events) { | ||
448 | rc = 1; | ||
449 | goto exit; | ||
450 | } | ||
451 | } | ||
304 | 452 | ||
305 | spin_lock_irq(phba->host->host_lock); | 453 | if (phba->work_ha || (!list_empty(&phba->work_list)) || |
306 | if (phba->work_ha || | ||
307 | phba->work_hba_events || | ||
308 | (!list_empty(&phba->work_list)) || | ||
309 | kthread_should_stop()) { | 454 | kthread_should_stop()) { |
310 | spin_unlock_irq(phba->host->host_lock); | 455 | rc = 1; |
311 | return 1; | 456 | goto exit; |
312 | } else { | ||
313 | spin_unlock_irq(phba->host->host_lock); | ||
314 | return 0; | ||
315 | } | 457 | } |
458 | |||
459 | pring = &phba->sli.ring[LPFC_ELS_RING]; | ||
460 | if (pring->flag & LPFC_DEFERRED_RING_EVENT) | ||
461 | rc = 1; | ||
462 | exit: | ||
463 | if (rc) | ||
464 | phba->work_found++; | ||
465 | else | ||
466 | phba->work_found = 0; | ||
467 | |||
468 | spin_unlock_irq(&phba->hbalock); | ||
469 | return rc; | ||
316 | } | 470 | } |
317 | 471 | ||
472 | |||
318 | int | 473 | int |
319 | lpfc_do_work(void *p) | 474 | lpfc_do_work(void *p) |
320 | { | 475 | { |
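The reworked lpfc_work_done() above walks phba->port_list under hbalock, pins each port's Scsi_Host with scsi_host_get() so the lock can be dropped while the per-port timeout handlers run, and afterwards clears only the event bits it actually serviced under the vport's own work_port_lock. The sketch below isolates that walk; demo_vport/demo_hba are illustrative stand-ins, and it leans on the same assumption the driver makes, namely that the host reference keeps the vport and its list linkage valid across the unlocked section.

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <scsi/scsi_host.h>

struct demo_vport {
	struct list_head listentry;
	struct Scsi_Host *shost;
	spinlock_t work_port_lock;
	u32 work_port_events;
};

struct demo_hba {
	spinlock_t hbalock;
	struct list_head port_list;
};

static void demo_handle_port_events(struct demo_vport *vport, u32 events)
{
	/* ... WORKER_DISC_TMO / WORKER_ELS_TMO style handlers, may sleep ... */
}

static void demo_service_ports(struct demo_hba *hba)
{
	struct demo_vport *vport;
	u32 events;

	spin_lock_irq(&hba->hbalock);
	list_for_each_entry(vport, &hba->port_list, listentry) {
		struct Scsi_Host *shost = vport->shost;

		if (!scsi_host_get(shost))      /* host being torn down */
			continue;
		spin_unlock_irq(&hba->hbalock);

		events = vport->work_port_events;
		demo_handle_port_events(vport, events);

		spin_lock_irq(&vport->work_port_lock);
		vport->work_port_events &= ~events; /* clear only serviced bits */
		spin_unlock_irq(&vport->work_port_lock);

		scsi_host_put(shost);
		spin_lock_irq(&hba->hbalock);
	}
	spin_unlock_irq(&hba->hbalock);
}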
@@ -324,11 +479,13 @@ lpfc_do_work(void *p) | |||
324 | 479 | ||
325 | set_user_nice(current, -20); | 480 | set_user_nice(current, -20); |
326 | phba->work_wait = &work_waitq; | 481 | phba->work_wait = &work_waitq; |
482 | phba->work_found = 0; | ||
327 | 483 | ||
328 | while (1) { | 484 | while (1) { |
329 | 485 | ||
330 | rc = wait_event_interruptible(work_waitq, | 486 | rc = wait_event_interruptible(work_waitq, |
331 | check_work_wait_done(phba)); | 487 | check_work_wait_done(phba)); |
488 | |||
332 | BUG_ON(rc); | 489 | BUG_ON(rc); |
333 | 490 | ||
334 | if (kthread_should_stop()) | 491 | if (kthread_should_stop()) |
@@ -336,6 +493,17 @@ lpfc_do_work(void *p) | |||
336 | 493 | ||
337 | lpfc_work_done(phba); | 494 | lpfc_work_done(phba); |
338 | 495 | ||
496 | /* If there is alot of slow ring work, like during link up | ||
497 | * check_work_wait_done() may cause this thread to not give | ||
498 | * up the CPU for very long periods of time. This may cause | ||
499 | * soft lockups or other problems. To avoid these situations | ||
500 | * give up the CPU here after LPFC_MAX_WORKER_ITERATION | ||
501 | * consecutive iterations. | ||
502 | */ | ||
503 | if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) { | ||
504 | phba->work_found = 0; | ||
505 | schedule(); | ||
506 | } | ||
339 | } | 507 | } |
340 | phba->work_wait = NULL; | 508 | phba->work_wait = NULL; |
341 | return 0; | 509 | return 0; |
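The new comment and LPFC_MAX_WORKER_ITERATION check above keep the worker thread from monopolizing a CPU when slow-ring work arrives continuously, for example during link up. A condensed sketch of the loop shape, reusing demo_waitq and demo_work_list_done() from the earlier sketches; note that the busy counter here lives in the loop itself, whereas the driver increments phba->work_found inside check_work_wait_done().

#include <linux/kthread.h>
#include <linux/sched.h>

#define DEMO_MAX_WORKER_ITERATION 4     /* illustrative threshold */

static bool demo_have_work(void)
{
	return !list_empty(&demo_work_list);    /* lockless peek, wake-up hint */
}

static int demo_do_work_thread(void *arg)
{
	int found = 0;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		if (wait_event_interruptible(demo_waitq,
					     demo_have_work() ||
					     kthread_should_stop()))
			break;                  /* interrupted: exit thread */

		demo_work_list_done();

		/* Constant work keeps the wait condition true so the thread
		 * never blocks; yield after a burst of busy passes. */
		if (!demo_have_work()) {
			found = 0;
		} else if (++found >= DEMO_MAX_WORKER_ITERATION) {
			found = 0;
			schedule();
		}
	}
	return 0;
}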
@@ -347,16 +515,17 @@ lpfc_do_work(void *p) | |||
347 | * embedding it in the IOCB. | 515 | * embedding it in the IOCB. |
348 | */ | 516 | */ |
349 | int | 517 | int |
350 | lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2, | 518 | lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2, |
351 | uint32_t evt) | 519 | uint32_t evt) |
352 | { | 520 | { |
353 | struct lpfc_work_evt *evtp; | 521 | struct lpfc_work_evt *evtp; |
522 | unsigned long flags; | ||
354 | 523 | ||
355 | /* | 524 | /* |
356 | * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will | 525 | * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will |
357 | * be queued to worker thread for processing | 526 | * be queued to worker thread for processing |
358 | */ | 527 | */ |
359 | evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL); | 528 | evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC); |
360 | if (!evtp) | 529 | if (!evtp) |
361 | return 0; | 530 | return 0; |
362 | 531 | ||
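lpfc_workq_post_event() above now allocates with GFP_ATOMIC and takes the lock with spin_lock_irqsave() because events may be posted from interrupt handlers or with other locks held. The producer-side sketch below pairs with the drain loop shown earlier, again reusing the demo_* list, lock and wait queue; like the driver routine it returns 1 on success and 0 on allocation failure rather than an errno.

#include <linux/slab.h>

static int demo_post_event(void *arg1)
{
	struct demo_evt *evtp;
	unsigned long flags;

	/* May run in atomic context, so no sleeping allocation here. */
	evtp = kmalloc(sizeof(*evtp), GFP_ATOMIC);
	if (!evtp)
		return 0;

	INIT_LIST_HEAD(&evtp->evt_listp);
	evtp->arg1 = arg1;
	evtp->done = NULL;

	spin_lock_irqsave(&demo_lock, flags);
	list_add_tail(&evtp->evt_listp, &demo_work_list);
	wake_up(&demo_waitq);           /* wake the worker if it is sleeping */
	spin_unlock_irqrestore(&demo_lock, flags);

	return 1;
}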
@@ -364,136 +533,210 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2, | |||
364 | evtp->evt_arg2 = arg2; | 533 | evtp->evt_arg2 = arg2; |
365 | evtp->evt = evt; | 534 | evtp->evt = evt; |
366 | 535 | ||
367 | spin_lock_irq(phba->host->host_lock); | 536 | spin_lock_irqsave(&phba->hbalock, flags); |
368 | list_add_tail(&evtp->evt_listp, &phba->work_list); | 537 | list_add_tail(&evtp->evt_listp, &phba->work_list); |
369 | if (phba->work_wait) | 538 | if (phba->work_wait) |
370 | wake_up(phba->work_wait); | 539 | lpfc_worker_wake_up(phba); |
371 | spin_unlock_irq(phba->host->host_lock); | 540 | spin_unlock_irqrestore(&phba->hbalock, flags); |
372 | 541 | ||
373 | return 1; | 542 | return 1; |
374 | } | 543 | } |
375 | 544 | ||
376 | int | 545 | void |
377 | lpfc_linkdown(struct lpfc_hba *phba) | 546 | lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) |
378 | { | 547 | { |
379 | struct lpfc_sli *psli; | 548 | struct lpfc_hba *phba = vport->phba; |
380 | struct lpfc_nodelist *ndlp, *next_ndlp; | 549 | struct lpfc_nodelist *ndlp, *next_ndlp; |
381 | LPFC_MBOXQ_t *mb; | 550 | int rc; |
382 | int rc; | ||
383 | 551 | ||
384 | psli = &phba->sli; | 552 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { |
385 | /* sysfs or selective reset may call this routine to clean up */ | 553 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) |
386 | if (phba->hba_state >= LPFC_LINK_DOWN) { | 554 | continue; |
387 | if (phba->hba_state == LPFC_LINK_DOWN) | ||
388 | return 0; | ||
389 | 555 | ||
390 | spin_lock_irq(phba->host->host_lock); | 556 | if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) |
391 | phba->hba_state = LPFC_LINK_DOWN; | 557 | lpfc_unreg_rpi(vport, ndlp); |
392 | spin_unlock_irq(phba->host->host_lock); | 558 | |
559 | /* Leave Fabric nodes alone on link down */ | ||
560 | if (!remove && ndlp->nlp_type & NLP_FABRIC) | ||
561 | continue; | ||
562 | rc = lpfc_disc_state_machine(vport, ndlp, NULL, | ||
563 | remove | ||
564 | ? NLP_EVT_DEVICE_RM | ||
565 | : NLP_EVT_DEVICE_RECOVERY); | ||
393 | } | 566 | } |
567 | if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { | ||
568 | lpfc_mbx_unreg_vpi(vport); | ||
569 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | ||
570 | } | ||
571 | } | ||
572 | |||
573 | static void | ||
574 | lpfc_linkdown_port(struct lpfc_vport *vport) | ||
575 | { | ||
576 | struct lpfc_nodelist *ndlp, *next_ndlp; | ||
577 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
394 | 578 | ||
395 | fc_host_post_event(phba->host, fc_get_event_number(), | 579 | fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); |
396 | FCH_EVT_LINKDOWN, 0); | ||
397 | 580 | ||
398 | /* Clean up any firmware default rpi's */ | 581 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
399 | if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { | 582 | "Link Down: state:x%x rtry:x%x flg:x%x", |
400 | lpfc_unreg_did(phba, 0xffffffff, mb); | 583 | vport->port_state, vport->fc_ns_retry, vport->fc_flag); |
401 | mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl; | ||
402 | if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB)) | ||
403 | == MBX_NOT_FINISHED) { | ||
404 | mempool_free( mb, phba->mbox_mem_pool); | ||
405 | } | ||
406 | } | ||
407 | 584 | ||
408 | /* Cleanup any outstanding RSCN activity */ | 585 | /* Cleanup any outstanding RSCN activity */ |
409 | lpfc_els_flush_rscn(phba); | 586 | lpfc_els_flush_rscn(vport); |
410 | 587 | ||
411 | /* Cleanup any outstanding ELS commands */ | 588 | /* Cleanup any outstanding ELS commands */ |
412 | lpfc_els_flush_cmd(phba); | 589 | lpfc_els_flush_cmd(vport); |
413 | 590 | ||
414 | /* | 591 | lpfc_cleanup_rpis(vport, 0); |
415 | * Issue a LINK DOWN event to all nodes. | 592 | |
416 | */ | 593 | /* free any ndlp's on unused list */ |
417 | list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) { | 594 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) |
418 | /* free any ndlp's on unused list */ | 595 | /* free any ndlp's in unused state */ |
419 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) | 596 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) |
420 | lpfc_drop_node(phba, ndlp); | 597 | lpfc_drop_node(vport, ndlp); |
421 | else /* otherwise, force node recovery. */ | 598 | |
422 | rc = lpfc_disc_state_machine(phba, ndlp, NULL, | 599 | /* Turn off discovery timer if its running */ |
423 | NLP_EVT_DEVICE_RECOVERY); | 600 | lpfc_can_disctmo(vport); |
601 | } | ||
602 | |||
603 | int | ||
604 | lpfc_linkdown(struct lpfc_hba *phba) | ||
605 | { | ||
606 | struct lpfc_vport *vport = phba->pport; | ||
607 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
608 | struct lpfc_vport *port_iterator; | ||
609 | LPFC_MBOXQ_t *mb; | ||
610 | |||
611 | if (phba->link_state == LPFC_LINK_DOWN) { | ||
612 | return 0; | ||
613 | } | ||
614 | spin_lock_irq(&phba->hbalock); | ||
615 | if (phba->link_state > LPFC_LINK_DOWN) { | ||
616 | phba->link_state = LPFC_LINK_DOWN; | ||
617 | phba->pport->fc_flag &= ~FC_LBIT; | ||
618 | } | ||
619 | spin_unlock_irq(&phba->hbalock); | ||
620 | |||
621 | list_for_each_entry(port_iterator, &phba->port_list, listentry) { | ||
622 | |||
623 | /* Issue a LINK DOWN event to all nodes */ | ||
624 | lpfc_linkdown_port(port_iterator); | ||
625 | } | ||
626 | |||
627 | /* Clean up any firmware default rpi's */ | ||
628 | mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
629 | if (mb) { | ||
630 | lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb); | ||
631 | mb->vport = vport; | ||
632 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
633 | if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB)) | ||
634 | == MBX_NOT_FINISHED) { | ||
635 | mempool_free(mb, phba->mbox_mem_pool); | ||
636 | } | ||
424 | } | 637 | } |
425 | 638 | ||
426 | /* Setup myDID for link up if we are in pt2pt mode */ | 639 | /* Setup myDID for link up if we are in pt2pt mode */ |
427 | if (phba->fc_flag & FC_PT2PT) { | 640 | if (phba->pport->fc_flag & FC_PT2PT) { |
428 | phba->fc_myDID = 0; | 641 | phba->pport->fc_myDID = 0; |
429 | if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { | 642 | mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
643 | if (mb) { | ||
430 | lpfc_config_link(phba, mb); | 644 | lpfc_config_link(phba, mb); |
431 | mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl; | 645 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
432 | if (lpfc_sli_issue_mbox | 646 | mb->vport = vport; |
433 | (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB)) | 647 | if (lpfc_sli_issue_mbox(phba, mb, |
648 | (MBX_NOWAIT | MBX_STOP_IOCB)) | ||
434 | == MBX_NOT_FINISHED) { | 649 | == MBX_NOT_FINISHED) { |
435 | mempool_free( mb, phba->mbox_mem_pool); | 650 | mempool_free(mb, phba->mbox_mem_pool); |
436 | } | 651 | } |
437 | } | 652 | } |
438 | spin_lock_irq(phba->host->host_lock); | 653 | spin_lock_irq(shost->host_lock); |
439 | phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); | 654 | phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); |
440 | spin_unlock_irq(phba->host->host_lock); | 655 | spin_unlock_irq(shost->host_lock); |
441 | } | 656 | } |
442 | spin_lock_irq(phba->host->host_lock); | ||
443 | phba->fc_flag &= ~FC_LBIT; | ||
444 | spin_unlock_irq(phba->host->host_lock); | ||
445 | |||
446 | /* Turn off discovery timer if its running */ | ||
447 | lpfc_can_disctmo(phba); | ||
448 | 657 | ||
449 | /* Must process IOCBs on all rings to handle ABORTed I/Os */ | ||
450 | return 0; | 658 | return 0; |
451 | } | 659 | } |
452 | 660 | ||
453 | static int | 661 | static void |
454 | lpfc_linkup(struct lpfc_hba *phba) | 662 | lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport) |
455 | { | 663 | { |
456 | struct lpfc_nodelist *ndlp, *next_ndlp; | 664 | struct lpfc_nodelist *ndlp; |
457 | 665 | ||
458 | fc_host_post_event(phba->host, fc_get_event_number(), | 666 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
459 | FCH_EVT_LINKUP, 0); | 667 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) |
460 | 668 | continue; | |
461 | spin_lock_irq(phba->host->host_lock); | 669 | |
462 | phba->hba_state = LPFC_LINK_UP; | 670 | if (ndlp->nlp_type & NLP_FABRIC) { |
463 | phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | | 671 | /* On Linkup its safe to clean up the ndlp |
464 | FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY); | 672 | * from Fabric connections. |
465 | phba->fc_flag |= FC_NDISC_ACTIVE; | 673 | */ |
466 | phba->fc_ns_retry = 0; | 674 | if (ndlp->nlp_DID != Fabric_DID) |
467 | spin_unlock_irq(phba->host->host_lock); | 675 | lpfc_unreg_rpi(vport, ndlp); |
468 | 676 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | |
469 | 677 | } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { | |
470 | if (phba->fc_flag & FC_LBIT) { | 678 | /* Fail outstanding IO now since device is |
471 | list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { | 679 | * marked for PLOGI. |
472 | if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) { | 680 | */ |
473 | if (ndlp->nlp_type & NLP_FABRIC) { | 681 | lpfc_unreg_rpi(vport, ndlp); |
474 | /* | ||
475 | * On Linkup its safe to clean up the | ||
476 | * ndlp from Fabric connections. | ||
477 | */ | ||
478 | lpfc_nlp_set_state(phba, ndlp, | ||
479 | NLP_STE_UNUSED_NODE); | ||
480 | } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { | ||
481 | /* | ||
482 | * Fail outstanding IO now since | ||
483 | * device is marked for PLOGI. | ||
484 | */ | ||
485 | lpfc_unreg_rpi(phba, ndlp); | ||
486 | } | ||
487 | } | ||
488 | } | 682 | } |
489 | } | 683 | } |
684 | } | ||
490 | 685 | ||
491 | /* free any ndlp's on unused list */ | 686 | static void |
492 | list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, | 687 | lpfc_linkup_port(struct lpfc_vport *vport) |
493 | nlp_listp) { | 688 | { |
689 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
690 | struct lpfc_nodelist *ndlp, *next_ndlp; | ||
691 | struct lpfc_hba *phba = vport->phba; | ||
692 | |||
693 | if ((vport->load_flag & FC_UNLOADING) != 0) | ||
694 | return; | ||
695 | |||
696 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, | ||
697 | "Link Up: top:x%x speed:x%x flg:x%x", | ||
698 | phba->fc_topology, phba->fc_linkspeed, phba->link_flag); | ||
699 | |||
700 | /* If NPIV is not enabled, only bring the physical port up */ | ||
701 | if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && | ||
702 | (vport != phba->pport)) | ||
703 | return; | ||
704 | |||
705 | fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0); | ||
706 | |||
707 | spin_lock_irq(shost->host_lock); | ||
708 | vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | | ||
709 | FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY); | ||
710 | vport->fc_flag |= FC_NDISC_ACTIVE; | ||
711 | vport->fc_ns_retry = 0; | ||
712 | spin_unlock_irq(shost->host_lock); | ||
713 | |||
714 | if (vport->fc_flag & FC_LBIT) | ||
715 | lpfc_linkup_cleanup_nodes(vport); | ||
716 | |||
717 | /* free any ndlp's in unused state */ | ||
718 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, | ||
719 | nlp_listp) | ||
494 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) | 720 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) |
495 | lpfc_drop_node(phba, ndlp); | 721 | lpfc_drop_node(vport, ndlp); |
722 | } | ||
723 | |||
724 | static int | ||
725 | lpfc_linkup(struct lpfc_hba *phba) | ||
726 | { | ||
727 | struct lpfc_vport *vport; | ||
728 | |||
729 | phba->link_state = LPFC_LINK_UP; | ||
730 | |||
731 | /* Unblock fabric iocbs if they are blocked */ | ||
732 | clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); | ||
733 | del_timer_sync(&phba->fabric_block_timer); | ||
734 | |||
735 | list_for_each_entry(vport, &phba->port_list, listentry) { | ||
736 | lpfc_linkup_port(vport); | ||
496 | } | 737 | } |
738 | if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) | ||
739 | lpfc_issue_clear_la(phba, phba->pport); | ||
497 | 740 | ||
498 | return 0; | 741 | return 0; |
499 | } | 742 | } |
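In the reorganized link-down path above, lpfc_cleanup_rpis() walks vport->fc_nodes and pushes each node through the discovery state machine with either NLP_EVT_DEVICE_RM (full remove) or NLP_EVT_DEVICE_RECOVERY, leaving fabric nodes alone on an ordinary link down. lpfc_disc_state_machine() dispatches on the (current state, event) pair; the toy version below shows only that shape, with made-up states, events and actions rather than the driver's NLP_STE_*/NLP_EVT_* sets.

/* Toy (state, event) dispatch in the style of the discovery state
 * machine; every name here is illustrative. */
enum demo_state { DEMO_STE_NPR, DEMO_STE_MAPPED, DEMO_STE_MAX };
enum demo_event { DEMO_EVT_DEVICE_RECOVERY, DEMO_EVT_DEVICE_RM, DEMO_EVT_MAX };

struct demo_node {
	enum demo_state state;
};

typedef enum demo_state (*demo_action_t)(struct demo_node *);

static enum demo_state demo_recover(struct demo_node *node)
{
	/* cancel outstanding I/O, mark the node for a fresh PLOGI, ... */
	return DEMO_STE_NPR;
}

static enum demo_state demo_remove(struct demo_node *node)
{
	/* release the node's resources; caller drops the node afterwards */
	return DEMO_STE_NPR;
}

static const demo_action_t demo_disc_action[DEMO_STE_MAX][DEMO_EVT_MAX] = {
	[DEMO_STE_NPR]    = { demo_recover, demo_remove },
	[DEMO_STE_MAPPED] = { demo_recover, demo_remove },
};

static void demo_disc_state_machine(struct demo_node *node, enum demo_event evt)
{
	node->state = demo_disc_action[node->state][evt](node);
}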
@@ -505,14 +748,14 @@ lpfc_linkup(struct lpfc_hba *phba) | |||
505 | * handed off to the SLI layer. | 748 | * handed off to the SLI layer. |
506 | */ | 749 | */ |
507 | void | 750 | void |
508 | lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | 751 | lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
509 | { | 752 | { |
510 | struct lpfc_sli *psli; | 753 | struct lpfc_vport *vport = pmb->vport; |
511 | MAILBOX_t *mb; | 754 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
755 | struct lpfc_sli *psli = &phba->sli; | ||
756 | MAILBOX_t *mb = &pmb->mb; | ||
512 | uint32_t control; | 757 | uint32_t control; |
513 | 758 | ||
514 | psli = &phba->sli; | ||
515 | mb = &pmb->mb; | ||
516 | /* Since we don't do discovery right now, turn these off here */ | 759 | /* Since we don't do discovery right now, turn these off here */ |
517 | psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT; | 760 | psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT; |
518 | psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT; | 761 | psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT; |
@@ -522,69 +765,74 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
522 | if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { | 765 | if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { |
523 | /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */ | 766 | /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */ |
524 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | 767 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, |
525 | "%d:0320 CLEAR_LA mbxStatus error x%x hba " | 768 | "%d (%d):0320 CLEAR_LA mbxStatus error x%x hba " |
526 | "state x%x\n", | 769 | "state x%x\n", |
527 | phba->brd_no, mb->mbxStatus, phba->hba_state); | 770 | phba->brd_no, vport->vpi, mb->mbxStatus, |
771 | vport->port_state); | ||
528 | 772 | ||
529 | phba->hba_state = LPFC_HBA_ERROR; | 773 | phba->link_state = LPFC_HBA_ERROR; |
530 | goto out; | 774 | goto out; |
531 | } | 775 | } |
532 | 776 | ||
533 | if (phba->fc_flag & FC_ABORT_DISCOVERY) | 777 | if (vport->port_type == LPFC_PHYSICAL_PORT) |
534 | goto out; | 778 | phba->link_state = LPFC_HBA_READY; |
535 | 779 | ||
536 | phba->num_disc_nodes = 0; | 780 | spin_lock_irq(&phba->hbalock); |
537 | /* go thru NPR list and issue ELS PLOGIs */ | 781 | psli->sli_flag |= LPFC_PROCESS_LA; |
538 | if (phba->fc_npr_cnt) { | 782 | control = readl(phba->HCregaddr); |
539 | lpfc_els_disc_plogi(phba); | 783 | control |= HC_LAINT_ENA; |
540 | } | 784 | writel(control, phba->HCregaddr); |
785 | readl(phba->HCregaddr); /* flush */ | ||
786 | spin_unlock_irq(&phba->hbalock); | ||
787 | return; | ||
788 | |||
789 | vport->num_disc_nodes = 0; | ||
790 | /* go thru NPR nodes and issue ELS PLOGIs */ | ||
791 | if (vport->fc_npr_cnt) | ||
792 | lpfc_els_disc_plogi(vport); | ||
541 | 793 | ||
542 | if (!phba->num_disc_nodes) { | 794 | if (!vport->num_disc_nodes) { |
543 | spin_lock_irq(phba->host->host_lock); | 795 | spin_lock_irq(shost->host_lock); |
544 | phba->fc_flag &= ~FC_NDISC_ACTIVE; | 796 | vport->fc_flag &= ~FC_NDISC_ACTIVE; |
545 | spin_unlock_irq(phba->host->host_lock); | 797 | spin_unlock_irq(shost->host_lock); |
546 | } | 798 | } |
547 | 799 | ||
548 | phba->hba_state = LPFC_HBA_READY; | 800 | vport->port_state = LPFC_VPORT_READY; |
549 | 801 | ||
550 | out: | 802 | out: |
551 | /* Device Discovery completes */ | 803 | /* Device Discovery completes */ |
552 | lpfc_printf_log(phba, | 804 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, |
553 | KERN_INFO, | 805 | "%d (%d):0225 Device Discovery completes\n", |
554 | LOG_DISCOVERY, | 806 | phba->brd_no, vport->vpi); |
555 | "%d:0225 Device Discovery completes\n", | ||
556 | phba->brd_no); | ||
557 | 807 | ||
558 | mempool_free( pmb, phba->mbox_mem_pool); | 808 | mempool_free(pmb, phba->mbox_mem_pool); |
559 | 809 | ||
560 | spin_lock_irq(phba->host->host_lock); | 810 | spin_lock_irq(shost->host_lock); |
561 | phba->fc_flag &= ~FC_ABORT_DISCOVERY; | 811 | vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK); |
562 | if (phba->fc_flag & FC_ESTABLISH_LINK) { | 812 | spin_unlock_irq(shost->host_lock); |
563 | phba->fc_flag &= ~FC_ESTABLISH_LINK; | ||
564 | } | ||
565 | spin_unlock_irq(phba->host->host_lock); | ||
566 | 813 | ||
567 | del_timer_sync(&phba->fc_estabtmo); | 814 | del_timer_sync(&phba->fc_estabtmo); |
568 | 815 | ||
569 | lpfc_can_disctmo(phba); | 816 | lpfc_can_disctmo(vport); |
570 | 817 | ||
571 | /* turn on Link Attention interrupts */ | 818 | /* turn on Link Attention interrupts */ |
572 | spin_lock_irq(phba->host->host_lock); | 819 | |
820 | spin_lock_irq(&phba->hbalock); | ||
573 | psli->sli_flag |= LPFC_PROCESS_LA; | 821 | psli->sli_flag |= LPFC_PROCESS_LA; |
574 | control = readl(phba->HCregaddr); | 822 | control = readl(phba->HCregaddr); |
575 | control |= HC_LAINT_ENA; | 823 | control |= HC_LAINT_ENA; |
576 | writel(control, phba->HCregaddr); | 824 | writel(control, phba->HCregaddr); |
577 | readl(phba->HCregaddr); /* flush */ | 825 | readl(phba->HCregaddr); /* flush */ |
578 | spin_unlock_irq(phba->host->host_lock); | 826 | spin_unlock_irq(&phba->hbalock); |
579 | 827 | ||
580 | return; | 828 | return; |
581 | } | 829 | } |
582 | 830 | ||
831 | |||
583 | static void | 832 | static void |
584 | lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | 833 | lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
585 | { | 834 | { |
586 | struct lpfc_sli *psli = &phba->sli; | 835 | struct lpfc_vport *vport = pmb->vport; |
587 | int rc; | ||
588 | 836 | ||
589 | if (pmb->mb.mbxStatus) | 837 | if (pmb->mb.mbxStatus) |
590 | goto out; | 838 | goto out; |
@@ -592,154 +840,139 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
592 | mempool_free(pmb, phba->mbox_mem_pool); | 840 | mempool_free(pmb, phba->mbox_mem_pool); |
593 | 841 | ||
594 | if (phba->fc_topology == TOPOLOGY_LOOP && | 842 | if (phba->fc_topology == TOPOLOGY_LOOP && |
595 | phba->fc_flag & FC_PUBLIC_LOOP && | 843 | vport->fc_flag & FC_PUBLIC_LOOP && |
596 | !(phba->fc_flag & FC_LBIT)) { | 844 | !(vport->fc_flag & FC_LBIT)) { |
597 | /* Need to wait for FAN - use discovery timer | 845 | /* Need to wait for FAN - use discovery timer |
598 | * for timeout. hba_state is identically | 846 | * for timeout. port_state is identically |
599 | * LPFC_LOCAL_CFG_LINK while waiting for FAN | 847 | * LPFC_LOCAL_CFG_LINK while waiting for FAN |
600 | */ | 848 | */ |
601 | lpfc_set_disctmo(phba); | 849 | lpfc_set_disctmo(vport); |
602 | return; | 850 | return; |
603 | } | 851 | } |
604 | 852 | ||
605 | /* Start discovery by sending a FLOGI. hba_state is identically | 853 | /* Start discovery by sending a FLOGI. port_state is identically |
606 | * LPFC_FLOGI while waiting for FLOGI cmpl | 854 | * LPFC_FLOGI while waiting for FLOGI cmpl |
607 | */ | 855 | */ |
608 | phba->hba_state = LPFC_FLOGI; | 856 | if (vport->port_state != LPFC_FLOGI) { |
609 | lpfc_set_disctmo(phba); | 857 | vport->port_state = LPFC_FLOGI; |
610 | lpfc_initial_flogi(phba); | 858 | lpfc_set_disctmo(vport); |
859 | lpfc_initial_flogi(vport); | ||
860 | } | ||
611 | return; | 861 | return; |
612 | 862 | ||
613 | out: | 863 | out: |
614 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | 864 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, |
615 | "%d:0306 CONFIG_LINK mbxStatus error x%x " | 865 | "%d (%d):0306 CONFIG_LINK mbxStatus error x%x " |
616 | "HBA state x%x\n", | 866 | "HBA state x%x\n", |
617 | phba->brd_no, pmb->mb.mbxStatus, phba->hba_state); | 867 | phba->brd_no, vport->vpi, pmb->mb.mbxStatus, |
868 | vport->port_state); | ||
618 | 869 | ||
619 | lpfc_linkdown(phba); | 870 | mempool_free(pmb, phba->mbox_mem_pool); |
620 | 871 | ||
621 | phba->hba_state = LPFC_HBA_ERROR; | 872 | lpfc_linkdown(phba); |
622 | 873 | ||
623 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | 874 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
624 | "%d:0200 CONFIG_LINK bad hba state x%x\n", | 875 | "%d (%d):0200 CONFIG_LINK bad hba state x%x\n", |
625 | phba->brd_no, phba->hba_state); | 876 | phba->brd_no, vport->vpi, vport->port_state); |
626 | 877 | ||
627 | lpfc_clear_la(phba, pmb); | 878 | lpfc_issue_clear_la(phba, vport); |
628 | pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la; | ||
629 | rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)); | ||
630 | if (rc == MBX_NOT_FINISHED) { | ||
631 | mempool_free(pmb, phba->mbox_mem_pool); | ||
632 | lpfc_disc_flush_list(phba); | ||
633 | psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; | ||
634 | psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; | ||
635 | psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; | ||
636 | phba->hba_state = LPFC_HBA_READY; | ||
637 | } | ||
638 | return; | 879 | return; |
639 | } | 880 | } |
640 | 881 | ||
641 | static void | 882 | static void |
642 | lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | 883 | lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
643 | { | 884 | { |
644 | struct lpfc_sli *psli = &phba->sli; | ||
645 | MAILBOX_t *mb = &pmb->mb; | 885 | MAILBOX_t *mb = &pmb->mb; |
646 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; | 886 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; |
887 | struct lpfc_vport *vport = pmb->vport; | ||
647 | 888 | ||
648 | 889 | ||
649 | /* Check for error */ | 890 | /* Check for error */ |
650 | if (mb->mbxStatus) { | 891 | if (mb->mbxStatus) { |
651 | /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */ | 892 | /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */ |
652 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | 893 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, |
653 | "%d:0319 READ_SPARAM mbxStatus error x%x " | 894 | "%d (%d):0319 READ_SPARAM mbxStatus error x%x " |
654 | "hba state x%x>\n", | 895 | "hba state x%x>\n", |
655 | phba->brd_no, mb->mbxStatus, phba->hba_state); | 896 | phba->brd_no, vport->vpi, mb->mbxStatus, |
897 | vport->port_state); | ||
656 | 898 | ||
657 | lpfc_linkdown(phba); | 899 | lpfc_linkdown(phba); |
658 | phba->hba_state = LPFC_HBA_ERROR; | ||
659 | goto out; | 900 | goto out; |
660 | } | 901 | } |
661 | 902 | ||
662 | memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt, | 903 | memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt, |
663 | sizeof (struct serv_parm)); | 904 | sizeof (struct serv_parm)); |
664 | if (phba->cfg_soft_wwnn) | 905 | if (phba->cfg_soft_wwnn) |
665 | u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn); | 906 | u64_to_wwn(phba->cfg_soft_wwnn, |
907 | vport->fc_sparam.nodeName.u.wwn); | ||
666 | if (phba->cfg_soft_wwpn) | 908 | if (phba->cfg_soft_wwpn) |
667 | u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn); | 909 | u64_to_wwn(phba->cfg_soft_wwpn, |
668 | memcpy((uint8_t *) & phba->fc_nodename, | 910 | vport->fc_sparam.portName.u.wwn); |
669 | (uint8_t *) & phba->fc_sparam.nodeName, | 911 | memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, |
670 | sizeof (struct lpfc_name)); | 912 | sizeof(vport->fc_nodename)); |
671 | memcpy((uint8_t *) & phba->fc_portname, | 913 | memcpy(&vport->fc_portname, &vport->fc_sparam.portName, |
672 | (uint8_t *) & phba->fc_sparam.portName, | 914 | sizeof(vport->fc_portname)); |
673 | sizeof (struct lpfc_name)); | 915 | if (vport->port_type == LPFC_PHYSICAL_PORT) { |
916 | memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn)); | ||
917 | memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn)); | ||
918 | } | ||
919 | |||
674 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 920 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
675 | kfree(mp); | 921 | kfree(mp); |
676 | mempool_free( pmb, phba->mbox_mem_pool); | 922 | mempool_free(pmb, phba->mbox_mem_pool); |
677 | return; | 923 | return; |
678 | 924 | ||
679 | out: | 925 | out: |
680 | pmb->context1 = NULL; | 926 | pmb->context1 = NULL; |
681 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 927 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
682 | kfree(mp); | 928 | kfree(mp); |
683 | if (phba->hba_state != LPFC_CLEAR_LA) { | 929 | lpfc_issue_clear_la(phba, vport); |
684 | lpfc_clear_la(phba, pmb); | 930 | mempool_free(pmb, phba->mbox_mem_pool); |
685 | pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la; | ||
686 | if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)) | ||
687 | == MBX_NOT_FINISHED) { | ||
688 | mempool_free( pmb, phba->mbox_mem_pool); | ||
689 | lpfc_disc_flush_list(phba); | ||
690 | psli->ring[(psli->extra_ring)].flag &= | ||
691 | ~LPFC_STOP_IOCB_EVENT; | ||
692 | psli->ring[(psli->fcp_ring)].flag &= | ||
693 | ~LPFC_STOP_IOCB_EVENT; | ||
694 | psli->ring[(psli->next_ring)].flag &= | ||
695 | ~LPFC_STOP_IOCB_EVENT; | ||
696 | phba->hba_state = LPFC_HBA_READY; | ||
697 | } | ||
698 | } else { | ||
699 | mempool_free( pmb, phba->mbox_mem_pool); | ||
700 | } | ||
701 | return; | 931 | return; |
702 | } | 932 | } |
703 | 933 | ||
704 | static void | 934 | static void |
705 | lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) | 935 | lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) |
706 | { | 936 | { |
707 | int i; | 937 | struct lpfc_vport *vport = phba->pport; |
708 | LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; | 938 | LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; |
939 | int i; | ||
709 | struct lpfc_dmabuf *mp; | 940 | struct lpfc_dmabuf *mp; |
710 | int rc; | 941 | int rc; |
711 | 942 | ||
712 | sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 943 | sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
713 | cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 944 | cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
714 | 945 | ||
715 | spin_lock_irq(phba->host->host_lock); | 946 | spin_lock_irq(&phba->hbalock); |
716 | switch (la->UlnkSpeed) { | 947 | switch (la->UlnkSpeed) { |
717 | case LA_1GHZ_LINK: | 948 | case LA_1GHZ_LINK: |
718 | phba->fc_linkspeed = LA_1GHZ_LINK; | 949 | phba->fc_linkspeed = LA_1GHZ_LINK; |
719 | break; | 950 | break; |
720 | case LA_2GHZ_LINK: | 951 | case LA_2GHZ_LINK: |
721 | phba->fc_linkspeed = LA_2GHZ_LINK; | 952 | phba->fc_linkspeed = LA_2GHZ_LINK; |
722 | break; | 953 | break; |
723 | case LA_4GHZ_LINK: | 954 | case LA_4GHZ_LINK: |
724 | phba->fc_linkspeed = LA_4GHZ_LINK; | 955 | phba->fc_linkspeed = LA_4GHZ_LINK; |
725 | break; | 956 | break; |
726 | case LA_8GHZ_LINK: | 957 | case LA_8GHZ_LINK: |
727 | phba->fc_linkspeed = LA_8GHZ_LINK; | 958 | phba->fc_linkspeed = LA_8GHZ_LINK; |
728 | break; | 959 | break; |
729 | default: | 960 | default: |
730 | phba->fc_linkspeed = LA_UNKNW_LINK; | 961 | phba->fc_linkspeed = LA_UNKNW_LINK; |
731 | break; | 962 | break; |
732 | } | 963 | } |
733 | 964 | ||
734 | phba->fc_topology = la->topology; | 965 | phba->fc_topology = la->topology; |
966 | phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; | ||
735 | 967 | ||
736 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 968 | if (phba->fc_topology == TOPOLOGY_LOOP) { |
737 | /* Get Loop Map information */ | 969 | phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; |
738 | 970 | ||
971 | /* Get Loop Map information */ | ||
739 | if (la->il) | 972 | if (la->il) |
740 | phba->fc_flag |= FC_LBIT; | 973 | vport->fc_flag |= FC_LBIT; |
741 | 974 | ||
742 | phba->fc_myDID = la->granted_AL_PA; | 975 | vport->fc_myDID = la->granted_AL_PA; |
743 | i = la->un.lilpBde64.tus.f.bdeSize; | 976 | i = la->un.lilpBde64.tus.f.bdeSize; |
744 | 977 | ||
745 | if (i == 0) { | 978 | if (i == 0) { |
@@ -769,29 +1002,35 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) | |||
769 | } | 1002 | } |
770 | /* Link Up Event ALPA map */ | 1003 | /* Link Up Event ALPA map */ |
771 | lpfc_printf_log(phba, | 1004 | lpfc_printf_log(phba, |
772 | KERN_WARNING, | 1005 | KERN_WARNING, |
773 | LOG_LINK_EVENT, | 1006 | LOG_LINK_EVENT, |
774 | "%d:1304 Link Up Event " | 1007 | "%d:1304 Link Up Event " |
775 | "ALPA map Data: x%x " | 1008 | "ALPA map Data: x%x " |
776 | "x%x x%x x%x\n", | 1009 | "x%x x%x x%x\n", |
777 | phba->brd_no, | 1010 | phba->brd_no, |
778 | un.pa.wd1, un.pa.wd2, | 1011 | un.pa.wd1, un.pa.wd2, |
779 | un.pa.wd3, un.pa.wd4); | 1012 | un.pa.wd3, un.pa.wd4); |
780 | } | 1013 | } |
781 | } | 1014 | } |
782 | } | 1015 | } |
783 | } else { | 1016 | } else { |
784 | phba->fc_myDID = phba->fc_pref_DID; | 1017 | if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { |
785 | phba->fc_flag |= FC_LBIT; | 1018 | if (phba->max_vpi && phba->cfg_npiv_enable && |
1019 | (phba->sli_rev == 3)) | ||
1020 | phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; | ||
1021 | } | ||
1022 | vport->fc_myDID = phba->fc_pref_DID; | ||
1023 | vport->fc_flag |= FC_LBIT; | ||
786 | } | 1024 | } |
787 | spin_unlock_irq(phba->host->host_lock); | 1025 | spin_unlock_irq(&phba->hbalock); |
788 | 1026 | ||
789 | lpfc_linkup(phba); | 1027 | lpfc_linkup(phba); |
790 | if (sparam_mbox) { | 1028 | if (sparam_mbox) { |
791 | lpfc_read_sparam(phba, sparam_mbox); | 1029 | lpfc_read_sparam(phba, sparam_mbox, 0); |
1030 | sparam_mbox->vport = vport; | ||
792 | sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; | 1031 | sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; |
793 | rc = lpfc_sli_issue_mbox(phba, sparam_mbox, | 1032 | rc = lpfc_sli_issue_mbox(phba, sparam_mbox, |
794 | (MBX_NOWAIT | MBX_STOP_IOCB)); | 1033 | (MBX_NOWAIT | MBX_STOP_IOCB)); |
795 | if (rc == MBX_NOT_FINISHED) { | 1034 | if (rc == MBX_NOT_FINISHED) { |
796 | mp = (struct lpfc_dmabuf *) sparam_mbox->context1; | 1035 | mp = (struct lpfc_dmabuf *) sparam_mbox->context1; |
797 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 1036 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
@@ -799,36 +1038,48 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) | |||
799 | mempool_free(sparam_mbox, phba->mbox_mem_pool); | 1038 | mempool_free(sparam_mbox, phba->mbox_mem_pool); |
800 | if (cfglink_mbox) | 1039 | if (cfglink_mbox) |
801 | mempool_free(cfglink_mbox, phba->mbox_mem_pool); | 1040 | mempool_free(cfglink_mbox, phba->mbox_mem_pool); |
802 | return; | 1041 | goto out; |
803 | } | 1042 | } |
804 | } | 1043 | } |
805 | 1044 | ||
806 | if (cfglink_mbox) { | 1045 | if (cfglink_mbox) { |
807 | phba->hba_state = LPFC_LOCAL_CFG_LINK; | 1046 | vport->port_state = LPFC_LOCAL_CFG_LINK; |
808 | lpfc_config_link(phba, cfglink_mbox); | 1047 | lpfc_config_link(phba, cfglink_mbox); |
1048 | cfglink_mbox->vport = vport; | ||
809 | cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; | 1049 | cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; |
810 | rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, | 1050 | rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, |
811 | (MBX_NOWAIT | MBX_STOP_IOCB)); | 1051 | (MBX_NOWAIT | MBX_STOP_IOCB)); |
812 | if (rc == MBX_NOT_FINISHED) | 1052 | if (rc != MBX_NOT_FINISHED) |
813 | mempool_free(cfglink_mbox, phba->mbox_mem_pool); | 1053 | return; |
1054 | mempool_free(cfglink_mbox, phba->mbox_mem_pool); | ||
814 | } | 1055 | } |
1056 | out: | ||
1057 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | ||
1058 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | ||
1059 | "%d (%d):0263 Discovery Mailbox error: state: 0x%x : %p %p\n", | ||
1060 | phba->brd_no, vport->vpi, | ||
1061 | vport->port_state, sparam_mbox, cfglink_mbox); | ||
1062 | |||
1063 | lpfc_issue_clear_la(phba, vport); | ||
1064 | return; | ||
815 | } | 1065 | } |
816 | 1066 | ||
817 | static void | 1067 | static void |
818 | lpfc_mbx_issue_link_down(struct lpfc_hba *phba) { | 1068 | lpfc_mbx_issue_link_down(struct lpfc_hba *phba) |
1069 | { | ||
819 | uint32_t control; | 1070 | uint32_t control; |
820 | struct lpfc_sli *psli = &phba->sli; | 1071 | struct lpfc_sli *psli = &phba->sli; |
821 | 1072 | ||
822 | lpfc_linkdown(phba); | 1073 | lpfc_linkdown(phba); |
823 | 1074 | ||
824 | /* turn on Link Attention interrupts - no CLEAR_LA needed */ | 1075 | /* turn on Link Attention interrupts - no CLEAR_LA needed */ |
825 | spin_lock_irq(phba->host->host_lock); | 1076 | spin_lock_irq(&phba->hbalock); |
826 | psli->sli_flag |= LPFC_PROCESS_LA; | 1077 | psli->sli_flag |= LPFC_PROCESS_LA; |
827 | control = readl(phba->HCregaddr); | 1078 | control = readl(phba->HCregaddr); |
828 | control |= HC_LAINT_ENA; | 1079 | control |= HC_LAINT_ENA; |
829 | writel(control, phba->HCregaddr); | 1080 | writel(control, phba->HCregaddr); |
830 | readl(phba->HCregaddr); /* flush */ | 1081 | readl(phba->HCregaddr); /* flush */ |
831 | spin_unlock_irq(phba->host->host_lock); | 1082 | spin_unlock_irq(&phba->hbalock); |
832 | } | 1083 | } |
833 | 1084 | ||
834 | /* | 1085 | /* |
@@ -838,22 +1089,21 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba) { | |||
838 | * handed off to the SLI layer. | 1089 | * handed off to the SLI layer. |
839 | */ | 1090 | */ |
840 | void | 1091 | void |
841 | lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | 1092 | lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
842 | { | 1093 | { |
1094 | struct lpfc_vport *vport = pmb->vport; | ||
1095 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
843 | READ_LA_VAR *la; | 1096 | READ_LA_VAR *la; |
844 | MAILBOX_t *mb = &pmb->mb; | 1097 | MAILBOX_t *mb = &pmb->mb; |
845 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); | 1098 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); |
846 | 1099 | ||
847 | /* Check for error */ | 1100 | /* Check for error */ |
848 | if (mb->mbxStatus) { | 1101 | if (mb->mbxStatus) { |
849 | lpfc_printf_log(phba, | 1102 | lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, |
850 | KERN_INFO, | ||
851 | LOG_LINK_EVENT, | ||
852 | "%d:1307 READ_LA mbox error x%x state x%x\n", | 1103 | "%d:1307 READ_LA mbox error x%x state x%x\n", |
853 | phba->brd_no, | 1104 | phba->brd_no, mb->mbxStatus, vport->port_state); |
854 | mb->mbxStatus, phba->hba_state); | ||
855 | lpfc_mbx_issue_link_down(phba); | 1105 | lpfc_mbx_issue_link_down(phba); |
856 | phba->hba_state = LPFC_HBA_ERROR; | 1106 | phba->link_state = LPFC_HBA_ERROR; |
857 | goto lpfc_mbx_cmpl_read_la_free_mbuf; | 1107 | goto lpfc_mbx_cmpl_read_la_free_mbuf; |
858 | } | 1108 | } |
859 | 1109 | ||
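lpfc_mbx_issue_link_down() above, like the CLEAR_LA completion earlier in this file, re-enables Link Attention interrupts with a read-modify-write of the Host Control register followed by a read-back so the posted write reaches the adapter before the lock is dropped. A standalone sketch of that MMIO idiom; the register pointer and bit value are illustrative, not the driver's HCregaddr field or HC_LAINT_ENA definition.

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_HC_LAINT_ENA  (1U << 1)    /* stand-in for the real bit */

static void demo_enable_la_intr(void __iomem *hc_regaddr, spinlock_t *lock)
{
	u32 control;

	spin_lock_irq(lock);
	control = readl(hc_regaddr);    /* current Host Control value */
	control |= DEMO_HC_LAINT_ENA;
	writel(control, hc_regaddr);
	readl(hc_regaddr);              /* read back: flush the posted write */
	spin_unlock_irq(lock);
}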
@@ -861,27 +1111,26 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
861 | 1111 | ||
862 | memcpy(&phba->alpa_map[0], mp->virt, 128); | 1112 | memcpy(&phba->alpa_map[0], mp->virt, 128); |
863 | 1113 | ||
864 | spin_lock_irq(phba->host->host_lock); | 1114 | spin_lock_irq(shost->host_lock); |
865 | if (la->pb) | 1115 | if (la->pb) |
866 | phba->fc_flag |= FC_BYPASSED_MODE; | 1116 | vport->fc_flag |= FC_BYPASSED_MODE; |
867 | else | 1117 | else |
868 | phba->fc_flag &= ~FC_BYPASSED_MODE; | 1118 | vport->fc_flag &= ~FC_BYPASSED_MODE; |
869 | spin_unlock_irq(phba->host->host_lock); | 1119 | spin_unlock_irq(shost->host_lock); |
870 | 1120 | ||
871 | if (((phba->fc_eventTag + 1) < la->eventTag) || | 1121 | if (((phba->fc_eventTag + 1) < la->eventTag) || |
872 | (phba->fc_eventTag == la->eventTag)) { | 1122 | (phba->fc_eventTag == la->eventTag)) { |
873 | phba->fc_stat.LinkMultiEvent++; | 1123 | phba->fc_stat.LinkMultiEvent++; |
874 | if (la->attType == AT_LINK_UP) { | 1124 | if (la->attType == AT_LINK_UP) |
875 | if (phba->fc_eventTag != 0) | 1125 | if (phba->fc_eventTag != 0) |
876 | lpfc_linkdown(phba); | 1126 | lpfc_linkdown(phba); |
877 | } | ||
878 | } | 1127 | } |
879 | 1128 | ||
880 | phba->fc_eventTag = la->eventTag; | 1129 | phba->fc_eventTag = la->eventTag; |
881 | 1130 | ||
882 | if (la->attType == AT_LINK_UP) { | 1131 | if (la->attType == AT_LINK_UP) { |
883 | phba->fc_stat.LinkUp++; | 1132 | phba->fc_stat.LinkUp++; |
884 | if (phba->fc_flag & FC_LOOPBACK_MODE) { | 1133 | if (phba->link_flag & LS_LOOPBACK_MODE) { |
885 | lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, | 1134 | lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, |
886 | "%d:1306 Link Up Event in loop back mode " | 1135 | "%d:1306 Link Up Event in loop back mode " |
887 | "x%x received Data: x%x x%x x%x x%x\n", | 1136 | "x%x received Data: x%x x%x x%x x%x\n", |
@@ -903,7 +1152,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
903 | "%d:1305 Link Down Event x%x received " | 1152 | "%d:1305 Link Down Event x%x received " |
904 | "Data: x%x x%x x%x\n", | 1153 | "Data: x%x x%x x%x\n", |
905 | phba->brd_no, la->eventTag, phba->fc_eventTag, | 1154 | phba->brd_no, la->eventTag, phba->fc_eventTag, |
906 | phba->hba_state, phba->fc_flag); | 1155 | phba->pport->port_state, vport->fc_flag); |
907 | lpfc_mbx_issue_link_down(phba); | 1156 | lpfc_mbx_issue_link_down(phba); |
908 | } | 1157 | } |
909 | 1158 | ||
@@ -921,31 +1170,115 @@ lpfc_mbx_cmpl_read_la_free_mbuf: | |||
921 | * handed off to the SLI layer. | 1170 | * handed off to the SLI layer. |
922 | */ | 1171 | */ |
923 | void | 1172 | void |
924 | lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | 1173 | lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
925 | { | 1174 | { |
926 | struct lpfc_sli *psli; | 1175 | struct lpfc_vport *vport = pmb->vport; |
927 | MAILBOX_t *mb; | 1176 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); |
928 | struct lpfc_dmabuf *mp; | 1177 | struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; |
929 | struct lpfc_nodelist *ndlp; | ||
930 | |||
931 | psli = &phba->sli; | ||
932 | mb = &pmb->mb; | ||
933 | |||
934 | ndlp = (struct lpfc_nodelist *) pmb->context2; | ||
935 | mp = (struct lpfc_dmabuf *) (pmb->context1); | ||
936 | 1178 | ||
937 | pmb->context1 = NULL; | 1179 | pmb->context1 = NULL; |
938 | 1180 | ||
939 | /* Good status, call state machine */ | 1181 | /* Good status, call state machine */ |
940 | lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN); | 1182 | lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN); |
941 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 1183 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
942 | kfree(mp); | 1184 | kfree(mp); |
943 | mempool_free( pmb, phba->mbox_mem_pool); | 1185 | mempool_free(pmb, phba->mbox_mem_pool); |
944 | lpfc_nlp_put(ndlp); | 1186 | lpfc_nlp_put(ndlp); |
945 | 1187 | ||
946 | return; | 1188 | return; |
947 | } | 1189 | } |
948 | 1190 | ||
1191 | static void | ||
1192 | lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | ||
1193 | { | ||
1194 | MAILBOX_t *mb = &pmb->mb; | ||
1195 | struct lpfc_vport *vport = pmb->vport; | ||
1196 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
1197 | |||
1198 | switch (mb->mbxStatus) { | ||
1199 | case 0x0011: | ||
1200 | case 0x0020: | ||
1201 | case 0x9700: | ||
1202 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, | ||
1203 | "%d (%d):0911 cmpl_unreg_vpi, " | ||
1204 | "mb status = 0x%x\n", | ||
1205 | phba->brd_no, vport->vpi, mb->mbxStatus); | ||
1206 | break; | ||
1207 | } | ||
1208 | vport->unreg_vpi_cmpl = VPORT_OK; | ||
1209 | mempool_free(pmb, phba->mbox_mem_pool); | ||
1210 | /* | ||
1211 | * This shost reference might have been taken at the beginning of | ||
1212 | * lpfc_vport_delete() | ||
1213 | */ | ||
1214 | if (vport->load_flag & FC_UNLOADING) | ||
1215 | scsi_host_put(shost); | ||
1216 | } | ||
1217 | |||
1218 | void | ||
1219 | lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) | ||
1220 | { | ||
1221 | struct lpfc_hba *phba = vport->phba; | ||
1222 | LPFC_MBOXQ_t *mbox; | ||
1223 | int rc; | ||
1224 | |||
1225 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
1226 | if (!mbox) | ||
1227 | return; | ||
1228 | |||
1229 | lpfc_unreg_vpi(phba, vport->vpi, mbox); | ||
1230 | mbox->vport = vport; | ||
1231 | mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi; | ||
1232 | rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); | ||
1233 | if (rc == MBX_NOT_FINISHED) { | ||
1234 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT, | ||
1235 | "%d (%d):1800 Could not issue unreg_vpi\n", | ||
1236 | phba->brd_no, vport->vpi); | ||
1237 | mempool_free(mbox, phba->mbox_mem_pool); | ||
1238 | vport->unreg_vpi_cmpl = VPORT_ERROR; | ||
1239 | } | ||
1240 | } | ||
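The pair of hunks above shows the wiring used by the new mailbox helpers in this patch: the issuing side (lpfc_mbx_unreg_vpi) stamps the request with its owning vport and a completion handler before passing it to lpfc_sli_issue_mbox, and the completion side (lpfc_mbx_cmpl_unreg_vpi) later recovers the vport from pmb->vport and frees the request. A minimal stand-alone sketch of that callback shape follows; every type and name in it is invented for illustration and is not an lpfc or kernel API.

/* toy_mbox.c - illustrative sketch of the pmb->vport / pmb->mbox_cmpl wiring */
#include <stdio.h>
#include <stdlib.h>

struct toy_vport { int vpi; };

struct toy_mbox {
	struct toy_vport *vport;                        /* owner, like pmb->vport  */
	void (*cmpl)(struct toy_mbox *mb, int status);  /* like pmb->mbox_cmpl     */
};

static void toy_unreg_vpi_cmpl(struct toy_mbox *mb, int status)
{
	/* the completion recovers the owning port from the request itself */
	printf("unreg_vpi complete: vpi=%d status=0x%x\n", mb->vport->vpi, status);
	free(mb);
}

int main(void)
{
	struct toy_vport vport = { .vpi = 1 };
	struct toy_mbox *mb = malloc(sizeof(*mb));

	if (!mb)
		return 1;               /* mirrors the mempool_alloc failure check */
	mb->vport = &vport;
	mb->cmpl  = toy_unreg_vpi_cmpl;
	mb->cmpl(mb, 0);                /* in the driver, the SLI layer calls this */
	return 0;
}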
1241 | |||
1242 | static void | ||
1243 | lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | ||
1244 | { | ||
1245 | struct lpfc_vport *vport = pmb->vport; | ||
1246 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
1247 | MAILBOX_t *mb = &pmb->mb; | ||
1248 | |||
1249 | switch (mb->mbxStatus) { | ||
1250 | case 0x0011: | ||
1251 | case 0x9601: | ||
1252 | case 0x9602: | ||
1253 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, | ||
1254 | "%d (%d):0912 cmpl_reg_vpi, mb status = 0x%x\n", | ||
1255 | phba->brd_no, vport->vpi, mb->mbxStatus); | ||
1256 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | ||
1257 | spin_lock_irq(shost->host_lock); | ||
1258 | vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); | ||
1259 | spin_unlock_irq(shost->host_lock); | ||
1260 | vport->fc_myDID = 0; | ||
1261 | goto out; | ||
1262 | } | ||
1263 | |||
1264 | vport->num_disc_nodes = 0; | ||
1265 | /* go thru NPR list and issue ELS PLOGIs */ | ||
1266 | if (vport->fc_npr_cnt) | ||
1267 | lpfc_els_disc_plogi(vport); | ||
1268 | |||
1269 | if (!vport->num_disc_nodes) { | ||
1270 | spin_lock_irq(shost->host_lock); | ||
1271 | vport->fc_flag &= ~FC_NDISC_ACTIVE; | ||
1272 | spin_unlock_irq(shost->host_lock); | ||
1273 | lpfc_can_disctmo(vport); | ||
1274 | } | ||
1275 | vport->port_state = LPFC_VPORT_READY; | ||
1276 | |||
1277 | out: | ||
1278 | mempool_free(pmb, phba->mbox_mem_pool); | ||
1279 | return; | ||
1280 | } | ||
1281 | |||
949 | /* | 1282 | /* |
950 | * This routine handles processing a Fabric REG_LOGIN mailbox | 1283 | * This routine handles processing a Fabric REG_LOGIN mailbox |
951 | * command upon completion. It is setup in the LPFC_MBOXQ | 1284 | * command upon completion. It is setup in the LPFC_MBOXQ |
@@ -953,20 +1286,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
953 | * handed off to the SLI layer. | 1286 | * handed off to the SLI layer. |
954 | */ | 1287 | */ |
955 | void | 1288 | void |
956 | lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | 1289 | lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
957 | { | 1290 | { |
958 | struct lpfc_sli *psli; | 1291 | struct lpfc_vport *vport = pmb->vport; |
959 | MAILBOX_t *mb; | 1292 | struct lpfc_vport *next_vport; |
960 | struct lpfc_dmabuf *mp; | 1293 | MAILBOX_t *mb = &pmb->mb; |
1294 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); | ||
961 | struct lpfc_nodelist *ndlp; | 1295 | struct lpfc_nodelist *ndlp; |
962 | struct lpfc_nodelist *ndlp_fdmi; | ||
963 | |||
964 | |||
965 | psli = &phba->sli; | ||
966 | mb = &pmb->mb; | ||
967 | |||
968 | ndlp = (struct lpfc_nodelist *) pmb->context2; | 1296 | ndlp = (struct lpfc_nodelist *) pmb->context2; |
969 | mp = (struct lpfc_dmabuf *) (pmb->context1); | ||
970 | 1297 | ||
971 | pmb->context1 = NULL; | 1298 | pmb->context1 = NULL; |
972 | pmb->context2 = NULL; | 1299 | pmb->context2 = NULL; |
@@ -977,60 +1304,46 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
977 | mempool_free(pmb, phba->mbox_mem_pool); | 1304 | mempool_free(pmb, phba->mbox_mem_pool); |
978 | lpfc_nlp_put(ndlp); | 1305 | lpfc_nlp_put(ndlp); |
979 | 1306 | ||
980 | /* FLOGI failed, so just use loop map to make discovery list */ | 1307 | if (phba->fc_topology == TOPOLOGY_LOOP) { |
981 | lpfc_disc_list_loopmap(phba); | 1308 | /* FLOGI failed, use loop map to make discovery list */ |
1309 | lpfc_disc_list_loopmap(vport); | ||
1310 | |||
1311 | /* Start discovery */ | ||
1312 | lpfc_disc_start(vport); | ||
1313 | return; | ||
1314 | } | ||
1315 | |||
1316 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | ||
1317 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | ||
1318 | "%d (%d):0258 Register Fabric login error: 0x%x\n", | ||
1319 | phba->brd_no, vport->vpi, mb->mbxStatus); | ||
982 | 1320 | ||
983 | /* Start discovery */ | ||
984 | lpfc_disc_start(phba); | ||
985 | return; | 1321 | return; |
986 | } | 1322 | } |
987 | 1323 | ||
988 | ndlp->nlp_rpi = mb->un.varWords[0]; | 1324 | ndlp->nlp_rpi = mb->un.varWords[0]; |
989 | ndlp->nlp_type |= NLP_FABRIC; | 1325 | ndlp->nlp_type |= NLP_FABRIC; |
990 | lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); | 1326 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
991 | 1327 | ||
992 | lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */ | 1328 | lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */ |
993 | 1329 | ||
994 | if (phba->hba_state == LPFC_FABRIC_CFG_LINK) { | 1330 | if (vport->port_state == LPFC_FABRIC_CFG_LINK) { |
995 | /* This NPort has been assigned an NPort_ID by the fabric as a | 1331 | list_for_each_entry(next_vport, &phba->port_list, listentry) { |
996 | * result of the completed fabric login. Issue a State Change | 1332 | if (next_vport->port_type == LPFC_PHYSICAL_PORT) |
997 | * Registration (SCR) ELS request to the fabric controller | 1333 | continue; |
998 | * (SCR_DID) so that this NPort gets RSCN events from the | 1334 | |
999 | * fabric. | 1335 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) |
1000 | */ | 1336 | lpfc_initial_fdisc(next_vport); |
1001 | lpfc_issue_els_scr(phba, SCR_DID, 0); | 1337 | else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { |
1002 | 1338 | lpfc_vport_set_state(vport, | |
1003 | ndlp = lpfc_findnode_did(phba, NameServer_DID); | 1339 | FC_VPORT_NO_FABRIC_SUPP); |
1004 | if (!ndlp) { | 1340 | lpfc_printf_log(phba, KERN_ERR, LOG_ELS, |
1005 | /* Allocate a new node instance. If the pool is empty, | 1341 | "%d (%d):0259 No NPIV Fabric " |
1006 | * start the discovery process and skip the Nameserver | 1342 | "support\n", |
1007 | * login process. This is attempted again later on. | 1343 | phba->brd_no, vport->vpi); |
1008 | * Otherwise, issue a Port Login (PLOGI) to NameServer. | ||
1009 | */ | ||
1010 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); | ||
1011 | if (!ndlp) { | ||
1012 | lpfc_disc_start(phba); | ||
1013 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | ||
1014 | kfree(mp); | ||
1015 | mempool_free(pmb, phba->mbox_mem_pool); | ||
1016 | return; | ||
1017 | } else { | ||
1018 | lpfc_nlp_init(phba, ndlp, NameServer_DID); | ||
1019 | ndlp->nlp_type |= NLP_FABRIC; | ||
1020 | } | ||
1021 | } | ||
1022 | lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); | ||
1023 | lpfc_issue_els_plogi(phba, NameServer_DID, 0); | ||
1024 | if (phba->cfg_fdmi_on) { | ||
1025 | ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool, | ||
1026 | GFP_KERNEL); | ||
1027 | if (ndlp_fdmi) { | ||
1028 | lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID); | ||
1029 | ndlp_fdmi->nlp_type |= NLP_FABRIC; | ||
1030 | ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE; | ||
1031 | lpfc_issue_els_plogi(phba, FDMI_DID, 0); | ||
1032 | } | 1344 | } |
1033 | } | 1345 | } |
1346 | lpfc_do_scr_ns_plogi(phba, vport); | ||
1034 | } | 1347 | } |
1035 | 1348 | ||
1036 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 1349 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
@@ -1046,32 +1359,36 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
1046 | * handed off to the SLI layer. | 1359 | * handed off to the SLI layer. |
1047 | */ | 1360 | */ |
1048 | void | 1361 | void |
1049 | lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | 1362 | lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
1050 | { | 1363 | { |
1051 | struct lpfc_sli *psli; | 1364 | MAILBOX_t *mb = &pmb->mb; |
1052 | MAILBOX_t *mb; | 1365 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); |
1053 | struct lpfc_dmabuf *mp; | 1366 | struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; |
1054 | struct lpfc_nodelist *ndlp; | 1367 | struct lpfc_vport *vport = pmb->vport; |
1055 | |||
1056 | psli = &phba->sli; | ||
1057 | mb = &pmb->mb; | ||
1058 | |||
1059 | ndlp = (struct lpfc_nodelist *) pmb->context2; | ||
1060 | mp = (struct lpfc_dmabuf *) (pmb->context1); | ||
1061 | 1368 | ||
1062 | if (mb->mbxStatus) { | 1369 | if (mb->mbxStatus) { |
1370 | out: | ||
1063 | lpfc_nlp_put(ndlp); | 1371 | lpfc_nlp_put(ndlp); |
1064 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 1372 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
1065 | kfree(mp); | 1373 | kfree(mp); |
1066 | mempool_free(pmb, phba->mbox_mem_pool); | 1374 | mempool_free(pmb, phba->mbox_mem_pool); |
1067 | lpfc_drop_node(phba, ndlp); | 1375 | lpfc_drop_node(vport, ndlp); |
1068 | 1376 | ||
1069 | /* RegLogin failed, so just use loop map to make discovery | 1377 | if (phba->fc_topology == TOPOLOGY_LOOP) { |
1070 | list */ | 1378 | /* |
1071 | lpfc_disc_list_loopmap(phba); | 1379 | * RegLogin failed, use loop map to make discovery |
1380 | * list | ||
1381 | */ | ||
1382 | lpfc_disc_list_loopmap(vport); | ||
1072 | 1383 | ||
1073 | /* Start discovery */ | 1384 | /* Start discovery */ |
1074 | lpfc_disc_start(phba); | 1385 | lpfc_disc_start(vport); |
1386 | return; | ||
1387 | } | ||
1388 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | ||
1389 | lpfc_printf_log(phba, KERN_ERR, LOG_ELS, | ||
1390 | "%d (%d):0260 Register NameServer error: 0x%x\n", | ||
1391 | phba->brd_no, vport->vpi, mb->mbxStatus); | ||
1075 | return; | 1392 | return; |
1076 | } | 1393 | } |
1077 | 1394 | ||
@@ -1079,37 +1396,43 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
1079 | 1396 | ||
1080 | ndlp->nlp_rpi = mb->un.varWords[0]; | 1397 | ndlp->nlp_rpi = mb->un.varWords[0]; |
1081 | ndlp->nlp_type |= NLP_FABRIC; | 1398 | ndlp->nlp_type |= NLP_FABRIC; |
1082 | lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); | 1399 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
1083 | 1400 | ||
1084 | if (phba->hba_state < LPFC_HBA_READY) { | 1401 | if (vport->port_state < LPFC_VPORT_READY) { |
1085 | /* Link up discovery requires Fabrib registration. */ | 1402 | /* Link up discovery requires Fabric registration. */ |
1086 | lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID); | 1403 | lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */ |
1087 | lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN); | 1404 | lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0); |
1088 | lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID); | 1405 | lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); |
1089 | lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID); | 1406 | lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); |
1407 | lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); | ||
1408 | |||
1409 | /* Issue SCR just before NameServer GID_FT Query */ | ||
1410 | lpfc_issue_els_scr(vport, SCR_DID, 0); | ||
1090 | } | 1411 | } |
1091 | 1412 | ||
1092 | phba->fc_ns_retry = 0; | 1413 | vport->fc_ns_retry = 0; |
1093 | /* Good status, issue CT Request to NameServer */ | 1414 | /* Good status, issue CT Request to NameServer */ |
1094 | if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) { | 1415 | if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) { |
1095 | /* Cannot issue NameServer Query, so finish up discovery */ | 1416 | /* Cannot issue NameServer Query, so finish up discovery */ |
1096 | lpfc_disc_start(phba); | 1417 | goto out; |
1097 | } | 1418 | } |
1098 | 1419 | ||
1099 | lpfc_nlp_put(ndlp); | 1420 | lpfc_nlp_put(ndlp); |
1100 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 1421 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
1101 | kfree(mp); | 1422 | kfree(mp); |
1102 | mempool_free( pmb, phba->mbox_mem_pool); | 1423 | mempool_free(pmb, phba->mbox_mem_pool); |
1103 | 1424 | ||
1104 | return; | 1425 | return; |
1105 | } | 1426 | } |
1106 | 1427 | ||
1107 | static void | 1428 | static void |
1108 | lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | 1429 | lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
1109 | { | 1430 | { |
1110 | struct fc_rport *rport; | 1431 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
1432 | struct fc_rport *rport; | ||
1111 | struct lpfc_rport_data *rdata; | 1433 | struct lpfc_rport_data *rdata; |
1112 | struct fc_rport_identifiers rport_ids; | 1434 | struct fc_rport_identifiers rport_ids; |
1435 | struct lpfc_hba *phba = vport->phba; | ||
1113 | 1436 | ||
1114 | /* Remote port has reappeared. Re-register w/ FC transport */ | 1437 | /* Remote port has reappeared. Re-register w/ FC transport */ |
1115 | rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); | 1438 | rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); |
@@ -1125,10 +1448,15 @@ lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
1125 | * registered the port. | 1448 | * registered the port. |
1126 | */ | 1449 | */ |
1127 | if (ndlp->rport && ndlp->rport->dd_data && | 1450 | if (ndlp->rport && ndlp->rport->dd_data && |
1128 | *(struct lpfc_rport_data **) ndlp->rport->dd_data) { | 1451 | ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) { |
1129 | lpfc_nlp_put(ndlp); | 1452 | lpfc_nlp_put(ndlp); |
1130 | } | 1453 | } |
1131 | ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids); | 1454 | |
1455 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, | ||
1456 | "rport add: did:x%x flg:x%x type x%x", | ||
1457 | ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); | ||
1458 | |||
1459 | ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); | ||
1132 | if (!rport || !get_device(&rport->dev)) { | 1460 | if (!rport || !get_device(&rport->dev)) { |
1133 | dev_printk(KERN_WARNING, &phba->pcidev->dev, | 1461 | dev_printk(KERN_WARNING, &phba->pcidev->dev, |
1134 | "Warning: fc_remote_port_add failed\n"); | 1462 | "Warning: fc_remote_port_add failed\n"); |
@@ -1151,25 +1479,20 @@ lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
1151 | fc_remote_port_rolechg(rport, rport_ids.roles); | 1479 | fc_remote_port_rolechg(rport, rport_ids.roles); |
1152 | 1480 | ||
1153 | if ((rport->scsi_target_id != -1) && | 1481 | if ((rport->scsi_target_id != -1) && |
1154 | (rport->scsi_target_id < LPFC_MAX_TARGET)) { | 1482 | (rport->scsi_target_id < LPFC_MAX_TARGET)) { |
1155 | ndlp->nlp_sid = rport->scsi_target_id; | 1483 | ndlp->nlp_sid = rport->scsi_target_id; |
1156 | } | 1484 | } |
1157 | |||
1158 | return; | 1485 | return; |
1159 | } | 1486 | } |
1160 | 1487 | ||
1161 | static void | 1488 | static void |
1162 | lpfc_unregister_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | 1489 | lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) |
1163 | { | 1490 | { |
1164 | struct fc_rport *rport = ndlp->rport; | 1491 | struct fc_rport *rport = ndlp->rport; |
1165 | struct lpfc_rport_data *rdata = rport->dd_data; | ||
1166 | 1492 | ||
1167 | if (rport->scsi_target_id == -1) { | 1493 | lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT, |
1168 | ndlp->rport = NULL; | 1494 | "rport delete: did:x%x flg:x%x type x%x", |
1169 | rdata->pnode = NULL; | 1495 | ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); |
1170 | lpfc_nlp_put(ndlp); | ||
1171 | put_device(&rport->dev); | ||
1172 | } | ||
1173 | 1496 | ||
1174 | fc_remote_port_delete(rport); | 1497 | fc_remote_port_delete(rport); |
1175 | 1498 | ||
@@ -1177,42 +1500,46 @@ lpfc_unregister_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
1177 | } | 1500 | } |
1178 | 1501 | ||
1179 | static void | 1502 | static void |
1180 | lpfc_nlp_counters(struct lpfc_hba *phba, int state, int count) | 1503 | lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) |
1181 | { | 1504 | { |
1182 | spin_lock_irq(phba->host->host_lock); | 1505 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
1506 | |||
1507 | spin_lock_irq(shost->host_lock); | ||
1183 | switch (state) { | 1508 | switch (state) { |
1184 | case NLP_STE_UNUSED_NODE: | 1509 | case NLP_STE_UNUSED_NODE: |
1185 | phba->fc_unused_cnt += count; | 1510 | vport->fc_unused_cnt += count; |
1186 | break; | 1511 | break; |
1187 | case NLP_STE_PLOGI_ISSUE: | 1512 | case NLP_STE_PLOGI_ISSUE: |
1188 | phba->fc_plogi_cnt += count; | 1513 | vport->fc_plogi_cnt += count; |
1189 | break; | 1514 | break; |
1190 | case NLP_STE_ADISC_ISSUE: | 1515 | case NLP_STE_ADISC_ISSUE: |
1191 | phba->fc_adisc_cnt += count; | 1516 | vport->fc_adisc_cnt += count; |
1192 | break; | 1517 | break; |
1193 | case NLP_STE_REG_LOGIN_ISSUE: | 1518 | case NLP_STE_REG_LOGIN_ISSUE: |
1194 | phba->fc_reglogin_cnt += count; | 1519 | vport->fc_reglogin_cnt += count; |
1195 | break; | 1520 | break; |
1196 | case NLP_STE_PRLI_ISSUE: | 1521 | case NLP_STE_PRLI_ISSUE: |
1197 | phba->fc_prli_cnt += count; | 1522 | vport->fc_prli_cnt += count; |
1198 | break; | 1523 | break; |
1199 | case NLP_STE_UNMAPPED_NODE: | 1524 | case NLP_STE_UNMAPPED_NODE: |
1200 | phba->fc_unmap_cnt += count; | 1525 | vport->fc_unmap_cnt += count; |
1201 | break; | 1526 | break; |
1202 | case NLP_STE_MAPPED_NODE: | 1527 | case NLP_STE_MAPPED_NODE: |
1203 | phba->fc_map_cnt += count; | 1528 | vport->fc_map_cnt += count; |
1204 | break; | 1529 | break; |
1205 | case NLP_STE_NPR_NODE: | 1530 | case NLP_STE_NPR_NODE: |
1206 | phba->fc_npr_cnt += count; | 1531 | vport->fc_npr_cnt += count; |
1207 | break; | 1532 | break; |
1208 | } | 1533 | } |
1209 | spin_unlock_irq(phba->host->host_lock); | 1534 | spin_unlock_irq(shost->host_lock); |
1210 | } | 1535 | } |
1211 | 1536 | ||
1212 | static void | 1537 | static void |
1213 | lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, | 1538 | lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
1214 | int old_state, int new_state) | 1539 | int old_state, int new_state) |
1215 | { | 1540 | { |
1541 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
1542 | |||
1216 | if (new_state == NLP_STE_UNMAPPED_NODE) { | 1543 | if (new_state == NLP_STE_UNMAPPED_NODE) { |
1217 | ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); | 1544 | ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); |
1218 | ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; | 1545 | ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; |
@@ -1226,35 +1553,34 @@ lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, | |||
1226 | /* Transport interface */ | 1553 | /* Transport interface */ |
1227 | if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE || | 1554 | if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE || |
1228 | old_state == NLP_STE_UNMAPPED_NODE)) { | 1555 | old_state == NLP_STE_UNMAPPED_NODE)) { |
1229 | phba->nport_event_cnt++; | 1556 | vport->phba->nport_event_cnt++; |
1230 | lpfc_unregister_remote_port(phba, ndlp); | 1557 | lpfc_unregister_remote_port(ndlp); |
1231 | } | 1558 | } |
1232 | 1559 | ||
1233 | if (new_state == NLP_STE_MAPPED_NODE || | 1560 | if (new_state == NLP_STE_MAPPED_NODE || |
1234 | new_state == NLP_STE_UNMAPPED_NODE) { | 1561 | new_state == NLP_STE_UNMAPPED_NODE) { |
1235 | phba->nport_event_cnt++; | 1562 | vport->phba->nport_event_cnt++; |
1236 | /* | 1563 | /* |
1237 | * Tell the fc transport about the port, if we haven't | 1564 | * Tell the fc transport about the port, if we haven't |
1238 | * already. If we have, and it's a scsi entity, be | 1565 | * already. If we have, and it's a scsi entity, be |
1239 | * sure to unblock any attached scsi devices | 1566 | * sure to unblock any attached scsi devices |
1240 | */ | 1567 | */ |
1241 | lpfc_register_remote_port(phba, ndlp); | 1568 | lpfc_register_remote_port(vport, ndlp); |
1242 | } | 1569 | } |
1243 | 1570 | /* | |
1244 | /* | 1571 | * if we added to Mapped list, but the remote port |
1245 | * if we added to Mapped list, but the remote port | 1572 | * registration failed or assigned a target id outside |
1246 | * registration failed or assigned a target id outside | 1573 | * our presentable range - move the node to the |
1247 | * our presentable range - move the node to the | 1574 | * Unmapped List |
1248 | * Unmapped List | 1575 | */ |
1249 | */ | ||
1250 | if (new_state == NLP_STE_MAPPED_NODE && | 1576 | if (new_state == NLP_STE_MAPPED_NODE && |
1251 | (!ndlp->rport || | 1577 | (!ndlp->rport || |
1252 | ndlp->rport->scsi_target_id == -1 || | 1578 | ndlp->rport->scsi_target_id == -1 || |
1253 | ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { | 1579 | ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { |
1254 | spin_lock_irq(phba->host->host_lock); | 1580 | spin_lock_irq(shost->host_lock); |
1255 | ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; | 1581 | ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; |
1256 | spin_unlock_irq(phba->host->host_lock); | 1582 | spin_unlock_irq(shost->host_lock); |
1257 | lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); | 1583 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
1258 | } | 1584 | } |
1259 | } | 1585 | } |
1260 | 1586 | ||
@@ -1280,61 +1606,74 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state) | |||
1280 | } | 1606 | } |
1281 | 1607 | ||
1282 | void | 1608 | void |
1283 | lpfc_nlp_set_state(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int state) | 1609 | lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
1610 | int state) | ||
1284 | { | 1611 | { |
1612 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
1285 | int old_state = ndlp->nlp_state; | 1613 | int old_state = ndlp->nlp_state; |
1286 | char name1[16], name2[16]; | 1614 | char name1[16], name2[16]; |
1287 | 1615 | ||
1288 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, | 1616 | lpfc_printf_log(vport->phba, KERN_INFO, LOG_NODE, |
1289 | "%d:0904 NPort state transition x%06x, %s -> %s\n", | 1617 | "%d (%d):0904 NPort state transition x%06x, %s -> %s\n", |
1290 | phba->brd_no, | 1618 | vport->phba->brd_no, vport->vpi, |
1291 | ndlp->nlp_DID, | 1619 | ndlp->nlp_DID, |
1292 | lpfc_nlp_state_name(name1, sizeof(name1), old_state), | 1620 | lpfc_nlp_state_name(name1, sizeof(name1), old_state), |
1293 | lpfc_nlp_state_name(name2, sizeof(name2), state)); | 1621 | lpfc_nlp_state_name(name2, sizeof(name2), state)); |
1622 | |||
1623 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, | ||
1624 | "node statechg did:x%x old:%d ste:%d", | ||
1625 | ndlp->nlp_DID, old_state, state); | ||
1626 | |||
1294 | if (old_state == NLP_STE_NPR_NODE && | 1627 | if (old_state == NLP_STE_NPR_NODE && |
1295 | (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 && | 1628 | (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 && |
1296 | state != NLP_STE_NPR_NODE) | 1629 | state != NLP_STE_NPR_NODE) |
1297 | lpfc_cancel_retry_delay_tmo(phba, ndlp); | 1630 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
1298 | if (old_state == NLP_STE_UNMAPPED_NODE) { | 1631 | if (old_state == NLP_STE_UNMAPPED_NODE) { |
1299 | ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; | 1632 | ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; |
1300 | ndlp->nlp_type &= ~NLP_FC_NODE; | 1633 | ndlp->nlp_type &= ~NLP_FC_NODE; |
1301 | } | 1634 | } |
1302 | 1635 | ||
1303 | if (list_empty(&ndlp->nlp_listp)) { | 1636 | if (list_empty(&ndlp->nlp_listp)) { |
1304 | spin_lock_irq(phba->host->host_lock); | 1637 | spin_lock_irq(shost->host_lock); |
1305 | list_add_tail(&ndlp->nlp_listp, &phba->fc_nodes); | 1638 | list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); |
1306 | spin_unlock_irq(phba->host->host_lock); | 1639 | spin_unlock_irq(shost->host_lock); |
1307 | } else if (old_state) | 1640 | } else if (old_state) |
1308 | lpfc_nlp_counters(phba, old_state, -1); | 1641 | lpfc_nlp_counters(vport, old_state, -1); |
1309 | 1642 | ||
1310 | ndlp->nlp_state = state; | 1643 | ndlp->nlp_state = state; |
1311 | lpfc_nlp_counters(phba, state, 1); | 1644 | lpfc_nlp_counters(vport, state, 1); |
1312 | lpfc_nlp_state_cleanup(phba, ndlp, old_state, state); | 1645 | lpfc_nlp_state_cleanup(vport, ndlp, old_state, state); |
1313 | } | 1646 | } |
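As the hunk above shows, a node state change is bookkeeping as much as a transition: lpfc_nlp_set_state adds the node to the vport's fc_nodes list if it was on no list yet (otherwise it decrements the counter for the old state), bumps the counter for the new state, and then lets lpfc_nlp_state_cleanup handle the rport side effects. A tiny stand-alone mirror of just the counter bookkeeping is below; the state names and the -1 'not listed yet' convention are invented for the example.

/* toy_nlp_state.c - illustrative sketch of the per-state counter bookkeeping */
#include <stdio.h>

enum { ST_UNUSED, ST_PLOGI, ST_UNMAPPED, ST_MAPPED, ST_NPR, ST_MAX };

static int counters[ST_MAX];

static void toy_set_state(int *node_state, int new_state)
{
	if (*node_state >= 0)
		counters[*node_state]--;   /* leave the old state's bucket */
	counters[new_state]++;             /* enter the new state's bucket */
	*node_state = new_state;
}

int main(void)
{
	int node = -1;                     /* not on any list yet */

	toy_set_state(&node, ST_PLOGI);
	toy_set_state(&node, ST_UNMAPPED);
	toy_set_state(&node, ST_MAPPED);
	printf("plogi=%d unmapped=%d mapped=%d\n",
	       counters[ST_PLOGI], counters[ST_UNMAPPED], counters[ST_MAPPED]);
	return 0;
}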
1314 | 1647 | ||
1315 | void | 1648 | void |
1316 | lpfc_dequeue_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | 1649 | lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
1317 | { | 1650 | { |
1651 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
1652 | |||
1318 | if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) | 1653 | if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) |
1319 | lpfc_cancel_retry_delay_tmo(phba, ndlp); | 1654 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
1320 | if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) | 1655 | if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) |
1321 | lpfc_nlp_counters(phba, ndlp->nlp_state, -1); | 1656 | lpfc_nlp_counters(vport, ndlp->nlp_state, -1); |
1322 | spin_lock_irq(phba->host->host_lock); | 1657 | spin_lock_irq(shost->host_lock); |
1323 | list_del_init(&ndlp->nlp_listp); | 1658 | list_del_init(&ndlp->nlp_listp); |
1324 | spin_unlock_irq(phba->host->host_lock); | 1659 | spin_unlock_irq(shost->host_lock); |
1325 | lpfc_nlp_state_cleanup(phba, ndlp, ndlp->nlp_state, 0); | 1660 | lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, |
1661 | NLP_STE_UNUSED_NODE); | ||
1326 | } | 1662 | } |
1327 | 1663 | ||
1328 | void | 1664 | void |
1329 | lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | 1665 | lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
1330 | { | 1666 | { |
1667 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
1668 | |||
1331 | if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) | 1669 | if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) |
1332 | lpfc_cancel_retry_delay_tmo(phba, ndlp); | 1670 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
1333 | if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) | 1671 | if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) |
1334 | lpfc_nlp_counters(phba, ndlp->nlp_state, -1); | 1672 | lpfc_nlp_counters(vport, ndlp->nlp_state, -1); |
1335 | spin_lock_irq(phba->host->host_lock); | 1673 | spin_lock_irq(shost->host_lock); |
1336 | list_del_init(&ndlp->nlp_listp); | 1674 | list_del_init(&ndlp->nlp_listp); |
1337 | spin_unlock_irq(phba->host->host_lock); | 1675 | ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; |
1676 | spin_unlock_irq(shost->host_lock); | ||
1338 | lpfc_nlp_put(ndlp); | 1677 | lpfc_nlp_put(ndlp); |
1339 | } | 1678 | } |
1340 | 1679 | ||
@@ -1342,11 +1681,13 @@ lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
1342 | * Start / ReStart rescue timer for Discovery / RSCN handling | 1681 | * Start / ReStart rescue timer for Discovery / RSCN handling |
1343 | */ | 1682 | */ |
1344 | void | 1683 | void |
1345 | lpfc_set_disctmo(struct lpfc_hba * phba) | 1684 | lpfc_set_disctmo(struct lpfc_vport *vport) |
1346 | { | 1685 | { |
1686 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
1687 | struct lpfc_hba *phba = vport->phba; | ||
1347 | uint32_t tmo; | 1688 | uint32_t tmo; |
1348 | 1689 | ||
1349 | if (phba->hba_state == LPFC_LOCAL_CFG_LINK) { | 1690 | if (vport->port_state == LPFC_LOCAL_CFG_LINK) { |
1350 | /* For FAN, timeout should be greater than edtov */ | 1691 | /* For FAN, timeout should be greater than edtov */
1351 | tmo = (((phba->fc_edtov + 999) / 1000) + 1); | 1692 | tmo = (((phba->fc_edtov + 999) / 1000) + 1); |
1352 | } else { | 1693 | } else { |
@@ -1356,18 +1697,25 @@ lpfc_set_disctmo(struct lpfc_hba * phba) | |||
1356 | tmo = ((phba->fc_ratov * 3) + 3); | 1697 | tmo = ((phba->fc_ratov * 3) + 3); |
1357 | } | 1698 | } |
1358 | 1699 | ||
1359 | mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo); | 1700 | |
1360 | spin_lock_irq(phba->host->host_lock); | 1701 | if (!timer_pending(&vport->fc_disctmo)) { |
1361 | phba->fc_flag |= FC_DISC_TMO; | 1702 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
1362 | spin_unlock_irq(phba->host->host_lock); | 1703 | "set disc timer: tmo:x%x state:x%x flg:x%x", |
1704 | tmo, vport->port_state, vport->fc_flag); | ||
1705 | } | ||
1706 | |||
1707 | mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo); | ||
1708 | spin_lock_irq(shost->host_lock); | ||
1709 | vport->fc_flag |= FC_DISC_TMO; | ||
1710 | spin_unlock_irq(shost->host_lock); | ||
1363 | 1711 | ||
1364 | /* Start Discovery Timer state <hba_state> */ | 1712 | /* Start Discovery Timer state <hba_state> */ |
1365 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, | 1713 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, |
1366 | "%d:0247 Start Discovery Timer state x%x " | 1714 | "%d (%d):0247 Start Discovery Timer state x%x " |
1367 | "Data: x%x x%lx x%x x%x\n", | 1715 | "Data: x%x x%lx x%x x%x\n", |
1368 | phba->brd_no, | 1716 | phba->brd_no, vport->vpi, vport->port_state, tmo, |
1369 | phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo, | 1717 | (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt, |
1370 | phba->fc_plogi_cnt, phba->fc_adisc_cnt); | 1718 | vport->fc_adisc_cnt); |
1371 | 1719 | ||
1372 | return; | 1720 | return; |
1373 | } | 1721 | } |
@@ -1376,23 +1724,34 @@ lpfc_set_disctmo(struct lpfc_hba * phba) | |||
1376 | * Cancel rescue timer for Discovery / RSCN handling | 1724 | * Cancel rescue timer for Discovery / RSCN handling |
1377 | */ | 1725 | */ |
1378 | int | 1726 | int |
1379 | lpfc_can_disctmo(struct lpfc_hba * phba) | 1727 | lpfc_can_disctmo(struct lpfc_vport *vport) |
1380 | { | 1728 | { |
1729 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
1730 | struct lpfc_hba *phba = vport->phba; | ||
1731 | unsigned long iflags; | ||
1732 | |||
1733 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, | ||
1734 | "can disc timer: state:x%x rtry:x%x flg:x%x", | ||
1735 | vport->port_state, vport->fc_ns_retry, vport->fc_flag); | ||
1736 | |||
1381 | /* Turn off discovery timer if it's running */ | 1737 | /* Turn off discovery timer if it's running */
1382 | if (phba->fc_flag & FC_DISC_TMO) { | 1738 | if (vport->fc_flag & FC_DISC_TMO) { |
1383 | spin_lock_irq(phba->host->host_lock); | 1739 | spin_lock_irqsave(shost->host_lock, iflags); |
1384 | phba->fc_flag &= ~FC_DISC_TMO; | 1740 | vport->fc_flag &= ~FC_DISC_TMO; |
1385 | spin_unlock_irq(phba->host->host_lock); | 1741 | spin_unlock_irqrestore(shost->host_lock, iflags); |
1386 | del_timer_sync(&phba->fc_disctmo); | 1742 | del_timer_sync(&vport->fc_disctmo); |
1387 | phba->work_hba_events &= ~WORKER_DISC_TMO; | 1743 | spin_lock_irqsave(&vport->work_port_lock, iflags); |
1744 | vport->work_port_events &= ~WORKER_DISC_TMO; | ||
1745 | spin_unlock_irqrestore(&vport->work_port_lock, iflags); | ||
1388 | } | 1746 | } |
1389 | 1747 | ||
1390 | /* Cancel Discovery Timer state <hba_state> */ | 1748 | /* Cancel Discovery Timer state <hba_state> */ |
1391 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, | 1749 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, |
1392 | "%d:0248 Cancel Discovery Timer state x%x " | 1750 | "%d (%d):0248 Cancel Discovery Timer state x%x " |
1393 | "Data: x%x x%x x%x\n", | 1751 | "Data: x%x x%x x%x\n", |
1394 | phba->brd_no, phba->hba_state, phba->fc_flag, | 1752 | phba->brd_no, vport->vpi, vport->port_state, |
1395 | phba->fc_plogi_cnt, phba->fc_adisc_cnt); | 1753 | vport->fc_flag, vport->fc_plogi_cnt, |
1754 | vport->fc_adisc_cnt); | ||
1396 | 1755 | ||
1397 | return 0; | 1756 | return 0; |
1398 | } | 1757 | } |
@@ -1402,15 +1761,18 @@ lpfc_can_disctmo(struct lpfc_hba * phba) | |||
1402 | * Return true if iocb matches the specified nport | 1761 | * Return true if iocb matches the specified nport |
1403 | */ | 1762 | */ |
1404 | int | 1763 | int |
1405 | lpfc_check_sli_ndlp(struct lpfc_hba * phba, | 1764 | lpfc_check_sli_ndlp(struct lpfc_hba *phba, |
1406 | struct lpfc_sli_ring * pring, | 1765 | struct lpfc_sli_ring *pring, |
1407 | struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp) | 1766 | struct lpfc_iocbq *iocb, |
1767 | struct lpfc_nodelist *ndlp) | ||
1408 | { | 1768 | { |
1409 | struct lpfc_sli *psli; | 1769 | struct lpfc_sli *psli = &phba->sli; |
1410 | IOCB_t *icmd; | 1770 | IOCB_t *icmd = &iocb->iocb; |
1771 | struct lpfc_vport *vport = ndlp->vport; | ||
1772 | |||
1773 | if (iocb->vport != vport) | ||
1774 | return 0; | ||
1411 | 1775 | ||
1412 | psli = &phba->sli; | ||
1413 | icmd = &iocb->iocb; | ||
1414 | if (pring->ringno == LPFC_ELS_RING) { | 1776 | if (pring->ringno == LPFC_ELS_RING) { |
1415 | switch (icmd->ulpCommand) { | 1777 | switch (icmd->ulpCommand) { |
1416 | case CMD_GEN_REQUEST64_CR: | 1778 | case CMD_GEN_REQUEST64_CR: |
@@ -1428,7 +1790,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba, | |||
1428 | } else if (pring->ringno == psli->fcp_ring) { | 1790 | } else if (pring->ringno == psli->fcp_ring) { |
1429 | /* Skip match check if waiting to relogin to FCP target */ | 1791 | /* Skip match check if waiting to relogin to FCP target */ |
1430 | if ((ndlp->nlp_type & NLP_FCP_TARGET) && | 1792 | if ((ndlp->nlp_type & NLP_FCP_TARGET) && |
1431 | (ndlp->nlp_flag & NLP_DELAY_TMO)) { | 1793 | (ndlp->nlp_flag & NLP_DELAY_TMO)) { |
1432 | return 0; | 1794 | return 0; |
1433 | } | 1795 | } |
1434 | if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) { | 1796 | if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) { |
@@ -1445,7 +1807,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba, | |||
1445 | * associated with nlp_rpi in the LPFC_NODELIST entry. | 1807 | * associated with nlp_rpi in the LPFC_NODELIST entry. |
1446 | */ | 1808 | */ |
1447 | static int | 1809 | static int |
1448 | lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | 1810 | lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) |
1449 | { | 1811 | { |
1450 | LIST_HEAD(completions); | 1812 | LIST_HEAD(completions); |
1451 | struct lpfc_sli *psli; | 1813 | struct lpfc_sli *psli; |
@@ -1454,6 +1816,8 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | |||
1454 | IOCB_t *icmd; | 1816 | IOCB_t *icmd; |
1455 | uint32_t rpi, i; | 1817 | uint32_t rpi, i; |
1456 | 1818 | ||
1819 | lpfc_fabric_abort_nport(ndlp); | ||
1820 | |||
1457 | /* | 1821 | /* |
1458 | * Everything that matches on txcmplq will be returned | 1822 | * Everything that matches on txcmplq will be returned |
1459 | * by firmware with a no rpi error. | 1823 | * by firmware with a no rpi error. |
@@ -1465,15 +1829,15 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | |||
1465 | for (i = 0; i < psli->num_rings; i++) { | 1829 | for (i = 0; i < psli->num_rings; i++) { |
1466 | pring = &psli->ring[i]; | 1830 | pring = &psli->ring[i]; |
1467 | 1831 | ||
1468 | spin_lock_irq(phba->host->host_lock); | 1832 | spin_lock_irq(&phba->hbalock); |
1469 | list_for_each_entry_safe(iocb, next_iocb, &pring->txq, | 1833 | list_for_each_entry_safe(iocb, next_iocb, &pring->txq, |
1470 | list) { | 1834 | list) { |
1471 | /* | 1835 | /* |
1472 | * Check to see if iocb matches the nport we are | 1836 | * Check to see if iocb matches the nport we are |
1473 | * looking for | 1837 | * looking for |
1474 | */ | 1838 | */ |
1475 | if ((lpfc_check_sli_ndlp | 1839 | if ((lpfc_check_sli_ndlp(phba, pring, iocb, |
1476 | (phba, pring, iocb, ndlp))) { | 1840 | ndlp))) { |
1477 | /* It matches, so dequeue and call compl | 1841 | /* It matches, so dequeue and call compl
1478 | with an error */ | 1842 | with an error */ |
1479 | list_move_tail(&iocb->list, | 1843 | list_move_tail(&iocb->list, |
@@ -1481,22 +1845,22 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | |||
1481 | pring->txq_cnt--; | 1845 | pring->txq_cnt--; |
1482 | } | 1846 | } |
1483 | } | 1847 | } |
1484 | spin_unlock_irq(phba->host->host_lock); | 1848 | spin_unlock_irq(&phba->hbalock); |
1485 | |||
1486 | } | 1849 | } |
1487 | } | 1850 | } |
1488 | 1851 | ||
1489 | while (!list_empty(&completions)) { | 1852 | while (!list_empty(&completions)) { |
1490 | iocb = list_get_first(&completions, struct lpfc_iocbq, list); | 1853 | iocb = list_get_first(&completions, struct lpfc_iocbq, list); |
1491 | list_del(&iocb->list); | 1854 | list_del_init(&iocb->list); |
1492 | 1855 | ||
1493 | if (iocb->iocb_cmpl) { | 1856 | if (!iocb->iocb_cmpl) |
1857 | lpfc_sli_release_iocbq(phba, iocb); | ||
1858 | else { | ||
1494 | icmd = &iocb->iocb; | 1859 | icmd = &iocb->iocb; |
1495 | icmd->ulpStatus = IOSTAT_LOCAL_REJECT; | 1860 | icmd->ulpStatus = IOSTAT_LOCAL_REJECT; |
1496 | icmd->un.ulpWord[4] = IOERR_SLI_ABORTED; | 1861 | icmd->un.ulpWord[4] = IOERR_SLI_ABORTED; |
1497 | (iocb->iocb_cmpl) (phba, iocb, iocb); | 1862 | (iocb->iocb_cmpl)(phba, iocb, iocb); |
1498 | } else | 1863 | } |
1499 | lpfc_sli_release_iocbq(phba, iocb); | ||
1500 | } | 1864 | } |
1501 | 1865 | ||
1502 | return 0; | 1866 | return 0; |
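The loop in lpfc_no_rpi above follows a common collect-then-complete pattern: while holding the hbalock it moves every txq iocb that matches the nport onto a private completions list, and only after dropping the lock does it walk that list, releasing an iocb that has no completion handler or calling its handler with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED. A stripped-down, stand-alone sketch of the same two-phase idea is below; all names and the status value are invented.

/* toy_abort.c - sketch of 'collect under the lock, complete outside it' */
#include <stdio.h>

#define TOY_ABORTED 0xdead

struct toy_req { int rpi; int status; };

int main(void)
{
	struct toy_req txq[] = { { 5, 0 }, { 7, 0 }, { 5, 0 } };
	struct toy_req *completions[3];
	int i, n = 0;

	/* phase 1: would run under the lock - pull out everything matching rpi 5 */
	for (i = 0; i < 3; i++)
		if (txq[i].rpi == 5)
			completions[n++] = &txq[i];

	/* phase 2: lock dropped - now finish each request with an aborted status */
	for (i = 0; i < n; i++)
		completions[i]->status = TOY_ABORTED;

	printf("aborted %d of 3 queued requests\n", n);
	return 0;
}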
@@ -1512,19 +1876,22 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | |||
1512 | * we are waiting to PLOGI back to the remote NPort. | 1876 | * we are waiting to PLOGI back to the remote NPort. |
1513 | */ | 1877 | */ |
1514 | int | 1878 | int |
1515 | lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | 1879 | lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
1516 | { | 1880 | { |
1517 | LPFC_MBOXQ_t *mbox; | 1881 | struct lpfc_hba *phba = vport->phba; |
1882 | LPFC_MBOXQ_t *mbox; | ||
1518 | int rc; | 1883 | int rc; |
1519 | 1884 | ||
1520 | if (ndlp->nlp_rpi) { | 1885 | if (ndlp->nlp_rpi) { |
1521 | if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { | 1886 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
1522 | lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox); | 1887 | if (mbox) { |
1523 | mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl; | 1888 | lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); |
1524 | rc = lpfc_sli_issue_mbox | 1889 | mbox->vport = vport; |
1525 | (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); | 1890 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
1891 | rc = lpfc_sli_issue_mbox(phba, mbox, | ||
1892 | (MBX_NOWAIT | MBX_STOP_IOCB)); | ||
1526 | if (rc == MBX_NOT_FINISHED) | 1893 | if (rc == MBX_NOT_FINISHED) |
1527 | mempool_free( mbox, phba->mbox_mem_pool); | 1894 | mempool_free(mbox, phba->mbox_mem_pool); |
1528 | } | 1895 | } |
1529 | lpfc_no_rpi(phba, ndlp); | 1896 | lpfc_no_rpi(phba, ndlp); |
1530 | ndlp->nlp_rpi = 0; | 1897 | ndlp->nlp_rpi = 0; |
@@ -1533,25 +1900,70 @@ lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | |||
1533 | return 0; | 1900 | return 0; |
1534 | } | 1901 | } |
1535 | 1902 | ||
1903 | void | ||
1904 | lpfc_unreg_all_rpis(struct lpfc_vport *vport) | ||
1905 | { | ||
1906 | struct lpfc_hba *phba = vport->phba; | ||
1907 | LPFC_MBOXQ_t *mbox; | ||
1908 | int rc; | ||
1909 | |||
1910 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
1911 | if (mbox) { | ||
1912 | lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); | ||
1913 | mbox->vport = vport; | ||
1914 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
1915 | rc = lpfc_sli_issue_mbox(phba, mbox, | ||
1916 | (MBX_NOWAIT | MBX_STOP_IOCB)); | ||
1917 | if (rc == MBX_NOT_FINISHED) { | ||
1918 | mempool_free(mbox, phba->mbox_mem_pool); | ||
1919 | } | ||
1920 | } | ||
1921 | } | ||
1922 | |||
1923 | void | ||
1924 | lpfc_unreg_default_rpis(struct lpfc_vport *vport) | ||
1925 | { | ||
1926 | struct lpfc_hba *phba = vport->phba; | ||
1927 | LPFC_MBOXQ_t *mbox; | ||
1928 | int rc; | ||
1929 | |||
1930 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
1931 | if (mbox) { | ||
1932 | lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox); | ||
1933 | mbox->vport = vport; | ||
1934 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
1935 | rc = lpfc_sli_issue_mbox(phba, mbox, | ||
1936 | (MBX_NOWAIT | MBX_STOP_IOCB)); | ||
1937 | if (rc == MBX_NOT_FINISHED) { | ||
1938 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT, | ||
1939 | "%d (%d):1815 Could not issue " | ||
1940 | "unreg_did (default rpis)\n", | ||
1941 | phba->brd_no, vport->vpi); | ||
1942 | mempool_free(mbox, phba->mbox_mem_pool); | ||
1943 | } | ||
1944 | } | ||
1945 | } | ||
1946 | |||
1536 | /* | 1947 | /* |
1537 | * Free resources associated with LPFC_NODELIST entry | 1948 | * Free resources associated with LPFC_NODELIST entry |
1538 | * so it can be freed. | 1949 | * so it can be freed. |
1539 | */ | 1950 | */ |
1540 | static int | 1951 | static int |
1541 | lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | 1952 | lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
1542 | { | 1953 | { |
1543 | LPFC_MBOXQ_t *mb; | 1954 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
1544 | LPFC_MBOXQ_t *nextmb; | 1955 | struct lpfc_hba *phba = vport->phba; |
1956 | LPFC_MBOXQ_t *mb, *nextmb; | ||
1545 | struct lpfc_dmabuf *mp; | 1957 | struct lpfc_dmabuf *mp; |
1546 | 1958 | ||
1547 | /* Cleanup node for NPort <nlp_DID> */ | 1959 | /* Cleanup node for NPort <nlp_DID> */ |
1548 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, | 1960 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, |
1549 | "%d:0900 Cleanup node for NPort x%x " | 1961 | "%d (%d):0900 Cleanup node for NPort x%x " |
1550 | "Data: x%x x%x x%x\n", | 1962 | "Data: x%x x%x x%x\n", |
1551 | phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, | 1963 | phba->brd_no, vport->vpi, ndlp->nlp_DID, ndlp->nlp_flag, |
1552 | ndlp->nlp_state, ndlp->nlp_rpi); | 1964 | ndlp->nlp_state, ndlp->nlp_rpi); |
1553 | 1965 | ||
1554 | lpfc_dequeue_node(phba, ndlp); | 1966 | lpfc_dequeue_node(vport, ndlp); |
1555 | 1967 | ||
1556 | /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ | 1968 | /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ |
1557 | if ((mb = phba->sli.mbox_active)) { | 1969 | if ((mb = phba->sli.mbox_active)) { |
@@ -1562,13 +1974,13 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | |||
1562 | } | 1974 | } |
1563 | } | 1975 | } |
1564 | 1976 | ||
1565 | spin_lock_irq(phba->host->host_lock); | 1977 | spin_lock_irq(&phba->hbalock); |
1566 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { | 1978 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { |
1567 | if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && | 1979 | if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && |
1568 | (ndlp == (struct lpfc_nodelist *) mb->context2)) { | 1980 | (ndlp == (struct lpfc_nodelist *) mb->context2)) { |
1569 | mp = (struct lpfc_dmabuf *) (mb->context1); | 1981 | mp = (struct lpfc_dmabuf *) (mb->context1); |
1570 | if (mp) { | 1982 | if (mp) { |
1571 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 1983 | __lpfc_mbuf_free(phba, mp->virt, mp->phys); |
1572 | kfree(mp); | 1984 | kfree(mp); |
1573 | } | 1985 | } |
1574 | list_del(&mb->list); | 1986 | list_del(&mb->list); |
@@ -1576,20 +1988,27 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | |||
1576 | lpfc_nlp_put(ndlp); | 1988 | lpfc_nlp_put(ndlp); |
1577 | } | 1989 | } |
1578 | } | 1990 | } |
1579 | spin_unlock_irq(phba->host->host_lock); | 1991 | spin_unlock_irq(&phba->hbalock); |
1580 | 1992 | ||
1581 | lpfc_els_abort(phba,ndlp); | 1993 | lpfc_els_abort(phba,ndlp); |
1582 | spin_lock_irq(phba->host->host_lock); | 1994 | spin_lock_irq(shost->host_lock); |
1583 | ndlp->nlp_flag &= ~NLP_DELAY_TMO; | 1995 | ndlp->nlp_flag &= ~NLP_DELAY_TMO; |
1584 | spin_unlock_irq(phba->host->host_lock); | 1996 | spin_unlock_irq(shost->host_lock); |
1585 | 1997 | ||
1586 | ndlp->nlp_last_elscmd = 0; | 1998 | ndlp->nlp_last_elscmd = 0; |
1587 | del_timer_sync(&ndlp->nlp_delayfunc); | 1999 | del_timer_sync(&ndlp->nlp_delayfunc); |
1588 | 2000 | ||
1589 | if (!list_empty(&ndlp->els_retry_evt.evt_listp)) | 2001 | if (!list_empty(&ndlp->els_retry_evt.evt_listp)) |
1590 | list_del_init(&ndlp->els_retry_evt.evt_listp); | 2002 | list_del_init(&ndlp->els_retry_evt.evt_listp); |
2003 | if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) | ||
2004 | list_del_init(&ndlp->dev_loss_evt.evt_listp); | ||
2005 | |||
2006 | if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) { | ||
2007 | list_del_init(&ndlp->dev_loss_evt.evt_listp); | ||
2008 | complete((struct completion *)(ndlp->dev_loss_evt.evt_arg2)); | ||
2009 | } | ||
1591 | 2010 | ||
1592 | lpfc_unreg_rpi(phba, ndlp); | 2011 | lpfc_unreg_rpi(vport, ndlp); |
1593 | 2012 | ||
1594 | return 0; | 2013 | return 0; |
1595 | } | 2014 | } |
@@ -1600,18 +2019,22 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | |||
1600 | * machine, defer the free till we reach the end of the state machine. | 2019 | * machine, defer the free till we reach the end of the state machine. |
1601 | */ | 2020 | */ |
1602 | static void | 2021 | static void |
1603 | lpfc_nlp_remove(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | 2022 | lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
1604 | { | 2023 | { |
1605 | struct lpfc_rport_data *rdata; | 2024 | struct lpfc_rport_data *rdata; |
1606 | 2025 | ||
1607 | if (ndlp->nlp_flag & NLP_DELAY_TMO) { | 2026 | if (ndlp->nlp_flag & NLP_DELAY_TMO) { |
1608 | lpfc_cancel_retry_delay_tmo(phba, ndlp); | 2027 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
1609 | } | 2028 | } |
1610 | 2029 | ||
1611 | lpfc_cleanup_node(phba, ndlp); | 2030 | lpfc_cleanup_node(vport, ndlp); |
1612 | 2031 | ||
1613 | if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) { | 2032 | /* |
1614 | put_device(&ndlp->rport->dev); | 2033 | * We can get here with a non-NULL ndlp->rport because when we |
2034 | * unregister a rport we don't break the rport/node linkage. So if we | ||
2035 | * do, make sure we don't leave any dangling pointers behind. | ||
2036 | */ | ||
2037 | if (ndlp->rport) { | ||
1615 | rdata = ndlp->rport->dd_data; | 2038 | rdata = ndlp->rport->dd_data; |
1616 | rdata->pnode = NULL; | 2039 | rdata->pnode = NULL; |
1617 | ndlp->rport = NULL; | 2040 | ndlp->rport = NULL; |
@@ -1619,11 +2042,10 @@ lpfc_nlp_remove(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
1619 | } | 2042 | } |
1620 | 2043 | ||
1621 | static int | 2044 | static int |
1622 | lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did) | 2045 | lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, |
2046 | uint32_t did) | ||
1623 | { | 2047 | { |
1624 | D_ID mydid; | 2048 | D_ID mydid, ndlpdid, matchdid; |
1625 | D_ID ndlpdid; | ||
1626 | D_ID matchdid; | ||
1627 | 2049 | ||
1628 | if (did == Bcast_DID) | 2050 | if (did == Bcast_DID) |
1629 | return 0; | 2051 | return 0; |
@@ -1637,7 +2059,7 @@ lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did) | |||
1637 | return 1; | 2059 | return 1; |
1638 | 2060 | ||
1639 | /* Next check for area/domain identically equals 0 match */ | 2061 | /* Next check for area/domain identically equals 0 match */ |
1640 | mydid.un.word = phba->fc_myDID; | 2062 | mydid.un.word = vport->fc_myDID; |
1641 | if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) { | 2063 | if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) { |
1642 | return 0; | 2064 | return 0; |
1643 | } | 2065 | } |
@@ -1669,101 +2091,116 @@ lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did) | |||
1669 | } | 2091 | } |
1670 | 2092 | ||
1671 | /* Search for a nodelist entry */ | 2093 | /* Search for a nodelist entry */ |
1672 | struct lpfc_nodelist * | 2094 | static struct lpfc_nodelist * |
1673 | lpfc_findnode_did(struct lpfc_hba *phba, uint32_t did) | 2095 | __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) |
1674 | { | 2096 | { |
2097 | struct lpfc_hba *phba = vport->phba; | ||
1675 | struct lpfc_nodelist *ndlp; | 2098 | struct lpfc_nodelist *ndlp; |
1676 | uint32_t data1; | 2099 | uint32_t data1; |
1677 | 2100 | ||
1678 | spin_lock_irq(phba->host->host_lock); | 2101 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
1679 | list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { | 2102 | if (lpfc_matchdid(vport, ndlp, did)) { |
1680 | if (lpfc_matchdid(phba, ndlp, did)) { | ||
1681 | data1 = (((uint32_t) ndlp->nlp_state << 24) | | 2103 | data1 = (((uint32_t) ndlp->nlp_state << 24) | |
1682 | ((uint32_t) ndlp->nlp_xri << 16) | | 2104 | ((uint32_t) ndlp->nlp_xri << 16) | |
1683 | ((uint32_t) ndlp->nlp_type << 8) | | 2105 | ((uint32_t) ndlp->nlp_type << 8) | |
1684 | ((uint32_t) ndlp->nlp_rpi & 0xff)); | 2106 | ((uint32_t) ndlp->nlp_rpi & 0xff)); |
1685 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, | 2107 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, |
1686 | "%d:0929 FIND node DID " | 2108 | "%d (%d):0929 FIND node DID " |
1687 | " Data: x%p x%x x%x x%x\n", | 2109 | " Data: x%p x%x x%x x%x\n", |
1688 | phba->brd_no, | 2110 | phba->brd_no, vport->vpi, |
1689 | ndlp, ndlp->nlp_DID, | 2111 | ndlp, ndlp->nlp_DID, |
1690 | ndlp->nlp_flag, data1); | 2112 | ndlp->nlp_flag, data1); |
1691 | spin_unlock_irq(phba->host->host_lock); | ||
1692 | return ndlp; | 2113 | return ndlp; |
1693 | } | 2114 | } |
1694 | } | 2115 | } |
1695 | spin_unlock_irq(phba->host->host_lock); | ||
1696 | 2116 | ||
1697 | /* FIND node did <did> NOT FOUND */ | 2117 | /* FIND node did <did> NOT FOUND */ |
1698 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, | 2118 | lpfc_printf_log(phba, KERN_INFO, LOG_NODE, |
1699 | "%d:0932 FIND node did x%x NOT FOUND.\n", | 2119 | "%d (%d):0932 FIND node did x%x NOT FOUND.\n", |
1700 | phba->brd_no, did); | 2120 | phba->brd_no, vport->vpi, did); |
1701 | return NULL; | 2121 | return NULL; |
1702 | } | 2122 | } |
1703 | 2123 | ||
1704 | struct lpfc_nodelist * | 2124 | struct lpfc_nodelist * |
1705 | lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did) | 2125 | lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) |
1706 | { | 2126 | { |
2127 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
1707 | struct lpfc_nodelist *ndlp; | 2128 | struct lpfc_nodelist *ndlp; |
1708 | 2129 | ||
1709 | ndlp = lpfc_findnode_did(phba, did); | 2130 | spin_lock_irq(shost->host_lock); |
2131 | ndlp = __lpfc_findnode_did(vport, did); | ||
2132 | spin_unlock_irq(shost->host_lock); | ||
2133 | return ndlp; | ||
2134 | } | ||
2135 | |||
2136 | struct lpfc_nodelist * | ||
2137 | lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) | ||
2138 | { | ||
2139 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
2140 | struct lpfc_nodelist *ndlp; | ||
2141 | |||
2142 | ndlp = lpfc_findnode_did(vport, did); | ||
1710 | if (!ndlp) { | 2143 | if (!ndlp) { |
1711 | if ((phba->fc_flag & FC_RSCN_MODE) && | 2144 | if ((vport->fc_flag & FC_RSCN_MODE) != 0 && |
1712 | ((lpfc_rscn_payload_check(phba, did) == 0))) | 2145 | lpfc_rscn_payload_check(vport, did) == 0) |
1713 | return NULL; | 2146 | return NULL; |
1714 | ndlp = (struct lpfc_nodelist *) | 2147 | ndlp = (struct lpfc_nodelist *) |
1715 | mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 2148 | mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL); |
1716 | if (!ndlp) | 2149 | if (!ndlp) |
1717 | return NULL; | 2150 | return NULL; |
1718 | lpfc_nlp_init(phba, ndlp, did); | 2151 | lpfc_nlp_init(vport, ndlp, did); |
1719 | lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); | 2152 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
2153 | spin_lock_irq(shost->host_lock); | ||
1720 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; | 2154 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
2155 | spin_unlock_irq(shost->host_lock); | ||
1721 | return ndlp; | 2156 | return ndlp; |
1722 | } | 2157 | } |
1723 | if (phba->fc_flag & FC_RSCN_MODE) { | 2158 | if (vport->fc_flag & FC_RSCN_MODE) { |
1724 | if (lpfc_rscn_payload_check(phba, did)) { | 2159 | if (lpfc_rscn_payload_check(vport, did)) { |
2160 | spin_lock_irq(shost->host_lock); | ||
1725 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; | 2161 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
2162 | spin_unlock_irq(shost->host_lock); | ||
1726 | 2163 | ||
1727 | /* Since this node is marked for discovery, | 2164 | /* Since this node is marked for discovery, |
1728 | * delay timeout is not needed. | 2165 | * delay timeout is not needed. |
1729 | */ | 2166 | */ |
1730 | if (ndlp->nlp_flag & NLP_DELAY_TMO) | 2167 | if (ndlp->nlp_flag & NLP_DELAY_TMO) |
1731 | lpfc_cancel_retry_delay_tmo(phba, ndlp); | 2168 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
1732 | } else | 2169 | } else |
1733 | ndlp = NULL; | 2170 | ndlp = NULL; |
1734 | } else { | 2171 | } else { |
1735 | if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || | 2172 | if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || |
1736 | ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) | 2173 | ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) |
1737 | return NULL; | 2174 | return NULL; |
1738 | lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); | 2175 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
2176 | spin_lock_irq(shost->host_lock); | ||
1739 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; | 2177 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
2178 | spin_unlock_irq(shost->host_lock); | ||
1740 | } | 2179 | } |
1741 | return ndlp; | 2180 | return ndlp; |
1742 | } | 2181 | } |
1743 | 2182 | ||
1744 | /* Build a list of nodes to discover based on the loopmap */ | 2183 | /* Build a list of nodes to discover based on the loopmap */ |
1745 | void | 2184 | void |
1746 | lpfc_disc_list_loopmap(struct lpfc_hba * phba) | 2185 | lpfc_disc_list_loopmap(struct lpfc_vport *vport) |
1747 | { | 2186 | { |
2187 | struct lpfc_hba *phba = vport->phba; | ||
1748 | int j; | 2188 | int j; |
1749 | uint32_t alpa, index; | 2189 | uint32_t alpa, index; |
1750 | 2190 | ||
1751 | if (phba->hba_state <= LPFC_LINK_DOWN) { | 2191 | if (!lpfc_is_link_up(phba)) |
1752 | return; | 2192 | return; |
1753 | } | 2193 | |
1754 | if (phba->fc_topology != TOPOLOGY_LOOP) { | 2194 | if (phba->fc_topology != TOPOLOGY_LOOP) |
1755 | return; | 2195 | return; |
1756 | } | ||
1757 | 2196 | ||
1758 | /* Check for loop map present or not */ | 2197 | /* Check for loop map present or not */ |
1759 | if (phba->alpa_map[0]) { | 2198 | if (phba->alpa_map[0]) { |
1760 | for (j = 1; j <= phba->alpa_map[0]; j++) { | 2199 | for (j = 1; j <= phba->alpa_map[0]; j++) { |
1761 | alpa = phba->alpa_map[j]; | 2200 | alpa = phba->alpa_map[j]; |
1762 | 2201 | if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0)) | |
1763 | if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) { | ||
1764 | continue; | 2202 | continue; |
1765 | } | 2203 | lpfc_setup_disc_node(vport, alpa); |
1766 | lpfc_setup_disc_node(phba, alpa); | ||
1767 | } | 2204 | } |
1768 | } else { | 2205 | } else { |
1769 | /* No alpamap, so try all alpa's */ | 2206 | /* No alpamap, so try all alpa's */ |
@@ -1776,113 +2213,167 @@ lpfc_disc_list_loopmap(struct lpfc_hba * phba) | |||
1776 | else | 2213 | else |
1777 | index = FC_MAXLOOP - j - 1; | 2214 | index = FC_MAXLOOP - j - 1; |
1778 | alpa = lpfcAlpaArray[index]; | 2215 | alpa = lpfcAlpaArray[index]; |
1779 | if ((phba->fc_myDID & 0xff) == alpa) { | 2216 | if ((vport->fc_myDID & 0xff) == alpa) |
1780 | continue; | 2217 | continue; |
1781 | } | 2218 | lpfc_setup_disc_node(vport, alpa); |
1782 | |||
1783 | lpfc_setup_disc_node(phba, alpa); | ||
1784 | } | 2219 | } |
1785 | } | 2220 | } |
1786 | return; | 2221 | return; |
1787 | } | 2222 | } |
1788 | 2223 | ||
1789 | /* Start Link up / RSCN discovery on NPR list */ | ||
1790 | void | 2224 | void |
1791 | lpfc_disc_start(struct lpfc_hba * phba) | 2225 | lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) |
1792 | { | 2226 | { |
1793 | struct lpfc_sli *psli; | ||
1794 | LPFC_MBOXQ_t *mbox; | 2227 | LPFC_MBOXQ_t *mbox; |
1795 | struct lpfc_nodelist *ndlp, *next_ndlp; | 2228 | struct lpfc_sli *psli = &phba->sli; |
2229 | struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring]; | ||
2230 | struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring]; | ||
2231 | struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring]; | ||
2232 | int rc; | ||
2233 | |||
2234 | /* | ||
2235 | * if it's not a physical port or if we already sent | ||
2236 | * clear_la then don't send it. | ||
2237 | */ | ||
2238 | if ((phba->link_state >= LPFC_CLEAR_LA) || | ||
2239 | (vport->port_type != LPFC_PHYSICAL_PORT)) | ||
2240 | return; | ||
2241 | |||
2242 | /* Link up discovery */ | ||
2243 | if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) { | ||
2244 | phba->link_state = LPFC_CLEAR_LA; | ||
2245 | lpfc_clear_la(phba, mbox); | ||
2246 | mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; | ||
2247 | mbox->vport = vport; | ||
2248 | rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | | ||
2249 | MBX_STOP_IOCB)); | ||
2250 | if (rc == MBX_NOT_FINISHED) { | ||
2251 | mempool_free(mbox, phba->mbox_mem_pool); | ||
2252 | lpfc_disc_flush_list(vport); | ||
2253 | extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT; | ||
2254 | fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT; | ||
2255 | next_ring->flag &= ~LPFC_STOP_IOCB_EVENT; | ||
2256 | phba->link_state = LPFC_HBA_ERROR; | ||
2257 | } | ||
2258 | } | ||
2259 | } | ||
2260 | |||
2261 | /* Reg_vpi to tell firmware to resume normal operations */ | ||
2262 | void | ||
2263 | lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) | ||
2264 | { | ||
2265 | LPFC_MBOXQ_t *regvpimbox; | ||
2266 | |||
2267 | regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
2268 | if (regvpimbox) { | ||
2269 | lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); | ||
2270 | regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; | ||
2271 | regvpimbox->vport = vport; | ||
2272 | if (lpfc_sli_issue_mbox(phba, regvpimbox, | ||
2273 | (MBX_NOWAIT | MBX_STOP_IOCB)) | ||
2274 | == MBX_NOT_FINISHED) { | ||
2275 | mempool_free(regvpimbox, phba->mbox_mem_pool); | ||
2276 | } | ||
2277 | } | ||
2278 | } | ||
2279 | |||
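lpfc_issue_clear_la() and lpfc_issue_reg_vpi() above both follow the same submit-or-clean-up rule for asynchronous mailbox commands: allocate from the mailbox mempool, attach a completion handler and the owning vport, issue with MBX_NOWAIT, and free the command immediately only if the SLI layer refuses it (MBX_NOT_FINISHED); on success the completion handler owns the buffer. The following is a minimal stand-alone sketch of that ownership rule; all names here are invented for illustration and are not lpfc or kernel APIs.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct cmd;
typedef void (*cmpl_fn)(struct cmd *);

struct cmd {
    int opcode;
    cmpl_fn cmpl;              /* invoked by the submission layer on completion */
};

/* Pretend submission layer: returns false if the command cannot be queued. */
static bool submit_nowait(struct cmd *c)
{
    if (!c->cmpl)
        return false;          /* nothing would ever free it */
    c->cmpl(c);                /* simulate immediate completion for the sketch */
    return true;
}

static void clear_la_cmpl(struct cmd *c)
{
    printf("CLEAR_LA done (opcode %d)\n", c->opcode);
    free(c);                   /* completion handler owns and frees the buffer */
}

static void issue_clear_la(void)
{
    struct cmd *c = malloc(sizeof(*c));
    if (!c)
        return;
    c->opcode = 0x16;          /* arbitrary value for the sketch */
    c->cmpl = clear_la_cmpl;
    if (!submit_nowait(c))
        free(c);               /* submission refused: the caller cleans up */
}

int main(void)
{
    issue_clear_la();
    return 0;
}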
2280 | /* Start Link up / RSCN discovery on NPR nodes */ | ||
2281 | void | ||
2282 | lpfc_disc_start(struct lpfc_vport *vport) | ||
2283 | { | ||
2284 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
2285 | struct lpfc_hba *phba = vport->phba; | ||
1796 | uint32_t num_sent; | 2286 | uint32_t num_sent; |
1797 | uint32_t clear_la_pending; | 2287 | uint32_t clear_la_pending; |
1798 | int did_changed; | 2288 | int did_changed; |
1799 | int rc; | ||
1800 | 2289 | ||
1801 | psli = &phba->sli; | 2290 | if (!lpfc_is_link_up(phba)) |
1802 | |||
1803 | if (phba->hba_state <= LPFC_LINK_DOWN) { | ||
1804 | return; | 2291 | return; |
1805 | } | 2292 | |
1806 | if (phba->hba_state == LPFC_CLEAR_LA) | 2293 | if (phba->link_state == LPFC_CLEAR_LA) |
1807 | clear_la_pending = 1; | 2294 | clear_la_pending = 1; |
1808 | else | 2295 | else |
1809 | clear_la_pending = 0; | 2296 | clear_la_pending = 0; |
1810 | 2297 | ||
1811 | if (phba->hba_state < LPFC_HBA_READY) { | 2298 | if (vport->port_state < LPFC_VPORT_READY) |
1812 | phba->hba_state = LPFC_DISC_AUTH; | 2299 | vport->port_state = LPFC_DISC_AUTH; |
1813 | } | ||
1814 | lpfc_set_disctmo(phba); | ||
1815 | 2300 | ||
1816 | if (phba->fc_prevDID == phba->fc_myDID) { | 2301 | lpfc_set_disctmo(vport); |
2302 | |||
2303 | if (vport->fc_prevDID == vport->fc_myDID) | ||
1817 | did_changed = 0; | 2304 | did_changed = 0; |
1818 | } else { | 2305 | else |
1819 | did_changed = 1; | 2306 | did_changed = 1; |
1820 | } | 2307 | |
1821 | phba->fc_prevDID = phba->fc_myDID; | 2308 | vport->fc_prevDID = vport->fc_myDID; |
1822 | phba->num_disc_nodes = 0; | 2309 | vport->num_disc_nodes = 0; |
1823 | 2310 | ||
1824 | /* Start Discovery state <hba_state> */ | 2311 | /* Start Discovery state <hba_state> */ |
1825 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, | 2312 | lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, |
1826 | "%d:0202 Start Discovery hba state x%x " | 2313 | "%d (%d):0202 Start Discovery hba state x%x " |
1827 | "Data: x%x x%x x%x\n", | 2314 | "Data: x%x x%x x%x\n", |
1828 | phba->brd_no, phba->hba_state, phba->fc_flag, | 2315 | phba->brd_no, vport->vpi, vport->port_state, |
1829 | phba->fc_plogi_cnt, phba->fc_adisc_cnt); | 2316 | vport->fc_flag, vport->fc_plogi_cnt, |
1830 | 2317 | vport->fc_adisc_cnt); | |
1831 | /* If our did changed, we MUST do PLOGI */ | ||
1832 | list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) { | ||
1833 | if (ndlp->nlp_state == NLP_STE_NPR_NODE && | ||
1834 | (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && | ||
1835 | did_changed) { | ||
1836 | spin_lock_irq(phba->host->host_lock); | ||
1837 | ndlp->nlp_flag &= ~NLP_NPR_ADISC; | ||
1838 | spin_unlock_irq(phba->host->host_lock); | ||
1839 | } | ||
1840 | } | ||
1841 | 2318 | ||
1842 | /* First do ADISCs - if any */ | 2319 | /* First do ADISCs - if any */ |
1843 | num_sent = lpfc_els_disc_adisc(phba); | 2320 | num_sent = lpfc_els_disc_adisc(vport); |
1844 | 2321 | ||
1845 | if (num_sent) | 2322 | if (num_sent) |
1846 | return; | 2323 | return; |
1847 | 2324 | ||
1848 | if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) { | 2325 | /* |
2326 | * For SLI3, cmpl_reg_vpi will set port_state to READY, and | ||
2327 | * continue discovery. | ||
2328 | */ | ||
2329 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && | ||
2330 | !(vport->fc_flag & FC_RSCN_MODE)) { | ||
2331 | lpfc_issue_reg_vpi(phba, vport); | ||
2332 | return; | ||
2333 | } | ||
2334 | |||
2335 | /* | ||
2336 | * For SLI2, we need to set port_state to READY and continue | ||
2337 | * discovery. | ||
2338 | */ | ||
2339 | if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) { | ||
1849 | /* If we get here, there is nothing to ADISC */ | 2340 | /* If we get here, there is nothing to ADISC */ |
1850 | if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { | 2341 | if (vport->port_type == LPFC_PHYSICAL_PORT) |
1851 | phba->hba_state = LPFC_CLEAR_LA; | 2342 | lpfc_issue_clear_la(phba, vport); |
1852 | lpfc_clear_la(phba, mbox); | 2343 | |
1853 | mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; | 2344 | if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { |
1854 | rc = lpfc_sli_issue_mbox(phba, mbox, | 2345 | vport->num_disc_nodes = 0; |
1855 | (MBX_NOWAIT | MBX_STOP_IOCB)); | 2346 | /* go thru NPR nodes and issue ELS PLOGIs */ |
1856 | if (rc == MBX_NOT_FINISHED) { | 2347 | if (vport->fc_npr_cnt) |
1857 | mempool_free( mbox, phba->mbox_mem_pool); | 2348 | lpfc_els_disc_plogi(vport); |
1858 | lpfc_disc_flush_list(phba); | 2349 | |
1859 | psli->ring[(psli->extra_ring)].flag &= | 2350 | if (!vport->num_disc_nodes) { |
1860 | ~LPFC_STOP_IOCB_EVENT; | 2351 | spin_lock_irq(shost->host_lock); |
1861 | psli->ring[(psli->fcp_ring)].flag &= | 2352 | vport->fc_flag &= ~FC_NDISC_ACTIVE; |
1862 | ~LPFC_STOP_IOCB_EVENT; | 2353 | spin_unlock_irq(shost->host_lock); |
1863 | psli->ring[(psli->next_ring)].flag &= | 2354 | lpfc_can_disctmo(vport); |
1864 | ~LPFC_STOP_IOCB_EVENT; | ||
1865 | phba->hba_state = LPFC_HBA_READY; | ||
1866 | } | 2355 | } |
1867 | } | 2356 | } |
2357 | vport->port_state = LPFC_VPORT_READY; | ||
1868 | } else { | 2358 | } else { |
1869 | /* Next do PLOGIs - if any */ | 2359 | /* Next do PLOGIs - if any */ |
1870 | num_sent = lpfc_els_disc_plogi(phba); | 2360 | num_sent = lpfc_els_disc_plogi(vport); |
1871 | 2361 | ||
1872 | if (num_sent) | 2362 | if (num_sent) |
1873 | return; | 2363 | return; |
1874 | 2364 | ||
1875 | if (phba->fc_flag & FC_RSCN_MODE) { | 2365 | if (vport->fc_flag & FC_RSCN_MODE) { |
1876 | /* Check to see if more RSCNs came in while we | 2366 | /* Check to see if more RSCNs came in while we |
1877 | * were processing this one. | 2367 | * were processing this one. |
1878 | */ | 2368 | */ |
1879 | if ((phba->fc_rscn_id_cnt == 0) && | 2369 | if ((vport->fc_rscn_id_cnt == 0) && |
1880 | (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { | 2370 | (!(vport->fc_flag & FC_RSCN_DISCOVERY))) { |
1881 | spin_lock_irq(phba->host->host_lock); | 2371 | spin_lock_irq(shost->host_lock); |
1882 | phba->fc_flag &= ~FC_RSCN_MODE; | 2372 | vport->fc_flag &= ~FC_RSCN_MODE; |
1883 | spin_unlock_irq(phba->host->host_lock); | 2373 | spin_unlock_irq(shost->host_lock); |
2374 | lpfc_can_disctmo(vport); | ||
1884 | } else | 2375 | } else |
1885 | lpfc_els_handle_rscn(phba); | 2376 | lpfc_els_handle_rscn(vport); |
1886 | } | 2377 | } |
1887 | } | 2378 | } |
1888 | return; | 2379 | return; |
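The discovery start path above runs in two passes: ADISCs go out first (lpfc_els_disc_adisc) for nodes whose login can simply be revalidated, and only when no ADISCs remain outstanding does the code fall through to PLOGIs (lpfc_els_disc_plogi) and, on SLI-2, CLEAR_LA. Below is a small stand-alone sketch of that two-phase pattern; the node structure and flag names are hypothetical stand-ins, not the real lpfc structures.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for a per-vport node list entry. */
struct disc_node {
    unsigned int flags;
    struct disc_node *next;
};

#define NODE_2B_DISC   0x1     /* marked for (re)discovery        */
#define NODE_USE_ADISC 0x2     /* address re-validation suffices  */

/* Pass 1: issue ADISC to every node that can be revalidated; return count. */
static int disc_issue_adisc(struct disc_node *list)
{
    int sent = 0;
    for (struct disc_node *n = list; n; n = n->next)
        if ((n->flags & (NODE_2B_DISC | NODE_USE_ADISC)) ==
            (NODE_2B_DISC | NODE_USE_ADISC)) {
            printf("ADISC -> node %p\n", (void *)n);
            sent++;
        }
    return sent;
}

/* Pass 2: everything still marked for discovery gets a fresh PLOGI. */
static int disc_issue_plogi(struct disc_node *list)
{
    int sent = 0;
    for (struct disc_node *n = list; n; n = n->next)
        if ((n->flags & NODE_2B_DISC) && !(n->flags & NODE_USE_ADISC)) {
            printf("PLOGI -> node %p\n", (void *)n);
            sent++;
        }
    return sent;
}

static void disc_start(struct disc_node *list)
{
    if (disc_issue_adisc(list))
        return;                /* wait for ADISC completions first */
    disc_issue_plogi(list);    /* no ADISC work left, fall through  */
}

int main(void)
{
    struct disc_node b = { NODE_2B_DISC, NULL };
    struct disc_node a = { NODE_2B_DISC | NODE_USE_ADISC, &b };
    disc_start(&a);
    return 0;
}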
@@ -1893,7 +2384,7 @@ lpfc_disc_start(struct lpfc_hba * phba) | |||
1893 | * ring that match the specified nodelist. | 2384 | * ring that match the specified nodelist. |
1894 | */ | 2385 | */ |
1895 | static void | 2386 | static void |
1896 | lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | 2387 | lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) |
1897 | { | 2388 | { |
1898 | LIST_HEAD(completions); | 2389 | LIST_HEAD(completions); |
1899 | struct lpfc_sli *psli; | 2390 | struct lpfc_sli *psli; |
@@ -1907,7 +2398,7 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | |||
1907 | /* Error matching iocb on txq or txcmplq | 2398 | /* Error matching iocb on txq or txcmplq |
1908 | * First check the txq. | 2399 | * First check the txq. |
1909 | */ | 2400 | */ |
1910 | spin_lock_irq(phba->host->host_lock); | 2401 | spin_lock_irq(&phba->hbalock); |
1911 | list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { | 2402 | list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { |
1912 | if (iocb->context1 != ndlp) { | 2403 | if (iocb->context1 != ndlp) { |
1913 | continue; | 2404 | continue; |
@@ -1927,36 +2418,36 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) | |||
1927 | continue; | 2418 | continue; |
1928 | } | 2419 | } |
1929 | icmd = &iocb->iocb; | 2420 | icmd = &iocb->iocb; |
1930 | if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) || | 2421 | if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR || |
1931 | (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) { | 2422 | icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) { |
1932 | lpfc_sli_issue_abort_iotag(phba, pring, iocb); | 2423 | lpfc_sli_issue_abort_iotag(phba, pring, iocb); |
1933 | } | 2424 | } |
1934 | } | 2425 | } |
1935 | spin_unlock_irq(phba->host->host_lock); | 2426 | spin_unlock_irq(&phba->hbalock); |
1936 | 2427 | ||
1937 | while (!list_empty(&completions)) { | 2428 | while (!list_empty(&completions)) { |
1938 | iocb = list_get_first(&completions, struct lpfc_iocbq, list); | 2429 | iocb = list_get_first(&completions, struct lpfc_iocbq, list); |
1939 | list_del(&iocb->list); | 2430 | list_del_init(&iocb->list); |
1940 | 2431 | ||
1941 | if (iocb->iocb_cmpl) { | 2432 | if (!iocb->iocb_cmpl) |
2433 | lpfc_sli_release_iocbq(phba, iocb); | ||
2434 | else { | ||
1942 | icmd = &iocb->iocb; | 2435 | icmd = &iocb->iocb; |
1943 | icmd->ulpStatus = IOSTAT_LOCAL_REJECT; | 2436 | icmd->ulpStatus = IOSTAT_LOCAL_REJECT; |
1944 | icmd->un.ulpWord[4] = IOERR_SLI_ABORTED; | 2437 | icmd->un.ulpWord[4] = IOERR_SLI_ABORTED; |
1945 | (iocb->iocb_cmpl) (phba, iocb, iocb); | 2438 | (iocb->iocb_cmpl) (phba, iocb, iocb); |
1946 | } else | 2439 | } |
1947 | lpfc_sli_release_iocbq(phba, iocb); | ||
1948 | } | 2440 | } |
1949 | |||
1950 | return; | ||
1951 | } | 2441 | } |
1952 | 2442 | ||
1953 | void | 2443 | void |
1954 | lpfc_disc_flush_list(struct lpfc_hba * phba) | 2444 | lpfc_disc_flush_list(struct lpfc_vport *vport) |
1955 | { | 2445 | { |
1956 | struct lpfc_nodelist *ndlp, *next_ndlp; | 2446 | struct lpfc_nodelist *ndlp, *next_ndlp; |
2447 | struct lpfc_hba *phba = vport->phba; | ||
1957 | 2448 | ||
1958 | if (phba->fc_plogi_cnt || phba->fc_adisc_cnt) { | 2449 | if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { |
1959 | list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, | 2450 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, |
1960 | nlp_listp) { | 2451 | nlp_listp) { |
1961 | if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || | 2452 | if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || |
1962 | ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { | 2453 | ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { |
@@ -1967,6 +2458,14 @@ lpfc_disc_flush_list(struct lpfc_hba * phba) | |||
1967 | } | 2458 | } |
1968 | } | 2459 | } |
1969 | 2460 | ||
2461 | void | ||
2462 | lpfc_cleanup_discovery_resources(struct lpfc_vport *vport) | ||
2463 | { | ||
2464 | lpfc_els_flush_rscn(vport); | ||
2465 | lpfc_els_flush_cmd(vport); | ||
2466 | lpfc_disc_flush_list(vport); | ||
2467 | } | ||
2468 | |||
1970 | /*****************************************************************************/ | 2469 | /*****************************************************************************/ |
1971 | /* | 2470 | /* |
1972 | * NAME: lpfc_disc_timeout | 2471 | * NAME: lpfc_disc_timeout |
@@ -1985,158 +2484,154 @@ lpfc_disc_flush_list(struct lpfc_hba * phba) | |||
1985 | void | 2484 | void |
1986 | lpfc_disc_timeout(unsigned long ptr) | 2485 | lpfc_disc_timeout(unsigned long ptr) |
1987 | { | 2486 | { |
1988 | struct lpfc_hba *phba = (struct lpfc_hba *)ptr; | 2487 | struct lpfc_vport *vport = (struct lpfc_vport *) ptr; |
2488 | struct lpfc_hba *phba = vport->phba; | ||
1989 | unsigned long flags = 0; | 2489 | unsigned long flags = 0; |
1990 | 2490 | ||
1991 | if (unlikely(!phba)) | 2491 | if (unlikely(!phba)) |
1992 | return; | 2492 | return; |
1993 | 2493 | ||
1994 | spin_lock_irqsave(phba->host->host_lock, flags); | 2494 | if ((vport->work_port_events & WORKER_DISC_TMO) == 0) { |
1995 | if (!(phba->work_hba_events & WORKER_DISC_TMO)) { | 2495 | spin_lock_irqsave(&vport->work_port_lock, flags); |
1996 | phba->work_hba_events |= WORKER_DISC_TMO; | 2496 | vport->work_port_events |= WORKER_DISC_TMO; |
2497 | spin_unlock_irqrestore(&vport->work_port_lock, flags); | ||
2498 | |||
2499 | spin_lock_irqsave(&phba->hbalock, flags); | ||
1997 | if (phba->work_wait) | 2500 | if (phba->work_wait) |
1998 | wake_up(phba->work_wait); | 2501 | lpfc_worker_wake_up(phba); |
2502 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
1999 | } | 2503 | } |
2000 | spin_unlock_irqrestore(phba->host->host_lock, flags); | ||
2001 | return; | 2504 | return; |
2002 | } | 2505 | } |
2003 | 2506 | ||
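lpfc_disc_timeout() above does almost nothing in timer context: it records WORKER_DISC_TMO under the vport's work_port_lock and wakes the driver's worker thread, which later runs lpfc_disc_timeout_handler() in process context. The code below is a userspace analogue of that defer-to-worker pattern using pthreads in place of the kernel timer and worker thread; it is purely illustrative and not lpfc code.

#include <pthread.h>
#include <stdio.h>

#define WORKER_DISC_TMO 0x1

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  work_wait = PTHREAD_COND_INITIALIZER;
static unsigned int work_events;

/* "Timer callback": record the event and wake the worker, nothing more. */
static void disc_timeout(void)
{
    pthread_mutex_lock(&work_lock);
    if (!(work_events & WORKER_DISC_TMO)) {
        work_events |= WORKER_DISC_TMO;
        pthread_cond_signal(&work_wait);
    }
    pthread_mutex_unlock(&work_lock);
}

/* Worker thread: the heavy lifting happens here, outside timer context. */
static void *worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&work_lock);
    while (!(work_events & WORKER_DISC_TMO))
        pthread_cond_wait(&work_wait, &work_lock);
    work_events &= ~WORKER_DISC_TMO;
    pthread_mutex_unlock(&work_lock);

    printf("discovery timeout handler runs in worker context\n");
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    disc_timeout();
    pthread_join(t, NULL);
    return 0;
}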
2004 | static void | 2507 | static void |
2005 | lpfc_disc_timeout_handler(struct lpfc_hba *phba) | 2508 | lpfc_disc_timeout_handler(struct lpfc_vport *vport) |
2006 | { | 2509 | { |
2007 | struct lpfc_sli *psli; | 2510 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
2511 | struct lpfc_hba *phba = vport->phba; | ||
2512 | struct lpfc_sli *psli = &phba->sli; | ||
2008 | struct lpfc_nodelist *ndlp, *next_ndlp; | 2513 | struct lpfc_nodelist *ndlp, *next_ndlp; |
2009 | LPFC_MBOXQ_t *clearlambox, *initlinkmbox; | 2514 | LPFC_MBOXQ_t *initlinkmbox; |
2010 | int rc, clrlaerr = 0; | 2515 | int rc, clrlaerr = 0; |
2011 | 2516 | ||
2012 | if (unlikely(!phba)) | 2517 | if (!(vport->fc_flag & FC_DISC_TMO)) |
2013 | return; | 2518 | return; |
2014 | 2519 | ||
2015 | if (!(phba->fc_flag & FC_DISC_TMO)) | 2520 | spin_lock_irq(shost->host_lock); |
2016 | return; | 2521 | vport->fc_flag &= ~FC_DISC_TMO; |
2522 | spin_unlock_irq(shost->host_lock); | ||
2017 | 2523 | ||
2018 | psli = &phba->sli; | 2524 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
2525 | "disc timeout: state:x%x rtry:x%x flg:x%x", | ||
2526 | vport->port_state, vport->fc_ns_retry, vport->fc_flag); | ||
2019 | 2527 | ||
2020 | spin_lock_irq(phba->host->host_lock); | 2528 | switch (vport->port_state) { |
2021 | phba->fc_flag &= ~FC_DISC_TMO; | ||
2022 | spin_unlock_irq(phba->host->host_lock); | ||
2023 | |||
2024 | switch (phba->hba_state) { | ||
2025 | 2529 | ||
2026 | case LPFC_LOCAL_CFG_LINK: | 2530 | case LPFC_LOCAL_CFG_LINK: |
2027 | /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */ | 2531 | /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for |
2028 | /* FAN timeout */ | 2532 | * FAN |
2029 | lpfc_printf_log(phba, | 2533 | */ |
2030 | KERN_WARNING, | 2534 | /* FAN timeout */ |
2031 | LOG_DISCOVERY, | 2535 | lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY, |
2032 | "%d:0221 FAN timeout\n", | 2536 | "%d (%d):0221 FAN timeout\n", |
2033 | phba->brd_no); | 2537 | phba->brd_no, vport->vpi); |
2034 | 2538 | ||
2035 | /* Start discovery by sending FLOGI, clean up old rpis */ | 2539 | /* Start discovery by sending FLOGI, clean up old rpis */ |
2036 | list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, | 2540 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, |
2037 | nlp_listp) { | 2541 | nlp_listp) { |
2038 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) | 2542 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) |
2039 | continue; | 2543 | continue; |
2040 | if (ndlp->nlp_type & NLP_FABRIC) { | 2544 | if (ndlp->nlp_type & NLP_FABRIC) { |
2041 | /* Clean up the ndlp on Fabric connections */ | 2545 | /* Clean up the ndlp on Fabric connections */ |
2042 | lpfc_drop_node(phba, ndlp); | 2546 | lpfc_drop_node(vport, ndlp); |
2043 | } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { | 2547 | } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { |
2044 | /* Fail outstanding IO now since device | 2548 | /* Fail outstanding IO now since device |
2045 | * is marked for PLOGI. | 2549 | * is marked for PLOGI. |
2046 | */ | 2550 | */ |
2047 | lpfc_unreg_rpi(phba, ndlp); | 2551 | lpfc_unreg_rpi(vport, ndlp); |
2048 | } | 2552 | } |
2049 | } | 2553 | } |
2050 | phba->hba_state = LPFC_FLOGI; | 2554 | if (vport->port_state != LPFC_FLOGI) { |
2051 | lpfc_set_disctmo(phba); | 2555 | vport->port_state = LPFC_FLOGI; |
2052 | lpfc_initial_flogi(phba); | 2556 | lpfc_set_disctmo(vport); |
2557 | lpfc_initial_flogi(vport); | ||
2558 | } | ||
2053 | break; | 2559 | break; |
2054 | 2560 | ||
2561 | case LPFC_FDISC: | ||
2055 | case LPFC_FLOGI: | 2562 | case LPFC_FLOGI: |
2056 | /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ | 2563 | /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ |
2057 | /* Initial FLOGI timeout */ | 2564 | /* Initial FLOGI timeout */ |
2058 | lpfc_printf_log(phba, | 2565 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
2059 | KERN_ERR, | 2566 | "%d (%d):0222 Initial %s timeout\n", |
2060 | LOG_DISCOVERY, | 2567 | phba->brd_no, vport->vpi, |
2061 | "%d:0222 Initial FLOGI timeout\n", | 2568 | vport->vpi ? "FLOGI" : "FDISC"); |
2062 | phba->brd_no); | ||
2063 | 2569 | ||
2064 | /* Assume no Fabric and go on with discovery. | 2570 | /* Assume no Fabric and go on with discovery. |
2065 | * Check for outstanding ELS FLOGI to abort. | 2571 | * Check for outstanding ELS FLOGI to abort. |
2066 | */ | 2572 | */ |
2067 | 2573 | ||
2068 | /* FLOGI failed, so just use loop map to make discovery list */ | 2574 | /* FLOGI failed, so just use loop map to make discovery list */ |
2069 | lpfc_disc_list_loopmap(phba); | 2575 | lpfc_disc_list_loopmap(vport); |
2070 | 2576 | ||
2071 | /* Start discovery */ | 2577 | /* Start discovery */ |
2072 | lpfc_disc_start(phba); | 2578 | lpfc_disc_start(vport); |
2073 | break; | 2579 | break; |
2074 | 2580 | ||
2075 | case LPFC_FABRIC_CFG_LINK: | 2581 | case LPFC_FABRIC_CFG_LINK: |
2076 | /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for | 2582 | /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for |
2077 | NameServer login */ | 2583 | NameServer login */ |
2078 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | 2584 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
2079 | "%d:0223 Timeout while waiting for NameServer " | 2585 | "%d (%d):0223 Timeout while waiting for " |
2080 | "login\n", phba->brd_no); | 2586 | "NameServer login\n", |
2587 | phba->brd_no, vport->vpi); | ||
2081 | 2588 | ||
2082 | /* Next look for NameServer ndlp */ | 2589 | /* Next look for NameServer ndlp */ |
2083 | ndlp = lpfc_findnode_did(phba, NameServer_DID); | 2590 | ndlp = lpfc_findnode_did(vport, NameServer_DID); |
2084 | if (ndlp) | 2591 | if (ndlp) |
2085 | lpfc_nlp_put(ndlp); | 2592 | lpfc_nlp_put(ndlp); |
2086 | /* Start discovery */ | 2593 | /* Start discovery */ |
2087 | lpfc_disc_start(phba); | 2594 | lpfc_disc_start(vport); |
2088 | break; | 2595 | break; |
2089 | 2596 | ||
2090 | case LPFC_NS_QRY: | 2597 | case LPFC_NS_QRY: |
2091 | /* Check for wait for NameServer Rsp timeout */ | 2598 | /* Check for wait for NameServer Rsp timeout */ |
2092 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | 2599 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
2093 | "%d:0224 NameServer Query timeout " | 2600 | "%d (%d):0224 NameServer Query timeout " |
2094 | "Data: x%x x%x\n", | 2601 | "Data: x%x x%x\n", |
2095 | phba->brd_no, | 2602 | phba->brd_no, vport->vpi, |
2096 | phba->fc_ns_retry, LPFC_MAX_NS_RETRY); | 2603 | vport->fc_ns_retry, LPFC_MAX_NS_RETRY); |
2097 | 2604 | ||
2098 | ndlp = lpfc_findnode_did(phba, NameServer_DID); | 2605 | if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { |
2099 | if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { | 2606 | /* Try it one more time */ |
2100 | if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) { | 2607 | vport->fc_ns_retry++; |
2101 | /* Try it one more time */ | 2608 | rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, |
2102 | rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT); | 2609 | vport->fc_ns_retry, 0); |
2103 | if (rc == 0) | 2610 | if (rc == 0) |
2104 | break; | 2611 | break; |
2105 | } | ||
2106 | phba->fc_ns_retry = 0; | ||
2107 | } | 2612 | } |
2613 | vport->fc_ns_retry = 0; | ||
2108 | 2614 | ||
2109 | /* Nothing to authenticate, so CLEAR_LA right now */ | 2615 | /* |
2110 | clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 2616 | * Discovery is over. |
2111 | if (!clearlambox) { | 2617 | * set port_state to PORT_READY if SLI2. |
2112 | clrlaerr = 1; | 2618 | * cmpl_reg_vpi will set port_state to READY for SLI3. |
2113 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | 2619 | */ |
2114 | "%d:0226 Device Discovery " | 2620 | if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) |
2115 | "completion error\n", | 2621 | lpfc_issue_reg_vpi(phba, vport); |
2116 | phba->brd_no); | 2622 | else { /* NPIV Not enabled */ |
2117 | phba->hba_state = LPFC_HBA_ERROR; | 2623 | lpfc_issue_clear_la(phba, vport); |
2118 | break; | 2624 | vport->port_state = LPFC_VPORT_READY; |
2119 | } | ||
2120 | |||
2121 | phba->hba_state = LPFC_CLEAR_LA; | ||
2122 | lpfc_clear_la(phba, clearlambox); | ||
2123 | clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; | ||
2124 | rc = lpfc_sli_issue_mbox(phba, clearlambox, | ||
2125 | (MBX_NOWAIT | MBX_STOP_IOCB)); | ||
2126 | if (rc == MBX_NOT_FINISHED) { | ||
2127 | mempool_free(clearlambox, phba->mbox_mem_pool); | ||
2128 | clrlaerr = 1; | ||
2129 | break; | ||
2130 | } | 2625 | } |
2131 | 2626 | ||
2132 | /* Setup and issue mailbox INITIALIZE LINK command */ | 2627 | /* Setup and issue mailbox INITIALIZE LINK command */ |
2133 | initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 2628 | initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
2134 | if (!initlinkmbox) { | 2629 | if (!initlinkmbox) { |
2135 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | 2630 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
2136 | "%d:0206 Device Discovery " | 2631 | "%d (%d):0206 Device Discovery " |
2137 | "completion error\n", | 2632 | "completion error\n", |
2138 | phba->brd_no); | 2633 | phba->brd_no, vport->vpi); |
2139 | phba->hba_state = LPFC_HBA_ERROR; | 2634 | phba->link_state = LPFC_HBA_ERROR; |
2140 | break; | 2635 | break; |
2141 | } | 2636 | } |
2142 | 2637 | ||
@@ -2144,6 +2639,8 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) | |||
2144 | lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, | 2639 | lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, |
2145 | phba->cfg_link_speed); | 2640 | phba->cfg_link_speed); |
2146 | initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; | 2641 | initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; |
2642 | initlinkmbox->vport = vport; | ||
2643 | initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
2147 | rc = lpfc_sli_issue_mbox(phba, initlinkmbox, | 2644 | rc = lpfc_sli_issue_mbox(phba, initlinkmbox, |
2148 | (MBX_NOWAIT | MBX_STOP_IOCB)); | 2645 | (MBX_NOWAIT | MBX_STOP_IOCB)); |
2149 | lpfc_set_loopback_flag(phba); | 2646 | lpfc_set_loopback_flag(phba); |
@@ -2154,67 +2651,81 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) | |||
2154 | 2651 | ||
2155 | case LPFC_DISC_AUTH: | 2652 | case LPFC_DISC_AUTH: |
2156 | /* Node Authentication timeout */ | 2653 | /* Node Authentication timeout */ |
2157 | lpfc_printf_log(phba, | 2654 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
2158 | KERN_ERR, | 2655 | "%d (%d):0227 Node Authentication timeout\n", |
2159 | LOG_DISCOVERY, | 2656 | phba->brd_no, vport->vpi); |
2160 | "%d:0227 Node Authentication timeout\n", | 2657 | lpfc_disc_flush_list(vport); |
2161 | phba->brd_no); | 2658 | |
2162 | lpfc_disc_flush_list(phba); | 2659 | /* |
2163 | clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 2660 | * set port_state to PORT_READY if SLI2. |
2164 | if (!clearlambox) { | 2661 | * cmpl_reg_vpi will set port_state to READY for SLI3. |
2165 | clrlaerr = 1; | 2662 | */ |
2166 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | 2663 | if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) |
2167 | "%d:0207 Device Discovery " | 2664 | lpfc_issue_reg_vpi(phba, vport); |
2168 | "completion error\n", | 2665 | else { /* NPIV Not enabled */ |
2169 | phba->brd_no); | 2666 | lpfc_issue_clear_la(phba, vport); |
2170 | phba->hba_state = LPFC_HBA_ERROR; | 2667 | vport->port_state = LPFC_VPORT_READY; |
2171 | break; | ||
2172 | } | 2668 | } |
2173 | phba->hba_state = LPFC_CLEAR_LA; | 2669 | break; |
2174 | lpfc_clear_la(phba, clearlambox); | 2670 | |
2175 | clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; | 2671 | case LPFC_VPORT_READY: |
2176 | rc = lpfc_sli_issue_mbox(phba, clearlambox, | 2672 | if (vport->fc_flag & FC_RSCN_MODE) { |
2177 | (MBX_NOWAIT | MBX_STOP_IOCB)); | 2673 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
2178 | if (rc == MBX_NOT_FINISHED) { | 2674 | "%d (%d):0231 RSCN timeout Data: x%x " |
2179 | mempool_free(clearlambox, phba->mbox_mem_pool); | 2675 | "x%x\n", |
2180 | clrlaerr = 1; | 2676 | phba->brd_no, vport->vpi, |
2677 | vport->fc_ns_retry, LPFC_MAX_NS_RETRY); | ||
2678 | |||
2679 | /* Cleanup any outstanding ELS commands */ | ||
2680 | lpfc_els_flush_cmd(vport); | ||
2681 | |||
2682 | lpfc_els_flush_rscn(vport); | ||
2683 | lpfc_disc_flush_list(vport); | ||
2181 | } | 2684 | } |
2182 | break; | 2685 | break; |
2183 | 2686 | ||
2687 | default: | ||
2688 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | ||
2689 | "%d (%d):0229 Unexpected discovery timeout, " | ||
2690 | "vport State x%x\n", | ||
2691 | phba->brd_no, vport->vpi, vport->port_state); | ||
2692 | |||
2693 | break; | ||
2694 | } | ||
2695 | |||
2696 | switch (phba->link_state) { | ||
2184 | case LPFC_CLEAR_LA: | 2697 | case LPFC_CLEAR_LA: |
2185 | /* CLEAR LA timeout */ | 2698 | /* CLEAR LA timeout */ |
2186 | lpfc_printf_log(phba, | 2699 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
2187 | KERN_ERR, | 2700 | "%d (%d):0228 CLEAR LA timeout\n", |
2188 | LOG_DISCOVERY, | 2701 | phba->brd_no, vport->vpi); |
2189 | "%d:0228 CLEAR LA timeout\n", | ||
2190 | phba->brd_no); | ||
2191 | clrlaerr = 1; | 2702 | clrlaerr = 1; |
2192 | break; | 2703 | break; |
2193 | 2704 | ||
2194 | case LPFC_HBA_READY: | 2705 | case LPFC_LINK_UNKNOWN: |
2195 | if (phba->fc_flag & FC_RSCN_MODE) { | 2706 | case LPFC_WARM_START: |
2196 | lpfc_printf_log(phba, | 2707 | case LPFC_INIT_START: |
2197 | KERN_ERR, | 2708 | case LPFC_INIT_MBX_CMDS: |
2198 | LOG_DISCOVERY, | 2709 | case LPFC_LINK_DOWN: |
2199 | "%d:0231 RSCN timeout Data: x%x x%x\n", | 2710 | case LPFC_LINK_UP: |
2200 | phba->brd_no, | 2711 | case LPFC_HBA_ERROR: |
2201 | phba->fc_ns_retry, LPFC_MAX_NS_RETRY); | 2712 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
2202 | 2713 | "%d (%d):0230 Unexpected timeout, hba link " | |
2203 | /* Cleanup any outstanding ELS commands */ | 2714 | "state x%x\n", |
2204 | lpfc_els_flush_cmd(phba); | 2715 | phba->brd_no, vport->vpi, phba->link_state); |
2716 | clrlaerr = 1; | ||
2717 | break; | ||
2205 | 2718 | ||
2206 | lpfc_els_flush_rscn(phba); | 2719 | case LPFC_HBA_READY: |
2207 | lpfc_disc_flush_list(phba); | ||
2208 | } | ||
2209 | break; | 2720 | break; |
2210 | } | 2721 | } |
2211 | 2722 | ||
2212 | if (clrlaerr) { | 2723 | if (clrlaerr) { |
2213 | lpfc_disc_flush_list(phba); | 2724 | lpfc_disc_flush_list(vport); |
2214 | psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; | 2725 | psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; |
2215 | psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; | 2726 | psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; |
2216 | psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; | 2727 | psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; |
2217 | phba->hba_state = LPFC_HBA_READY; | 2728 | vport->port_state = LPFC_VPORT_READY; |
2218 | } | 2729 | } |
2219 | 2730 | ||
2220 | return; | 2731 | return; |
@@ -2227,37 +2738,29 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) | |||
2227 | * handed off to the SLI layer. | 2738 | * handed off to the SLI layer. |
2228 | */ | 2739 | */ |
2229 | void | 2740 | void |
2230 | lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | 2741 | lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
2231 | { | 2742 | { |
2232 | struct lpfc_sli *psli; | 2743 | MAILBOX_t *mb = &pmb->mb; |
2233 | MAILBOX_t *mb; | 2744 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); |
2234 | struct lpfc_dmabuf *mp; | 2745 | struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; |
2235 | struct lpfc_nodelist *ndlp; | 2746 | struct lpfc_vport *vport = pmb->vport; |
2236 | |||
2237 | psli = &phba->sli; | ||
2238 | mb = &pmb->mb; | ||
2239 | |||
2240 | ndlp = (struct lpfc_nodelist *) pmb->context2; | ||
2241 | mp = (struct lpfc_dmabuf *) (pmb->context1); | ||
2242 | 2747 | ||
2243 | pmb->context1 = NULL; | 2748 | pmb->context1 = NULL; |
2244 | 2749 | ||
2245 | ndlp->nlp_rpi = mb->un.varWords[0]; | 2750 | ndlp->nlp_rpi = mb->un.varWords[0]; |
2246 | ndlp->nlp_type |= NLP_FABRIC; | 2751 | ndlp->nlp_type |= NLP_FABRIC; |
2247 | lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); | 2752 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
2248 | 2753 | ||
2249 | /* Start issuing Fabric-Device Management Interface (FDMI) | 2754 | /* |
2250 | * command to 0xfffffa (FDMI well known port) | 2755 | * Start issuing Fabric-Device Management Interface (FDMI) command to |
2756 | * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if | ||
2757 | * fdmi-on=2 (supporting RPA/hostname) |
2251 | */ | 2758 | */ |
2252 | if (phba->cfg_fdmi_on == 1) { | 2759 | |
2253 | lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA); | 2760 | if (phba->cfg_fdmi_on == 1) |
2254 | } else { | 2761 | lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); |
2255 | /* | 2762 | else |
2256 | * Delay issuing FDMI command if fdmi-on=2 | 2763 | mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); |
2257 | * (supporting RPA/hostnmae) | ||
2258 | */ | ||
2259 | mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60); | ||
2260 | } | ||
2261 | 2764 | ||
2262 | /* Mailbox took a reference to the node */ | 2765 | /* Mailbox took a reference to the node */ |
2263 | lpfc_nlp_put(ndlp); | 2766 | lpfc_nlp_put(ndlp); |
@@ -2283,16 +2786,12 @@ lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param) | |||
2283 | sizeof(ndlp->nlp_portname)) == 0; | 2786 | sizeof(ndlp->nlp_portname)) == 0; |
2284 | } | 2787 | } |
2285 | 2788 | ||
2286 | /* | ||
2287 | * Search node lists for a remote port matching filter criteria | ||
2288 | * Caller needs to hold host_lock before calling this routine. | ||
2289 | */ | ||
2290 | struct lpfc_nodelist * | 2789 | struct lpfc_nodelist * |
2291 | __lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param) | 2790 | __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) |
2292 | { | 2791 | { |
2293 | struct lpfc_nodelist *ndlp; | 2792 | struct lpfc_nodelist *ndlp; |
2294 | 2793 | ||
2295 | list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { | 2794 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
2296 | if (ndlp->nlp_state != NLP_STE_UNUSED_NODE && | 2795 | if (ndlp->nlp_state != NLP_STE_UNUSED_NODE && |
2297 | filter(ndlp, param)) | 2796 | filter(ndlp, param)) |
2298 | return ndlp; | 2797 | return ndlp; |
@@ -2302,68 +2801,104 @@ __lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param) | |||
2302 | 2801 | ||
2303 | /* | 2802 | /* |
2304 | * Search node lists for a remote port matching filter criteria | 2803 | * Search node lists for a remote port matching filter criteria |
2305 | * This routine is used when the caller does NOT have host_lock. | 2804 | * Caller needs to hold host_lock before calling this routine. |
2306 | */ | 2805 | */ |
2307 | struct lpfc_nodelist * | 2806 | struct lpfc_nodelist * |
2308 | lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param) | 2807 | lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) |
2309 | { | 2808 | { |
2809 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
2310 | struct lpfc_nodelist *ndlp; | 2810 | struct lpfc_nodelist *ndlp; |
2311 | 2811 | ||
2312 | spin_lock_irq(phba->host->host_lock); | 2812 | spin_lock_irq(shost->host_lock); |
2313 | ndlp = __lpfc_find_node(phba, filter, param); | 2813 | ndlp = __lpfc_find_node(vport, filter, param); |
2314 | spin_unlock_irq(phba->host->host_lock); | 2814 | spin_unlock_irq(shost->host_lock); |
2315 | return ndlp; | 2815 | return ndlp; |
2316 | } | 2816 | } |
2317 | 2817 | ||
2318 | /* | 2818 | /* |
2319 | * This routine looks up the ndlp lists for the given RPI. If rpi found it | 2819 | * This routine looks up the ndlp lists for the given RPI. If rpi found it |
2320 | * returns the node list pointer else return NULL. | 2820 | * returns the node list element pointer else return NULL. |
2321 | */ | 2821 | */ |
2322 | struct lpfc_nodelist * | 2822 | struct lpfc_nodelist * |
2323 | __lpfc_findnode_rpi(struct lpfc_hba *phba, uint16_t rpi) | 2823 | __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) |
2324 | { | 2824 | { |
2325 | return __lpfc_find_node(phba, lpfc_filter_by_rpi, &rpi); | 2825 | return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi); |
2326 | } | 2826 | } |
2327 | 2827 | ||
2328 | struct lpfc_nodelist * | 2828 | struct lpfc_nodelist * |
2329 | lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi) | 2829 | lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) |
2330 | { | 2830 | { |
2831 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
2331 | struct lpfc_nodelist *ndlp; | 2832 | struct lpfc_nodelist *ndlp; |
2332 | 2833 | ||
2333 | spin_lock_irq(phba->host->host_lock); | 2834 | spin_lock_irq(shost->host_lock); |
2334 | ndlp = __lpfc_findnode_rpi(phba, rpi); | 2835 | ndlp = __lpfc_findnode_rpi(vport, rpi); |
2335 | spin_unlock_irq(phba->host->host_lock); | 2836 | spin_unlock_irq(shost->host_lock); |
2336 | return ndlp; | 2837 | return ndlp; |
2337 | } | 2838 | } |
2338 | 2839 | ||
2339 | /* | 2840 | /* |
2340 | * This routine looks up the ndlp lists for the given WWPN. If WWPN found it | 2841 | * This routine looks up the ndlp lists for the given WWPN. If WWPN found it |
2341 | * returns the node list pointer else return NULL. | 2842 | * returns the node list element pointer else return NULL. |
2342 | */ | 2843 | */ |
2343 | struct lpfc_nodelist * | 2844 | struct lpfc_nodelist * |
2344 | lpfc_findnode_wwpn(struct lpfc_hba *phba, struct lpfc_name *wwpn) | 2845 | lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn) |
2345 | { | 2846 | { |
2847 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
2346 | struct lpfc_nodelist *ndlp; | 2848 | struct lpfc_nodelist *ndlp; |
2347 | 2849 | ||
2348 | spin_lock_irq(phba->host->host_lock); | 2850 | spin_lock_irq(shost->host_lock); |
2349 | ndlp = __lpfc_find_node(phba, lpfc_filter_by_wwpn, wwpn); | 2851 | ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn); |
2350 | spin_unlock_irq(phba->host->host_lock); | 2852 | spin_unlock_irq(shost->host_lock); |
2351 | return NULL; | 2853 | return ndlp; |
2352 | } | 2854 | } |
2353 | 2855 | ||
2354 | void | 2856 | void |
2355 | lpfc_nlp_init(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did) | 2857 | lpfc_dev_loss_delay(unsigned long ptr) |
2858 | { | ||
2859 | struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr; | ||
2860 | struct lpfc_vport *vport = ndlp->vport; | ||
2861 | struct lpfc_hba *phba = vport->phba; | ||
2862 | struct lpfc_work_evt *evtp = &ndlp->dev_loss_evt; | ||
2863 | unsigned long flags; | ||
2864 | |||
2865 | evtp = &ndlp->dev_loss_evt; | ||
2866 | |||
2867 | spin_lock_irqsave(&phba->hbalock, flags); | ||
2868 | if (!list_empty(&evtp->evt_listp)) { | ||
2869 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
2870 | return; | ||
2871 | } | ||
2872 | |||
2873 | evtp->evt_arg1 = ndlp; | ||
2874 | evtp->evt = LPFC_EVT_DEV_LOSS_DELAY; | ||
2875 | list_add_tail(&evtp->evt_listp, &phba->work_list); | ||
2876 | if (phba->work_wait) | ||
2877 | lpfc_worker_wake_up(phba); | ||
2878 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
2879 | return; | ||
2880 | } | ||
2881 | |||
2882 | void | ||
2883 | lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | ||
2884 | uint32_t did) | ||
2356 | { | 2885 | { |
2357 | memset(ndlp, 0, sizeof (struct lpfc_nodelist)); | 2886 | memset(ndlp, 0, sizeof (struct lpfc_nodelist)); |
2358 | INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); | 2887 | INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); |
2888 | INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); | ||
2359 | init_timer(&ndlp->nlp_delayfunc); | 2889 | init_timer(&ndlp->nlp_delayfunc); |
2360 | ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; | 2890 | ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; |
2361 | ndlp->nlp_delayfunc.data = (unsigned long)ndlp; | 2891 | ndlp->nlp_delayfunc.data = (unsigned long)ndlp; |
2362 | ndlp->nlp_DID = did; | 2892 | ndlp->nlp_DID = did; |
2363 | ndlp->nlp_phba = phba; | 2893 | ndlp->vport = vport; |
2364 | ndlp->nlp_sid = NLP_NO_SID; | 2894 | ndlp->nlp_sid = NLP_NO_SID; |
2365 | INIT_LIST_HEAD(&ndlp->nlp_listp); | 2895 | INIT_LIST_HEAD(&ndlp->nlp_listp); |
2366 | kref_init(&ndlp->kref); | 2896 | kref_init(&ndlp->kref); |
2897 | |||
2898 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, | ||
2899 | "node init: did:x%x", | ||
2900 | ndlp->nlp_DID, 0, 0); | ||
2901 | |||
2367 | return; | 2902 | return; |
2368 | } | 2903 | } |
2369 | 2904 | ||
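lpfc_nlp_init() ends with kref_init(&ndlp->kref), and lpfc_nlp_release() in the next hunk is the matching destructor: it recovers the ndlp from the kref with container_of(), tears it down via lpfc_nlp_remove(), and returns the memory to the node mempool. The code below is a userspace analogue of that refcount-plus-destructor lifetime, using C11 atomics and a hand-rolled container_of; it only illustrates the pattern and is not the driver's implementation.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct kref { atomic_int refcount; };

struct nodelist {
    int did;
    struct kref kref;
};

static void kref_init(struct kref *k) { atomic_init(&k->refcount, 1); }
static void kref_get(struct kref *k)  { atomic_fetch_add(&k->refcount, 1); }

/* Drop a reference; run the release callback when the count hits zero. */
static void kref_put(struct kref *k, void (*release)(struct kref *))
{
    if (atomic_fetch_sub(&k->refcount, 1) == 1)
        release(k);
}

static void node_release(struct kref *kref)
{
    struct nodelist *ndlp = container_of(kref, struct nodelist, kref);
    printf("releasing node did=0x%x\n", ndlp->did);
    free(ndlp);                /* stands in for the mempool free in the driver */
}

int main(void)
{
    struct nodelist *ndlp = malloc(sizeof(*ndlp));
    if (!ndlp)
        return 1;
    ndlp->did = 0xfffffc;
    kref_init(&ndlp->kref);               /* initial reference, as in node init  */
    kref_get(&ndlp->kref);                /* e.g. a mailbox taking a reference   */
    kref_put(&ndlp->kref, node_release);  /* mailbox done                        */
    kref_put(&ndlp->kref, node_release);  /* last reference: node_release runs   */
    return 0;
}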
@@ -2372,8 +2907,13 @@ lpfc_nlp_release(struct kref *kref) | |||
2372 | { | 2907 | { |
2373 | struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, | 2908 | struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, |
2374 | kref); | 2909 | kref); |
2375 | lpfc_nlp_remove(ndlp->nlp_phba, ndlp); | 2910 | |
2376 | mempool_free(ndlp, ndlp->nlp_phba->nlp_mem_pool); | 2911 | lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, |
2912 | "node release: did:x%x flg:x%x type:x%x", | ||
2913 | ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); | ||
2914 | |||
2915 | lpfc_nlp_remove(ndlp->vport, ndlp); | ||
2916 | mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); | ||
2377 | } | 2917 | } |
2378 | 2918 | ||
2379 | struct lpfc_nodelist * | 2919 | struct lpfc_nodelist * |