aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/lpfc/lpfc_hbadisc.c
diff options
context:
space:
mode:
authorJames Smart <james.smart@emulex.com>2010-10-22 11:06:08 -0400
committerJames Bottomley <James.Bottomley@suse.de>2010-10-26 11:40:49 -0400
commita93ff37a8a869c7065a1b05f75e69bfb74eb599c (patch)
tree357ef8d67e3a845c21a8843fc2a5b941e007b33d /drivers/scsi/lpfc/lpfc_hbadisc.c
parent12265f68ae925b9dee8099140b4213c28ef54f14 (diff)
[SCSI] lpfc 8.3.18: Add logic to detect last devloss timeout
Added driver logic to detect the last devloss timeout of remote nodes which were still in use of the FCF. At that point, the driver should set the last in-use remote node devloss timeout flag if it was not already set and should perform the proper action on the in-use FCF and recovery of the FCF from firmware, depending on the state the driver's FIP engine is in. Find an eligible FCF through FCF table rescan or the next new FCF event when the FCF table rescan turned up no eligible FCF; a successful flogi into an FCF shall clear the HBA_DEVLOSS_TMO flag, indicating successful recovery from devloss timeout. [jejb: add delay.h include to lpfc_hbadisc.c to fix ppc compile] Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com> Signed-off-by: James Smart <james.smart@emulex.com> Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_hbadisc.c')
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c349
1 files changed, 264 insertions, 85 deletions
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 0788bf670add..05c9398a723d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -20,6 +20,7 @@
20 *******************************************************************/ 20 *******************************************************************/
21 21
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/delay.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
24#include <linux/pci.h> 25#include <linux/pci.h>
25#include <linux/kthread.h> 26#include <linux/kthread.h>
@@ -63,6 +64,7 @@ static uint8_t lpfcAlpaArray[] = {
63static void lpfc_disc_timeout_handler(struct lpfc_vport *); 64static void lpfc_disc_timeout_handler(struct lpfc_vport *);
64static void lpfc_disc_flush_list(struct lpfc_vport *vport); 65static void lpfc_disc_flush_list(struct lpfc_vport *vport);
65static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 66static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
67static int lpfc_fcf_inuse(struct lpfc_hba *);
66 68
67void 69void
68lpfc_terminate_rport_io(struct fc_rport *rport) 70lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -160,11 +162,17 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
160 return; 162 return;
161} 163}
162 164
163/* 165/**
164 * This function is called from the worker thread when dev_loss_tmo 166 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
165 * expire. 167 * @ndlp: Pointer to remote node object.
166 */ 168 *
167static void 169 * This function is called from the worker thread when devloss timeout timer
 170 * expires. For SLI4 host, this routine shall return 1 when at least one
171 * remote node, including this @ndlp, is still in use of FCF; otherwise, this
 172 * routine shall return 0 when no remote node is still in use of FCF
173 * when devloss timeout happened to this @ndlp.
174 **/
175static int
168lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) 176lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
169{ 177{
170 struct lpfc_rport_data *rdata; 178 struct lpfc_rport_data *rdata;
@@ -175,17 +183,21 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
175 int put_node; 183 int put_node;
176 int put_rport; 184 int put_rport;
177 int warn_on = 0; 185 int warn_on = 0;
186 int fcf_inuse = 0;
178 187
179 rport = ndlp->rport; 188 rport = ndlp->rport;
180 189
181 if (!rport) 190 if (!rport)
182 return; 191 return fcf_inuse;
183 192
184 rdata = rport->dd_data; 193 rdata = rport->dd_data;
185 name = (uint8_t *) &ndlp->nlp_portname; 194 name = (uint8_t *) &ndlp->nlp_portname;
186 vport = ndlp->vport; 195 vport = ndlp->vport;
187 phba = vport->phba; 196 phba = vport->phba;
188 197
198 if (phba->sli_rev == LPFC_SLI_REV4)
199 fcf_inuse = lpfc_fcf_inuse(phba);
200
189 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 201 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
190 "rport devlosstmo:did:x%x type:x%x id:x%x", 202 "rport devlosstmo:did:x%x type:x%x id:x%x",
191 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); 203 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
@@ -209,7 +221,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
209 lpfc_nlp_put(ndlp); 221 lpfc_nlp_put(ndlp);
210 if (put_rport) 222 if (put_rport)
211 put_device(&rport->dev); 223 put_device(&rport->dev);
212 return; 224 return fcf_inuse;
213 } 225 }
214 226
215 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { 227 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
@@ -220,7 +232,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
220 *name, *(name+1), *(name+2), *(name+3), 232 *name, *(name+1), *(name+2), *(name+3),
221 *(name+4), *(name+5), *(name+6), *(name+7), 233 *(name+4), *(name+5), *(name+6), *(name+7),
222 ndlp->nlp_DID); 234 ndlp->nlp_DID);
223 return; 235 return fcf_inuse;
224 } 236 }
225 237
226 if (ndlp->nlp_type & NLP_FABRIC) { 238 if (ndlp->nlp_type & NLP_FABRIC) {
@@ -233,7 +245,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
233 lpfc_nlp_put(ndlp); 245 lpfc_nlp_put(ndlp);
234 if (put_rport) 246 if (put_rport)
235 put_device(&rport->dev); 247 put_device(&rport->dev);
236 return; 248 return fcf_inuse;
237 } 249 }
238 250
239 if (ndlp->nlp_sid != NLP_NO_SID) { 251 if (ndlp->nlp_sid != NLP_NO_SID) {
@@ -280,6 +292,74 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
280 (ndlp->nlp_state != NLP_STE_PRLI_ISSUE)) 292 (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
281 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 293 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
282 294
295 return fcf_inuse;
296}
297
298/**
299 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
300 * @phba: Pointer to hba context object.
301 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 302 * @nlp_did: remote node identifier with devloss timeout.
303 *
304 * This function is called from the worker thread after invoking devloss
305 * timeout handler and releasing the reference count for the ndlp with
306 * which the devloss timeout was handled for SLI4 host. For the devloss
307 * timeout of the last remote node which had been in use of FCF, when this
 308 * routine is invoked, it shall be guaranteed that none of the remote nodes are
309 * in-use of FCF. When devloss timeout to the last remote using the FCF,
310 * if the FIP engine is neither in FCF table scan process nor roundrobin
311 * failover process, the in-use FCF shall be unregistered. If the FIP
312 * engine is in FCF discovery process, the devloss timeout state shall
313 * be set for either the FCF table scan process or roundrobin failover
314 * process to unregister the in-use FCF.
315 **/
316static void
317lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
318 uint32_t nlp_did)
319{
320 /* If devloss timeout happened to a remote node when FCF had no
321 * longer been in-use, do nothing.
322 */
323 if (!fcf_inuse)
324 return;
325
326 if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
327 spin_lock_irq(&phba->hbalock);
328 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
329 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
330 spin_unlock_irq(&phba->hbalock);
331 return;
332 }
333 phba->hba_flag |= HBA_DEVLOSS_TMO;
334 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
335 "2847 Last remote node (x%x) using "
336 "FCF devloss tmo\n", nlp_did);
337 }
338 if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
339 spin_unlock_irq(&phba->hbalock);
340 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
341 "2868 Devloss tmo to FCF rediscovery "
342 "in progress\n");
343 return;
344 }
345 if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
346 spin_unlock_irq(&phba->hbalock);
347 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
348 "2869 Devloss tmo to idle FIP engine, "
349 "unreg in-use FCF and rescan.\n");
350 /* Unregister in-use FCF and rescan */
351 lpfc_unregister_fcf_rescan(phba);
352 return;
353 }
354 spin_unlock_irq(&phba->hbalock);
355 if (phba->hba_flag & FCF_TS_INPROG)
356 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
357 "2870 FCF table scan in progress\n");
358 if (phba->hba_flag & FCF_RR_INPROG)
359 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
360 "2871 FLOGI roundrobin FCF failover "
361 "in progress\n");
362 }
283 lpfc_unregister_unused_fcf(phba); 363 lpfc_unregister_unused_fcf(phba);
284} 364}
285 365
@@ -408,6 +488,8 @@ lpfc_work_list_done(struct lpfc_hba *phba)
408 struct lpfc_work_evt *evtp = NULL; 488 struct lpfc_work_evt *evtp = NULL;
409 struct lpfc_nodelist *ndlp; 489 struct lpfc_nodelist *ndlp;
410 int free_evt; 490 int free_evt;
491 int fcf_inuse;
492 uint32_t nlp_did;
411 493
412 spin_lock_irq(&phba->hbalock); 494 spin_lock_irq(&phba->hbalock);
413 while (!list_empty(&phba->work_list)) { 495 while (!list_empty(&phba->work_list)) {
@@ -427,12 +509,17 @@ lpfc_work_list_done(struct lpfc_hba *phba)
427 break; 509 break;
428 case LPFC_EVT_DEV_LOSS: 510 case LPFC_EVT_DEV_LOSS:
429 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 511 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
430 lpfc_dev_loss_tmo_handler(ndlp); 512 fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
431 free_evt = 0; 513 free_evt = 0;
432 /* decrement the node reference count held for 514 /* decrement the node reference count held for
433 * this queued work 515 * this queued work
434 */ 516 */
517 nlp_did = ndlp->nlp_DID;
435 lpfc_nlp_put(ndlp); 518 lpfc_nlp_put(ndlp);
519 if (phba->sli_rev == LPFC_SLI_REV4)
520 lpfc_sli4_post_dev_loss_tmo_handler(phba,
521 fcf_inuse,
522 nlp_did);
436 break; 523 break;
437 case LPFC_EVT_ONLINE: 524 case LPFC_EVT_ONLINE:
438 if (phba->link_state < LPFC_LINK_DOWN) 525 if (phba->link_state < LPFC_LINK_DOWN)
@@ -1021,8 +1108,7 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1021 "2017 REG_FCFI mbxStatus error x%x " 1108 "2017 REG_FCFI mbxStatus error x%x "
1022 "HBA state x%x\n", 1109 "HBA state x%x\n",
1023 mboxq->u.mb.mbxStatus, vport->port_state); 1110 mboxq->u.mb.mbxStatus, vport->port_state);
1024 mempool_free(mboxq, phba->mbox_mem_pool); 1111 goto fail_out;
1025 return;
1026 } 1112 }
1027 1113
1028 /* Start FCoE discovery by sending a FLOGI. */ 1114 /* Start FCoE discovery by sending a FLOGI. */
@@ -1031,20 +1117,30 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1031 spin_lock_irq(&phba->hbalock); 1117 spin_lock_irq(&phba->hbalock);
1032 phba->fcf.fcf_flag |= FCF_REGISTERED; 1118 phba->fcf.fcf_flag |= FCF_REGISTERED;
1033 spin_unlock_irq(&phba->hbalock); 1119 spin_unlock_irq(&phba->hbalock);
1120
1034 /* If there is a pending FCoE event, restart FCF table scan. */ 1121 /* If there is a pending FCoE event, restart FCF table scan. */
1035 if (lpfc_check_pending_fcoe_event(phba, 1)) { 1122 if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
1036 mempool_free(mboxq, phba->mbox_mem_pool); 1123 goto fail_out;
1037 return; 1124
1038 } 1125 /* Mark successful completion of FCF table scan */
1039 spin_lock_irq(&phba->hbalock); 1126 spin_lock_irq(&phba->hbalock);
1040 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1127 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1041 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1128 phba->hba_flag &= ~FCF_TS_INPROG;
1042 spin_unlock_irq(&phba->hbalock); 1129 if (vport->port_state != LPFC_FLOGI) {
1043 if (vport->port_state != LPFC_FLOGI) 1130 phba->hba_flag |= FCF_RR_INPROG;
1131 spin_unlock_irq(&phba->hbalock);
1044 lpfc_initial_flogi(vport); 1132 lpfc_initial_flogi(vport);
1133 goto out;
1134 }
1135 spin_unlock_irq(&phba->hbalock);
1136 goto out;
1045 1137
1138fail_out:
1139 spin_lock_irq(&phba->hbalock);
1140 phba->hba_flag &= ~FCF_RR_INPROG;
1141 spin_unlock_irq(&phba->hbalock);
1142out:
1046 mempool_free(mboxq, phba->mbox_mem_pool); 1143 mempool_free(mboxq, phba->mbox_mem_pool);
1047 return;
1048} 1144}
1049 1145
1050/** 1146/**
@@ -1241,10 +1337,9 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1241 int rc; 1337 int rc;
1242 1338
1243 spin_lock_irq(&phba->hbalock); 1339 spin_lock_irq(&phba->hbalock);
1244
 1245 /* If the FCF is not available do nothing. */ 1340 /* If the FCF is not available do nothing. */
1246 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1341 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1247 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1342 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1248 spin_unlock_irq(&phba->hbalock); 1343 spin_unlock_irq(&phba->hbalock);
1249 return; 1344 return;
1250 } 1345 }
@@ -1252,19 +1347,22 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1252 /* The FCF is already registered, start discovery */ 1347 /* The FCF is already registered, start discovery */
1253 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1348 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1254 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1349 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1255 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1350 phba->hba_flag &= ~FCF_TS_INPROG;
1256 spin_unlock_irq(&phba->hbalock); 1351 if (phba->pport->port_state != LPFC_FLOGI) {
1257 if (phba->pport->port_state != LPFC_FLOGI) 1352 phba->hba_flag |= FCF_RR_INPROG;
1353 spin_unlock_irq(&phba->hbalock);
1258 lpfc_initial_flogi(phba->pport); 1354 lpfc_initial_flogi(phba->pport);
1355 return;
1356 }
1357 spin_unlock_irq(&phba->hbalock);
1259 return; 1358 return;
1260 } 1359 }
1261 spin_unlock_irq(&phba->hbalock); 1360 spin_unlock_irq(&phba->hbalock);
1262 1361
1263 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, 1362 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1264 GFP_KERNEL);
1265 if (!fcf_mbxq) { 1363 if (!fcf_mbxq) {
1266 spin_lock_irq(&phba->hbalock); 1364 spin_lock_irq(&phba->hbalock);
1267 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1365 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1268 spin_unlock_irq(&phba->hbalock); 1366 spin_unlock_irq(&phba->hbalock);
1269 return; 1367 return;
1270 } 1368 }
@@ -1275,7 +1373,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1275 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); 1373 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1276 if (rc == MBX_NOT_FINISHED) { 1374 if (rc == MBX_NOT_FINISHED) {
1277 spin_lock_irq(&phba->hbalock); 1375 spin_lock_irq(&phba->hbalock);
1278 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1376 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1279 spin_unlock_irq(&phba->hbalock); 1377 spin_unlock_irq(&phba->hbalock);
1280 mempool_free(fcf_mbxq, phba->mbox_mem_pool); 1378 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1281 } 1379 }
@@ -1493,7 +1591,7 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1493 * FCF discovery, no need to restart FCF discovery. 1591 * FCF discovery, no need to restart FCF discovery.
1494 */ 1592 */
1495 if ((phba->link_state >= LPFC_LINK_UP) && 1593 if ((phba->link_state >= LPFC_LINK_UP) &&
1496 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) 1594 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
1497 return 0; 1595 return 0;
1498 1596
1499 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1597 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
@@ -1517,14 +1615,14 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1517 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 1615 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
1518 } else { 1616 } else {
1519 /* 1617 /*
1520 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS 1618 * Do not continue FCF discovery and clear FCF_TS_INPROG
1521 * flag 1619 * flag
1522 */ 1620 */
1523 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 1621 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1524 "2833 Stop FCF discovery process due to link " 1622 "2833 Stop FCF discovery process due to link "
1525 "state change (x%x)\n", phba->link_state); 1623 "state change (x%x)\n", phba->link_state);
1526 spin_lock_irq(&phba->hbalock); 1624 spin_lock_irq(&phba->hbalock);
1527 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1625 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1528 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); 1626 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
1529 spin_unlock_irq(&phba->hbalock); 1627 spin_unlock_irq(&phba->hbalock);
1530 } 1628 }
@@ -1729,6 +1827,65 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
1729} 1827}
1730 1828
1731/** 1829/**
1830 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
1831 * @vport: Pointer to vport object.
1832 * @fcf_index: index to next fcf.
1833 *
1834 * This function processing the roundrobin fcf failover to next fcf index.
1835 * When this function is invoked, there will be a current fcf registered
1836 * for flogi.
1837 * Return: 0 for continue retrying flogi on currently registered fcf;
1838 * 1 for stop flogi on currently registered fcf;
1839 */
1840int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
1841{
1842 struct lpfc_hba *phba = vport->phba;
1843 int rc;
1844
1845 if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
1846 spin_lock_irq(&phba->hbalock);
1847 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
1848 spin_unlock_irq(&phba->hbalock);
1849 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1850 "2872 Devloss tmo with no eligible "
1851 "FCF, unregister in-use FCF (x%x) "
1852 "and rescan FCF table\n",
1853 phba->fcf.current_rec.fcf_indx);
1854 lpfc_unregister_fcf_rescan(phba);
1855 goto stop_flogi_current_fcf;
1856 }
1857 /* Mark the end to FLOGI roundrobin failover */
1858 phba->hba_flag &= ~FCF_RR_INPROG;
1859 /* Allow action to new fcf asynchronous event */
1860 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
1861 spin_unlock_irq(&phba->hbalock);
1862 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1863 "2865 No FCF available, stop roundrobin FCF "
1864 "failover and change port state:x%x/x%x\n",
1865 phba->pport->port_state, LPFC_VPORT_UNKNOWN);
1866 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1867 goto stop_flogi_current_fcf;
1868 } else {
1869 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
1870 "2794 Try FLOGI roundrobin FCF failover to "
1871 "(x%x)\n", fcf_index);
1872 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
1873 if (rc)
1874 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
1875 "2761 FLOGI roundrobin FCF failover "
1876 "failed (rc:x%x) to read FCF (x%x)\n",
1877 rc, phba->fcf.current_rec.fcf_indx);
1878 else
1879 goto stop_flogi_current_fcf;
1880 }
1881 return 0;
1882
1883stop_flogi_current_fcf:
1884 lpfc_can_disctmo(vport);
1885 return 1;
1886}
1887
1888/**
1732 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. 1889 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
1733 * @phba: pointer to lpfc hba data structure. 1890 * @phba: pointer to lpfc hba data structure.
1734 * @mboxq: pointer to mailbox object. 1891 * @mboxq: pointer to mailbox object.
@@ -1756,7 +1913,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1756 int rc; 1913 int rc;
1757 1914
1758 /* If there is pending FCoE event restart FCF table scan */ 1915 /* If there is pending FCoE event restart FCF table scan */
1759 if (lpfc_check_pending_fcoe_event(phba, 0)) { 1916 if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
1760 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1917 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1761 return; 1918 return;
1762 } 1919 }
@@ -1765,12 +1922,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1765 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 1922 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
1766 &next_fcf_index); 1923 &next_fcf_index);
1767 if (!new_fcf_record) { 1924 if (!new_fcf_record) {
1768 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1925 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1769 "2765 Mailbox command READ_FCF_RECORD " 1926 "2765 Mailbox command READ_FCF_RECORD "
1770 "failed to retrieve a FCF record.\n"); 1927 "failed to retrieve a FCF record.\n");
1771 /* Let next new FCF event trigger fast failover */ 1928 /* Let next new FCF event trigger fast failover */
1772 spin_lock_irq(&phba->hbalock); 1929 spin_lock_irq(&phba->hbalock);
1773 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1930 phba->hba_flag &= ~FCF_TS_INPROG;
1774 spin_unlock_irq(&phba->hbalock); 1931 spin_unlock_irq(&phba->hbalock);
1775 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1932 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1776 return; 1933 return;
@@ -1787,13 +1944,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1787 /* 1944 /*
1788 * If the fcf record does not match with connect list entries 1945 * If the fcf record does not match with connect list entries
1789 * read the next entry; otherwise, this is an eligible FCF 1946 * read the next entry; otherwise, this is an eligible FCF
1790 * record for round robin FCF failover. 1947 * record for roundrobin FCF failover.
1791 */ 1948 */
1792 if (!rc) { 1949 if (!rc) {
1793 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1950 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1794 "2781 FCF record (x%x) failed FCF " 1951 "2781 FCF (x%x) failed connection "
1795 "connection list check, fcf_avail:x%x, " 1952 "list check: (x%x/x%x)\n",
1796 "fcf_valid:x%x\n",
1797 bf_get(lpfc_fcf_record_fcf_index, 1953 bf_get(lpfc_fcf_record_fcf_index,
1798 new_fcf_record), 1954 new_fcf_record),
1799 bf_get(lpfc_fcf_record_fcf_avail, 1955 bf_get(lpfc_fcf_record_fcf_avail,
@@ -1823,9 +1979,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1823 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 1979 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
1824 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 1980 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1825 "2835 Invalid in-use FCF " 1981 "2835 Invalid in-use FCF "
1826 "record (x%x) reported, " 1982 "(x%x), enter FCF failover "
1827 "entering fast FCF failover " 1983 "table scan.\n",
1828 "mode scanning.\n",
1829 phba->fcf.current_rec.fcf_indx); 1984 phba->fcf.current_rec.fcf_indx);
1830 spin_lock_irq(&phba->hbalock); 1985 spin_lock_irq(&phba->hbalock);
1831 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 1986 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
@@ -1970,8 +2125,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1970 */ 2125 */
1971 if (fcf_rec) { 2126 if (fcf_rec) {
1972 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2127 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1973 "2840 Update current FCF record " 2128 "2840 Update initial FCF candidate "
1974 "with initial FCF record (x%x)\n", 2129 "with FCF (x%x)\n",
1975 bf_get(lpfc_fcf_record_fcf_index, 2130 bf_get(lpfc_fcf_record_fcf_index,
1976 new_fcf_record)); 2131 new_fcf_record));
1977 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 2132 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
@@ -2001,20 +2156,28 @@ read_next_fcf:
2001 */ 2156 */
2002 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { 2157 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
2003 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2158 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2004 "2782 No suitable FCF record " 2159 "2782 No suitable FCF found: "
2005 "found during this round of " 2160 "(x%x/x%x)\n",
2006 "post FCF rediscovery scan: "
2007 "fcf_evt_tag:x%x, fcf_index: "
2008 "x%x\n",
2009 phba->fcoe_eventtag_at_fcf_scan, 2161 phba->fcoe_eventtag_at_fcf_scan,
2010 bf_get(lpfc_fcf_record_fcf_index, 2162 bf_get(lpfc_fcf_record_fcf_index,
2011 new_fcf_record)); 2163 new_fcf_record));
2164 spin_lock_irq(&phba->hbalock);
2165 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2166 phba->hba_flag &= ~FCF_TS_INPROG;
2167 spin_unlock_irq(&phba->hbalock);
2168 /* Unregister in-use FCF and rescan */
2169 lpfc_printf_log(phba, KERN_INFO,
2170 LOG_FIP,
2171 "2864 On devloss tmo "
2172 "unreg in-use FCF and "
2173 "rescan FCF table\n");
2174 lpfc_unregister_fcf_rescan(phba);
2175 return;
2176 }
2012 /* 2177 /*
2013 * Let next new FCF event trigger fast 2178 * Let next new FCF event trigger fast failover
2014 * failover
2015 */ 2179 */
2016 spin_lock_irq(&phba->hbalock); 2180 phba->hba_flag &= ~FCF_TS_INPROG;
2017 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
2018 spin_unlock_irq(&phba->hbalock); 2181 spin_unlock_irq(&phba->hbalock);
2019 return; 2182 return;
2020 } 2183 }
@@ -2032,9 +2195,8 @@ read_next_fcf:
2032 2195
2033 /* Replace in-use record with the new record */ 2196 /* Replace in-use record with the new record */
2034 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2197 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2035 "2842 Replace the current in-use " 2198 "2842 Replace in-use FCF (x%x) "
2036 "FCF record (x%x) with failover FCF " 2199 "with failover FCF (x%x)\n",
2037 "record (x%x)\n",
2038 phba->fcf.current_rec.fcf_indx, 2200 phba->fcf.current_rec.fcf_indx,
2039 phba->fcf.failover_rec.fcf_indx); 2201 phba->fcf.failover_rec.fcf_indx);
2040 memcpy(&phba->fcf.current_rec, 2202 memcpy(&phba->fcf.current_rec,
@@ -2046,15 +2208,8 @@ read_next_fcf:
2046 * FCF failover. 2208 * FCF failover.
2047 */ 2209 */
2048 spin_lock_irq(&phba->hbalock); 2210 spin_lock_irq(&phba->hbalock);
2049 phba->fcf.fcf_flag &= 2211 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2050 ~(FCF_REDISC_FOV | FCF_REDISC_RRU);
2051 spin_unlock_irq(&phba->hbalock); 2212 spin_unlock_irq(&phba->hbalock);
2052 /*
2053 * Set up the initial registered FCF index for FLOGI
2054 * round robin FCF failover.
2055 */
2056 phba->fcf.fcf_rr_init_indx =
2057 phba->fcf.failover_rec.fcf_indx;
2058 /* Register to the new FCF record */ 2213 /* Register to the new FCF record */
2059 lpfc_register_fcf(phba); 2214 lpfc_register_fcf(phba);
2060 } else { 2215 } else {
@@ -2101,11 +2256,11 @@ out:
2101} 2256}
2102 2257
2103/** 2258/**
2104 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler 2259 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
2105 * @phba: pointer to lpfc hba data structure. 2260 * @phba: pointer to lpfc hba data structure.
2106 * @mboxq: pointer to mailbox object. 2261 * @mboxq: pointer to mailbox object.
2107 * 2262 *
2108 * This is the callback function for FLOGI failure round robin FCF failover 2263 * This is the callback function for FLOGI failure roundrobin FCF failover
2109 * read FCF record mailbox command from the eligible FCF record bmask for 2264 * read FCF record mailbox command from the eligible FCF record bmask for
2110 * performing the failover. If the FCF read back is not valid/available, it 2265 * performing the failover. If the FCF read back is not valid/available, it
2111 * fails through to retrying FLOGI to the currently registered FCF again. 2266 * fails through to retrying FLOGI to the currently registered FCF again.
@@ -2120,17 +2275,18 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2120{ 2275{
2121 struct fcf_record *new_fcf_record; 2276 struct fcf_record *new_fcf_record;
2122 uint32_t boot_flag, addr_mode; 2277 uint32_t boot_flag, addr_mode;
2123 uint16_t next_fcf_index; 2278 uint16_t next_fcf_index, fcf_index;
2124 uint16_t current_fcf_index; 2279 uint16_t current_fcf_index;
2125 uint16_t vlan_id; 2280 uint16_t vlan_id;
2281 int rc;
2126 2282
2127 /* If link state is not up, stop the round robin failover process */ 2283 /* If link state is not up, stop the roundrobin failover process */
2128 if (phba->link_state < LPFC_LINK_UP) { 2284 if (phba->link_state < LPFC_LINK_UP) {
2129 spin_lock_irq(&phba->hbalock); 2285 spin_lock_irq(&phba->hbalock);
2130 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 2286 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
2287 phba->hba_flag &= ~FCF_RR_INPROG;
2131 spin_unlock_irq(&phba->hbalock); 2288 spin_unlock_irq(&phba->hbalock);
2132 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2289 goto out;
2133 return;
2134 } 2290 }
2135 2291
2136 /* Parse the FCF record from the non-embedded mailbox command */ 2292 /* Parse the FCF record from the non-embedded mailbox command */
@@ -2140,23 +2296,47 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2140 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2296 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2141 "2766 Mailbox command READ_FCF_RECORD " 2297 "2766 Mailbox command READ_FCF_RECORD "
2142 "failed to retrieve a FCF record.\n"); 2298 "failed to retrieve a FCF record.\n");
2143 goto out; 2299 goto error_out;
2144 } 2300 }
2145 2301
2146 /* Get the needed parameters from FCF record */ 2302 /* Get the needed parameters from FCF record */
2147 lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 2303 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2148 &addr_mode, &vlan_id); 2304 &addr_mode, &vlan_id);
2149 2305
2150 /* Log the FCF record information if turned on */ 2306 /* Log the FCF record information if turned on */
2151 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 2307 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2152 next_fcf_index); 2308 next_fcf_index);
2153 2309
2310 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2311 if (!rc) {
2312 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2313 "2848 Remove ineligible FCF (x%x) from "
2314 "from roundrobin bmask\n", fcf_index);
2315 /* Clear roundrobin bmask bit for ineligible FCF */
2316 lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
2317 /* Perform next round of roundrobin FCF failover */
2318 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
2319 rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
2320 if (rc)
2321 goto out;
2322 goto error_out;
2323 }
2324
2325 if (fcf_index == phba->fcf.current_rec.fcf_indx) {
2326 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2327 "2760 Perform FLOGI roundrobin FCF failover: "
2328 "FCF (x%x) back to FCF (x%x)\n",
2329 phba->fcf.current_rec.fcf_indx, fcf_index);
2330 /* Wait 500 ms before retrying FLOGI to current FCF */
2331 msleep(500);
2332 lpfc_initial_flogi(phba->pport);
2333 goto out;
2334 }
2335
2154 /* Upload new FCF record to the failover FCF record */ 2336 /* Upload new FCF record to the failover FCF record */
2155 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2337 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2156 "2834 Update the current FCF record (x%x) " 2338 "2834 Update current FCF (x%x) with new FCF (x%x)\n",
2157 "with the next FCF record (x%x)\n", 2339 phba->fcf.failover_rec.fcf_indx, fcf_index);
2158 phba->fcf.failover_rec.fcf_indx,
2159 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
2160 spin_lock_irq(&phba->hbalock); 2340 spin_lock_irq(&phba->hbalock);
2161 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, 2341 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
2162 new_fcf_record, addr_mode, vlan_id, 2342 new_fcf_record, addr_mode, vlan_id,
@@ -2173,14 +2353,13 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2173 sizeof(struct lpfc_fcf_rec)); 2353 sizeof(struct lpfc_fcf_rec));
2174 2354
2175 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2355 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2176 "2783 FLOGI round robin FCF failover from FCF " 2356 "2783 Perform FLOGI roundrobin FCF failover: FCF "
2177 "(x%x) to FCF (x%x).\n", 2357 "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
2178 current_fcf_index,
2179 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
2180 2358
2359error_out:
2360 lpfc_register_fcf(phba);
2181out: 2361out:
2182 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2362 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2183 lpfc_register_fcf(phba);
2184} 2363}
2185 2364
2186/** 2365/**
@@ -2189,10 +2368,10 @@ out:
2189 * @mboxq: pointer to mailbox object. 2368 * @mboxq: pointer to mailbox object.
2190 * 2369 *
2191 * This is the callback function of read FCF record mailbox command for 2370 * This is the callback function of read FCF record mailbox command for
2192 * updating the eligible FCF bmask for FLOGI failure round robin FCF 2371 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
2193 * failover when a new FCF event happened. If the FCF read back is 2372 * failover when a new FCF event happened. If the FCF read back is
2194 * valid/available and it passes the connection list check, it updates 2373 * valid/available and it passes the connection list check, it updates
2195 * the bmask for the eligible FCF record for round robin failover. 2374 * the bmask for the eligible FCF record for roundrobin failover.
2196 */ 2375 */
2197void 2376void
2198lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2377lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
@@ -2634,7 +2813,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
2634 * and get the FCF Table. 2813 * and get the FCF Table.
2635 */ 2814 */
2636 spin_lock_irq(&phba->hbalock); 2815 spin_lock_irq(&phba->hbalock);
2637 if (phba->hba_flag & FCF_DISC_INPROGRESS) { 2816 if (phba->hba_flag & FCF_TS_INPROG) {
2638 spin_unlock_irq(&phba->hbalock); 2817 spin_unlock_irq(&phba->hbalock);
2639 return; 2818 return;
2640 } 2819 }