 drivers/scsi/lpfc/lpfc.h         |   8
 drivers/scsi/lpfc/lpfc_crtn.h    |   1
 drivers/scsi/lpfc/lpfc_els.c     |  54
 drivers/scsi/lpfc/lpfc_hbadisc.c | 349
 drivers/scsi/lpfc/lpfc_init.c    |  41
 drivers/scsi/lpfc/lpfc_sli.c     |  72
 drivers/scsi/lpfc/lpfc_sli4.h    |   6
 7 files changed, 327 insertions, 204 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index cd4afcf749dd..196de40b906c 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -552,9 +552,11 @@ struct lpfc_hba {
552 | #define ELS_XRI_ABORT_EVENT 0x40 | 552 | #define ELS_XRI_ABORT_EVENT 0x40 |
553 | #define ASYNC_EVENT 0x80 | 553 | #define ASYNC_EVENT 0x80 |
554 | #define LINK_DISABLED 0x100 /* Link disabled by user */ | 554 | #define LINK_DISABLED 0x100 /* Link disabled by user */ |
555 | #define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */ | 555 | #define FCF_TS_INPROG 0x200 /* FCF table scan in progress */ |
556 | #define HBA_FIP_SUPPORT 0x400 /* FIP support in HBA */ | 556 | #define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */ |
557 | #define HBA_AER_ENABLED 0x800 /* AER enabled with HBA */ | 557 | #define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */ |
558 | #define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */ | ||
559 | #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ | ||
558 | uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ | 560 | uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ |
559 | struct lpfc_dmabuf slim2p; | 561 | struct lpfc_dmabuf slim2p; |
560 | 562 | ||
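The lpfc.h hunk above retires the single FCF_DISC_INPROGRESS bit in favor of two independent hba_flag bits, FCF_TS_INPROG (table scan) and FCF_RR_INPROG (roundrobin FLOGI failover), plus HBA_DEVLOSS_TMO, shifting HBA_FIP_SUPPORT and HBA_AER_ENABLED up accordingly. The stand-alone C sketch below only illustrates that the two discovery phases can now be tracked and cleared independently; the bit values come from the hunk, everything else is illustrative and not driver code.

#include <stdio.h>
#include <stdint.h>

#define FCF_TS_INPROG   0x200	/* FCF table scan in progress */
#define FCF_RR_INPROG   0x400	/* FCF roundrobin flogi in progress */
#define HBA_DEVLOSS_TMO 0x2000	/* HBA in devloss timeout */

int main(void)
{
	uint32_t hba_flag = 0;

	hba_flag |= FCF_TS_INPROG;	/* table scan started */
	hba_flag |= FCF_RR_INPROG;	/* roundrobin FLOGI failover started */

	/* Error paths in the patch drop both discovery bits in one shot. */
	hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);

	printf("hba_flag = 0x%x, devloss marker %s\n", (unsigned)hba_flag,
	       (hba_flag & HBA_DEVLOSS_TMO) ? "set" : "clear");
	return 0;
}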
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 03f4ddc18572..b15d13e56174 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -229,6 +229,7 @@ void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
229 | uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); | 229 | uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); |
230 | int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); | 230 | int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); |
231 | void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); | 231 | void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); |
232 | int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t); | ||
232 | 233 | ||
233 | int lpfc_mem_alloc(struct lpfc_hba *, int align); | 234 | int lpfc_mem_alloc(struct lpfc_hba *, int align); |
234 | void lpfc_mem_free(struct lpfc_hba *); | 235 | void lpfc_mem_free(struct lpfc_hba *); |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index ea511d18f0ec..b115e92025e5 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -795,7 +795,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
795 | 795 | ||
796 | if (irsp->ulpStatus) { | 796 | if (irsp->ulpStatus) { |
797 | /* | 797 | /* |
798 | * In case of FIP mode, perform round robin FCF failover | 798 | * In case of FIP mode, perform roundrobin FCF failover |
799 | * due to new FCF discovery | 799 | * due to new FCF discovery |
800 | */ | 800 | */ |
801 | if ((phba->hba_flag & HBA_FIP_SUPPORT) && | 801 | if ((phba->hba_flag & HBA_FIP_SUPPORT) && |
@@ -803,48 +803,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
803 | (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) && | 803 | (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) && |
804 | (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) { | 804 | (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) { |
805 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, | 805 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, |
806 | "2611 FLOGI failed on registered " | 806 | "2611 FLOGI failed on FCF (x%x), " |
807 | "FCF record fcf_index(%d), status: " | 807 | "status:x%x/x%x, tmo:x%x, perform " |
808 | "x%x/x%x, tmo:x%x, trying to perform " | 808 | "roundrobin FCF failover\n", |
809 | "round robin failover\n", | ||
810 | phba->fcf.current_rec.fcf_indx, | 809 | phba->fcf.current_rec.fcf_indx, |
811 | irsp->ulpStatus, irsp->un.ulpWord[4], | 810 | irsp->ulpStatus, irsp->un.ulpWord[4], |
812 | irsp->ulpTimeout); | 811 | irsp->ulpTimeout); |
813 | fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); | 812 | fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); |
814 | if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { | 813 | rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); |
815 | /* | 814 | if (rc) |
816 | * Exhausted the eligible FCF record list, | 815 | goto out; |
817 | * fail through to retry FLOGI on current | ||
818 | * FCF record. | ||
819 | */ | ||
820 | lpfc_printf_log(phba, KERN_WARNING, | ||
821 | LOG_FIP | LOG_ELS, | ||
822 | "2760 Completed one round " | ||
823 | "of FLOGI FCF round robin " | ||
824 | "failover list, retry FLOGI " | ||
825 | "on currently registered " | ||
826 | "FCF index:%d\n", | ||
827 | phba->fcf.current_rec.fcf_indx); | ||
828 | } else { | ||
829 | lpfc_printf_log(phba, KERN_INFO, | ||
830 | LOG_FIP | LOG_ELS, | ||
831 | "2794 FLOGI FCF round robin " | ||
832 | "failover to FCF index x%x\n", | ||
833 | fcf_index); | ||
834 | rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, | ||
835 | fcf_index); | ||
836 | if (rc) | ||
837 | lpfc_printf_log(phba, KERN_WARNING, | ||
838 | LOG_FIP | LOG_ELS, | ||
839 | "2761 FLOGI round " | ||
840 | "robin FCF failover " | ||
841 | "read FCF failed " | ||
842 | "rc:x%x, fcf_index:" | ||
843 | "%d\n", rc, | ||
844 | phba->fcf.current_rec.fcf_indx); | ||
845 | else | ||
846 | goto out; | ||
847 | } | ||
848 | } | 816 | } |
849 | 817 | ||
850 | /* FLOGI failure */ | 818 | /* FLOGI failure */ |
@@ -934,6 +902,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
934 | lpfc_nlp_put(ndlp); | 902 | lpfc_nlp_put(ndlp); |
935 | spin_lock_irq(&phba->hbalock); | 903 | spin_lock_irq(&phba->hbalock); |
936 | phba->fcf.fcf_flag &= ~FCF_DISCOVERY; | 904 | phba->fcf.fcf_flag &= ~FCF_DISCOVERY; |
905 | phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); | ||
937 | spin_unlock_irq(&phba->hbalock); | 906 | spin_unlock_irq(&phba->hbalock); |
938 | goto out; | 907 | goto out; |
939 | } | 908 | } |
@@ -942,13 +911,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
942 | if (phba->hba_flag & HBA_FIP_SUPPORT) | 911 | if (phba->hba_flag & HBA_FIP_SUPPORT) |
943 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | | 912 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | |
944 | LOG_ELS, | 913 | LOG_ELS, |
945 | "2769 FLOGI successful on FCF " | 914 | "2769 FLOGI to FCF (x%x) " |
946 | "record: current_fcf_index:" | 915 | "completed successfully\n", |
947 | "x%x, terminate FCF round " | ||
948 | "robin failover process\n", | ||
949 | phba->fcf.current_rec.fcf_indx); | 916 | phba->fcf.current_rec.fcf_indx); |
950 | spin_lock_irq(&phba->hbalock); | 917 | spin_lock_irq(&phba->hbalock); |
951 | phba->fcf.fcf_flag &= ~FCF_DISCOVERY; | 918 | phba->fcf.fcf_flag &= ~FCF_DISCOVERY; |
919 | phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); | ||
952 | spin_unlock_irq(&phba->hbalock); | 920 | spin_unlock_irq(&phba->hbalock); |
953 | goto out; | 921 | goto out; |
954 | } | 922 | } |
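With the inline failover code above removed, the FIP-mode FLOGI failure path in lpfc_cmpl_els_flogi() reduces to: fetch the next roundrobin index, hand it to lpfc_sli4_fcf_rr_next_proc() (added in lpfc_hbadisc.c below), and skip the local retry when that helper returns non-zero. The stand-alone C sketch below models just that control flow; the helper bodies are simplified stand-ins, not the driver functions.

#include <stdio.h>

#define FCF_NEXT_NONE 0xFFFFu	/* no further eligible FCF in the roundrobin bmask */

/* Stand-in for posting the READ_FCF_RECORD mailbox command; pretend it succeeds. */
static int read_next_fcf_rec(unsigned int fcf_index)
{
	printf("failover to FCF x%x started\n", fcf_index);
	return 0;
}

/* Models lpfc_sli4_fcf_rr_next_proc(): 1 = stop FLOGI on the current FCF,
 * 0 = fall through and retry FLOGI on the currently registered FCF. */
static int fcf_rr_next_proc(unsigned int fcf_index)
{
	if (fcf_index == FCF_NEXT_NONE)
		return 1;	/* roundrobin exhausted (or devloss-driven rescan kicked off) */
	if (read_next_fcf_rec(fcf_index) == 0)
		return 1;	/* next FCF read posted; stop using the current one */
	return 0;		/* read failed: keep retrying the current FCF */
}

int main(void)
{
	/* FLOGI failed in FIP mode: ask the roundrobin engine what to do next. */
	if (fcf_rr_next_proc(7))
		printf("skip FLOGI retry on the current FCF\n");
	else
		printf("retry FLOGI on the current FCF\n");
	return 0;
}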
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 0788bf670add..05c9398a723d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -20,6 +20,7 @@
20 | *******************************************************************/ | 20 | *******************************************************************/ |
21 | 21 | ||
22 | #include <linux/blkdev.h> | 22 | #include <linux/blkdev.h> |
23 | #include <linux/delay.h> | ||
23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
24 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
25 | #include <linux/kthread.h> | 26 | #include <linux/kthread.h> |
@@ -63,6 +64,7 @@ static uint8_t lpfcAlpaArray[] = { | |||
63 | static void lpfc_disc_timeout_handler(struct lpfc_vport *); | 64 | static void lpfc_disc_timeout_handler(struct lpfc_vport *); |
64 | static void lpfc_disc_flush_list(struct lpfc_vport *vport); | 65 | static void lpfc_disc_flush_list(struct lpfc_vport *vport); |
65 | static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); | 66 | static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); |
67 | static int lpfc_fcf_inuse(struct lpfc_hba *); | ||
66 | 68 | ||
67 | void | 69 | void |
68 | lpfc_terminate_rport_io(struct fc_rport *rport) | 70 | lpfc_terminate_rport_io(struct fc_rport *rport) |
@@ -160,11 +162,17 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) | |||
160 | return; | 162 | return; |
161 | } | 163 | } |
162 | 164 | ||
163 | /* | 165 | /** |
164 | * This function is called from the worker thread when dev_loss_tmo | 166 | * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler |
165 | * expire. | 167 | * @ndlp: Pointer to remote node object. |
166 | */ | 168 | * |
167 | static void | 169 | * This function is called from the worker thread when devloss timeout timer |
170 | * expires. For an SLI4 host, this routine shall return 1 when at least one | ||
171 | * remote node, including this @ndlp, is still in use of the FCF; otherwise, | ||
172 | * this routine shall return 0 when no remote node is still in use of the FCF | ||
173 | * when the devloss timeout happened to this @ndlp. | ||
174 | **/ | ||
175 | static int | ||
168 | lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) | 176 | lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) |
169 | { | 177 | { |
170 | struct lpfc_rport_data *rdata; | 178 | struct lpfc_rport_data *rdata; |
@@ -175,17 +183,21 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) | |||
175 | int put_node; | 183 | int put_node; |
176 | int put_rport; | 184 | int put_rport; |
177 | int warn_on = 0; | 185 | int warn_on = 0; |
186 | int fcf_inuse = 0; | ||
178 | 187 | ||
179 | rport = ndlp->rport; | 188 | rport = ndlp->rport; |
180 | 189 | ||
181 | if (!rport) | 190 | if (!rport) |
182 | return; | 191 | return fcf_inuse; |
183 | 192 | ||
184 | rdata = rport->dd_data; | 193 | rdata = rport->dd_data; |
185 | name = (uint8_t *) &ndlp->nlp_portname; | 194 | name = (uint8_t *) &ndlp->nlp_portname; |
186 | vport = ndlp->vport; | 195 | vport = ndlp->vport; |
187 | phba = vport->phba; | 196 | phba = vport->phba; |
188 | 197 | ||
198 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
199 | fcf_inuse = lpfc_fcf_inuse(phba); | ||
200 | |||
189 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, | 201 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, |
190 | "rport devlosstmo:did:x%x type:x%x id:x%x", | 202 | "rport devlosstmo:did:x%x type:x%x id:x%x", |
191 | ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); | 203 | ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); |
@@ -209,7 +221,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) | |||
209 | lpfc_nlp_put(ndlp); | 221 | lpfc_nlp_put(ndlp); |
210 | if (put_rport) | 222 | if (put_rport) |
211 | put_device(&rport->dev); | 223 | put_device(&rport->dev); |
212 | return; | 224 | return fcf_inuse; |
213 | } | 225 | } |
214 | 226 | ||
215 | if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { | 227 | if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { |
@@ -220,7 +232,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) | |||
220 | *name, *(name+1), *(name+2), *(name+3), | 232 | *name, *(name+1), *(name+2), *(name+3), |
221 | *(name+4), *(name+5), *(name+6), *(name+7), | 233 | *(name+4), *(name+5), *(name+6), *(name+7), |
222 | ndlp->nlp_DID); | 234 | ndlp->nlp_DID); |
223 | return; | 235 | return fcf_inuse; |
224 | } | 236 | } |
225 | 237 | ||
226 | if (ndlp->nlp_type & NLP_FABRIC) { | 238 | if (ndlp->nlp_type & NLP_FABRIC) { |
@@ -233,7 +245,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) | |||
233 | lpfc_nlp_put(ndlp); | 245 | lpfc_nlp_put(ndlp); |
234 | if (put_rport) | 246 | if (put_rport) |
235 | put_device(&rport->dev); | 247 | put_device(&rport->dev); |
236 | return; | 248 | return fcf_inuse; |
237 | } | 249 | } |
238 | 250 | ||
239 | if (ndlp->nlp_sid != NLP_NO_SID) { | 251 | if (ndlp->nlp_sid != NLP_NO_SID) { |
@@ -280,6 +292,74 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) | |||
280 | (ndlp->nlp_state != NLP_STE_PRLI_ISSUE)) | 292 | (ndlp->nlp_state != NLP_STE_PRLI_ISSUE)) |
281 | lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); | 293 | lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); |
282 | 294 | ||
295 | return fcf_inuse; | ||
296 | } | ||
297 | |||
298 | /** | ||
299 | * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler | ||
300 | * @phba: Pointer to hba context object. | ||
301 | * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler. | ||
302 | * @nlp_did: remote node identifier with devloss timeout. | ||
303 | * | ||
304 | * This function is called from the worker thread after invoking devloss | ||
305 | * timeout handler and releasing the reference count for the ndlp with | ||
306 | * which the devloss timeout was handled for SLI4 host. For the devloss | ||
307 | * timeout of the last remote node which had been in use of FCF, when this | ||
308 | * routine is invoked, it is guaranteed that none of the remote nodes are | ||
309 | * still in use of the FCF. On devloss timeout to the last node using the FCF, | ||
310 | * if the FIP engine is neither in FCF table scan process nor roundrobin | ||
311 | * failover process, the in-use FCF shall be unregistered. If the FIP | ||
312 | * engine is in FCF discovery process, the devloss timeout state shall | ||
313 | * be set for either the FCF table scan process or roundrobin failover | ||
314 | * process to unregister the in-use FCF. | ||
315 | **/ | ||
316 | static void | ||
317 | lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse, | ||
318 | uint32_t nlp_did) | ||
319 | { | ||
320 | /* If devloss timeout happened to a remote node when FCF had no | ||
321 | * longer been in-use, do nothing. | ||
322 | */ | ||
323 | if (!fcf_inuse) | ||
324 | return; | ||
325 | |||
326 | if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) { | ||
327 | spin_lock_irq(&phba->hbalock); | ||
328 | if (phba->fcf.fcf_flag & FCF_DISCOVERY) { | ||
329 | if (phba->hba_flag & HBA_DEVLOSS_TMO) { | ||
330 | spin_unlock_irq(&phba->hbalock); | ||
331 | return; | ||
332 | } | ||
333 | phba->hba_flag |= HBA_DEVLOSS_TMO; | ||
334 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | ||
335 | "2847 Last remote node (x%x) using " | ||
336 | "FCF devloss tmo\n", nlp_did); | ||
337 | } | ||
338 | if (phba->fcf.fcf_flag & FCF_REDISC_PROG) { | ||
339 | spin_unlock_irq(&phba->hbalock); | ||
340 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | ||
341 | "2868 Devloss tmo to FCF rediscovery " | ||
342 | "in progress\n"); | ||
343 | return; | ||
344 | } | ||
345 | if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) { | ||
346 | spin_unlock_irq(&phba->hbalock); | ||
347 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | ||
348 | "2869 Devloss tmo to idle FIP engine, " | ||
349 | "unreg in-use FCF and rescan.\n"); | ||
350 | /* Unregister in-use FCF and rescan */ | ||
351 | lpfc_unregister_fcf_rescan(phba); | ||
352 | return; | ||
353 | } | ||
354 | spin_unlock_irq(&phba->hbalock); | ||
355 | if (phba->hba_flag & FCF_TS_INPROG) | ||
356 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | ||
357 | "2870 FCF table scan in progress\n"); | ||
358 | if (phba->hba_flag & FCF_RR_INPROG) | ||
359 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | ||
360 | "2871 FLOGI roundrobin FCF failover " | ||
361 | "in progress\n"); | ||
362 | } | ||
283 | lpfc_unregister_unused_fcf(phba); | 363 | lpfc_unregister_unused_fcf(phba); |
284 | } | 364 | } |
285 | 365 | ||
@@ -408,6 +488,8 @@ lpfc_work_list_done(struct lpfc_hba *phba) | |||
408 | struct lpfc_work_evt *evtp = NULL; | 488 | struct lpfc_work_evt *evtp = NULL; |
409 | struct lpfc_nodelist *ndlp; | 489 | struct lpfc_nodelist *ndlp; |
410 | int free_evt; | 490 | int free_evt; |
491 | int fcf_inuse; | ||
492 | uint32_t nlp_did; | ||
411 | 493 | ||
412 | spin_lock_irq(&phba->hbalock); | 494 | spin_lock_irq(&phba->hbalock); |
413 | while (!list_empty(&phba->work_list)) { | 495 | while (!list_empty(&phba->work_list)) { |
@@ -427,12 +509,17 @@ lpfc_work_list_done(struct lpfc_hba *phba) | |||
427 | break; | 509 | break; |
428 | case LPFC_EVT_DEV_LOSS: | 510 | case LPFC_EVT_DEV_LOSS: |
429 | ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); | 511 | ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); |
430 | lpfc_dev_loss_tmo_handler(ndlp); | 512 | fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp); |
431 | free_evt = 0; | 513 | free_evt = 0; |
432 | /* decrement the node reference count held for | 514 | /* decrement the node reference count held for |
433 | * this queued work | 515 | * this queued work |
434 | */ | 516 | */ |
517 | nlp_did = ndlp->nlp_DID; | ||
435 | lpfc_nlp_put(ndlp); | 518 | lpfc_nlp_put(ndlp); |
519 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
520 | lpfc_sli4_post_dev_loss_tmo_handler(phba, | ||
521 | fcf_inuse, | ||
522 | nlp_did); | ||
436 | break; | 523 | break; |
437 | case LPFC_EVT_ONLINE: | 524 | case LPFC_EVT_ONLINE: |
438 | if (phba->link_state < LPFC_LINK_DOWN) | 525 | if (phba->link_state < LPFC_LINK_DOWN) |
@@ -1021,8 +1108,7 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1021 | "2017 REG_FCFI mbxStatus error x%x " | 1108 | "2017 REG_FCFI mbxStatus error x%x " |
1022 | "HBA state x%x\n", | 1109 | "HBA state x%x\n", |
1023 | mboxq->u.mb.mbxStatus, vport->port_state); | 1110 | mboxq->u.mb.mbxStatus, vport->port_state); |
1024 | mempool_free(mboxq, phba->mbox_mem_pool); | 1111 | goto fail_out; |
1025 | return; | ||
1026 | } | 1112 | } |
1027 | 1113 | ||
1028 | /* Start FCoE discovery by sending a FLOGI. */ | 1114 | /* Start FCoE discovery by sending a FLOGI. */ |
@@ -1031,20 +1117,30 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1031 | spin_lock_irq(&phba->hbalock); | 1117 | spin_lock_irq(&phba->hbalock); |
1032 | phba->fcf.fcf_flag |= FCF_REGISTERED; | 1118 | phba->fcf.fcf_flag |= FCF_REGISTERED; |
1033 | spin_unlock_irq(&phba->hbalock); | 1119 | spin_unlock_irq(&phba->hbalock); |
1120 | |||
1034 | /* If there is a pending FCoE event, restart FCF table scan. */ | 1121 | /* If there is a pending FCoE event, restart FCF table scan. */ |
1035 | if (lpfc_check_pending_fcoe_event(phba, 1)) { | 1122 | if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) |
1036 | mempool_free(mboxq, phba->mbox_mem_pool); | 1123 | goto fail_out; |
1037 | return; | 1124 | |
1038 | } | 1125 | /* Mark successful completion of FCF table scan */ |
1039 | spin_lock_irq(&phba->hbalock); | 1126 | spin_lock_irq(&phba->hbalock); |
1040 | phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); | 1127 | phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); |
1041 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | 1128 | phba->hba_flag &= ~FCF_TS_INPROG; |
1042 | spin_unlock_irq(&phba->hbalock); | 1129 | if (vport->port_state != LPFC_FLOGI) { |
1043 | if (vport->port_state != LPFC_FLOGI) | 1130 | phba->hba_flag |= FCF_RR_INPROG; |
1131 | spin_unlock_irq(&phba->hbalock); | ||
1044 | lpfc_initial_flogi(vport); | 1132 | lpfc_initial_flogi(vport); |
1133 | goto out; | ||
1134 | } | ||
1135 | spin_unlock_irq(&phba->hbalock); | ||
1136 | goto out; | ||
1045 | 1137 | ||
1138 | fail_out: | ||
1139 | spin_lock_irq(&phba->hbalock); | ||
1140 | phba->hba_flag &= ~FCF_RR_INPROG; | ||
1141 | spin_unlock_irq(&phba->hbalock); | ||
1142 | out: | ||
1046 | mempool_free(mboxq, phba->mbox_mem_pool); | 1143 | mempool_free(mboxq, phba->mbox_mem_pool); |
1047 | return; | ||
1048 | } | 1144 | } |
1049 | 1145 | ||
1050 | /** | 1146 | /** |
@@ -1241,10 +1337,9 @@ lpfc_register_fcf(struct lpfc_hba *phba) | |||
1241 | int rc; | 1337 | int rc; |
1242 | 1338 | ||
1243 | spin_lock_irq(&phba->hbalock); | 1339 | spin_lock_irq(&phba->hbalock); |
1244 | |||
1245 | /* If the FCF is not available do nothing. */ | 1340 | /* If the FCF is not available do nothing. */ |
1246 | if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { | 1341 | if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { |
1247 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | 1342 | phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); |
1248 | spin_unlock_irq(&phba->hbalock); | 1343 | spin_unlock_irq(&phba->hbalock); |
1249 | return; | 1344 | return; |
1250 | } | 1345 | } |
@@ -1252,19 +1347,22 @@ lpfc_register_fcf(struct lpfc_hba *phba) | |||
1252 | /* The FCF is already registered, start discovery */ | 1347 | /* The FCF is already registered, start discovery */ |
1253 | if (phba->fcf.fcf_flag & FCF_REGISTERED) { | 1348 | if (phba->fcf.fcf_flag & FCF_REGISTERED) { |
1254 | phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); | 1349 | phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); |
1255 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | 1350 | phba->hba_flag &= ~FCF_TS_INPROG; |
1256 | spin_unlock_irq(&phba->hbalock); | 1351 | if (phba->pport->port_state != LPFC_FLOGI) { |
1257 | if (phba->pport->port_state != LPFC_FLOGI) | 1352 | phba->hba_flag |= FCF_RR_INPROG; |
1353 | spin_unlock_irq(&phba->hbalock); | ||
1258 | lpfc_initial_flogi(phba->pport); | 1354 | lpfc_initial_flogi(phba->pport); |
1355 | return; | ||
1356 | } | ||
1357 | spin_unlock_irq(&phba->hbalock); | ||
1259 | return; | 1358 | return; |
1260 | } | 1359 | } |
1261 | spin_unlock_irq(&phba->hbalock); | 1360 | spin_unlock_irq(&phba->hbalock); |
1262 | 1361 | ||
1263 | fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, | 1362 | fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
1264 | GFP_KERNEL); | ||
1265 | if (!fcf_mbxq) { | 1363 | if (!fcf_mbxq) { |
1266 | spin_lock_irq(&phba->hbalock); | 1364 | spin_lock_irq(&phba->hbalock); |
1267 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | 1365 | phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); |
1268 | spin_unlock_irq(&phba->hbalock); | 1366 | spin_unlock_irq(&phba->hbalock); |
1269 | return; | 1367 | return; |
1270 | } | 1368 | } |
@@ -1275,7 +1373,7 @@ lpfc_register_fcf(struct lpfc_hba *phba) | |||
1275 | rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); | 1373 | rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); |
1276 | if (rc == MBX_NOT_FINISHED) { | 1374 | if (rc == MBX_NOT_FINISHED) { |
1277 | spin_lock_irq(&phba->hbalock); | 1375 | spin_lock_irq(&phba->hbalock); |
1278 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | 1376 | phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); |
1279 | spin_unlock_irq(&phba->hbalock); | 1377 | spin_unlock_irq(&phba->hbalock); |
1280 | mempool_free(fcf_mbxq, phba->mbox_mem_pool); | 1378 | mempool_free(fcf_mbxq, phba->mbox_mem_pool); |
1281 | } | 1379 | } |
@@ -1493,7 +1591,7 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) | |||
1493 | * FCF discovery, no need to restart FCF discovery. | 1591 | * FCF discovery, no need to restart FCF discovery. |
1494 | */ | 1592 | */ |
1495 | if ((phba->link_state >= LPFC_LINK_UP) && | 1593 | if ((phba->link_state >= LPFC_LINK_UP) && |
1496 | (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) | 1594 | (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) |
1497 | return 0; | 1595 | return 0; |
1498 | 1596 | ||
1499 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 1597 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
@@ -1517,14 +1615,14 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) | |||
1517 | lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); | 1615 | lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); |
1518 | } else { | 1616 | } else { |
1519 | /* | 1617 | /* |
1520 | * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS | 1618 | * Do not continue FCF discovery and clear FCF_TS_INPROG |
1521 | * flag | 1619 | * flag |
1522 | */ | 1620 | */ |
1523 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, | 1621 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, |
1524 | "2833 Stop FCF discovery process due to link " | 1622 | "2833 Stop FCF discovery process due to link " |
1525 | "state change (x%x)\n", phba->link_state); | 1623 | "state change (x%x)\n", phba->link_state); |
1526 | spin_lock_irq(&phba->hbalock); | 1624 | spin_lock_irq(&phba->hbalock); |
1527 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | 1625 | phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); |
1528 | phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); | 1626 | phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); |
1529 | spin_unlock_irq(&phba->hbalock); | 1627 | spin_unlock_irq(&phba->hbalock); |
1530 | } | 1628 | } |
@@ -1729,6 +1827,65 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba, | |||
1729 | } | 1827 | } |
1730 | 1828 | ||
1731 | /** | 1829 | /** |
1830 | * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf | ||
1831 | * @vport: Pointer to vport object. | ||
1832 | * @fcf_index: index to next fcf. | ||
1833 | * | ||
1834 | * This function processes the roundrobin fcf failover to the next fcf index. | ||
1835 | * When this function is invoked, there will be a current fcf registered | ||
1836 | * for flogi. | ||
1837 | * Return: 0 to continue retrying flogi on the currently registered fcf; | ||
1838 | * 1 to stop flogi on the currently registered fcf. | ||
1839 | */ | ||
1840 | int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) | ||
1841 | { | ||
1842 | struct lpfc_hba *phba = vport->phba; | ||
1843 | int rc; | ||
1844 | |||
1845 | if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { | ||
1846 | spin_lock_irq(&phba->hbalock); | ||
1847 | if (phba->hba_flag & HBA_DEVLOSS_TMO) { | ||
1848 | spin_unlock_irq(&phba->hbalock); | ||
1849 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | ||
1850 | "2872 Devloss tmo with no eligible " | ||
1851 | "FCF, unregister in-use FCF (x%x) " | ||
1852 | "and rescan FCF table\n", | ||
1853 | phba->fcf.current_rec.fcf_indx); | ||
1854 | lpfc_unregister_fcf_rescan(phba); | ||
1855 | goto stop_flogi_current_fcf; | ||
1856 | } | ||
1857 | /* Mark the end to FLOGI roundrobin failover */ | ||
1858 | phba->hba_flag &= ~FCF_RR_INPROG; | ||
1859 | /* Allow action to new fcf asynchronous event */ | ||
1860 | phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); | ||
1861 | spin_unlock_irq(&phba->hbalock); | ||
1862 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | ||
1863 | "2865 No FCF available, stop roundrobin FCF " | ||
1864 | "failover and change port state:x%x/x%x\n", | ||
1865 | phba->pport->port_state, LPFC_VPORT_UNKNOWN); | ||
1866 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; | ||
1867 | goto stop_flogi_current_fcf; | ||
1868 | } else { | ||
1869 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, | ||
1870 | "2794 Try FLOGI roundrobin FCF failover to " | ||
1871 | "(x%x)\n", fcf_index); | ||
1872 | rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index); | ||
1873 | if (rc) | ||
1874 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, | ||
1875 | "2761 FLOGI roundrobin FCF failover " | ||
1876 | "failed (rc:x%x) to read FCF (x%x)\n", | ||
1877 | rc, phba->fcf.current_rec.fcf_indx); | ||
1878 | else | ||
1879 | goto stop_flogi_current_fcf; | ||
1880 | } | ||
1881 | return 0; | ||
1882 | |||
1883 | stop_flogi_current_fcf: | ||
1884 | lpfc_can_disctmo(vport); | ||
1885 | return 1; | ||
1886 | } | ||
1887 | |||
1888 | /** | ||
1732 | * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. | 1889 | * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. |
1733 | * @phba: pointer to lpfc hba data structure. | 1890 | * @phba: pointer to lpfc hba data structure. |
1734 | * @mboxq: pointer to mailbox object. | 1891 | * @mboxq: pointer to mailbox object. |
@@ -1756,7 +1913,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1756 | int rc; | 1913 | int rc; |
1757 | 1914 | ||
1758 | /* If there is pending FCoE event restart FCF table scan */ | 1915 | /* If there is pending FCoE event restart FCF table scan */ |
1759 | if (lpfc_check_pending_fcoe_event(phba, 0)) { | 1916 | if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) { |
1760 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | 1917 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
1761 | return; | 1918 | return; |
1762 | } | 1919 | } |
@@ -1765,12 +1922,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1765 | new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, | 1922 | new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, |
1766 | &next_fcf_index); | 1923 | &next_fcf_index); |
1767 | if (!new_fcf_record) { | 1924 | if (!new_fcf_record) { |
1768 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, | 1925 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, |
1769 | "2765 Mailbox command READ_FCF_RECORD " | 1926 | "2765 Mailbox command READ_FCF_RECORD " |
1770 | "failed to retrieve a FCF record.\n"); | 1927 | "failed to retrieve a FCF record.\n"); |
1771 | /* Let next new FCF event trigger fast failover */ | 1928 | /* Let next new FCF event trigger fast failover */ |
1772 | spin_lock_irq(&phba->hbalock); | 1929 | spin_lock_irq(&phba->hbalock); |
1773 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | 1930 | phba->hba_flag &= ~FCF_TS_INPROG; |
1774 | spin_unlock_irq(&phba->hbalock); | 1931 | spin_unlock_irq(&phba->hbalock); |
1775 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | 1932 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
1776 | return; | 1933 | return; |
@@ -1787,13 +1944,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1787 | /* | 1944 | /* |
1788 | * If the fcf record does not match with connect list entries | 1945 | * If the fcf record does not match with connect list entries |
1789 | * read the next entry; otherwise, this is an eligible FCF | 1946 | * read the next entry; otherwise, this is an eligible FCF |
1790 | * record for round robin FCF failover. | 1947 | * record for roundrobin FCF failover. |
1791 | */ | 1948 | */ |
1792 | if (!rc) { | 1949 | if (!rc) { |
1793 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, | 1950 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
1794 | "2781 FCF record (x%x) failed FCF " | 1951 | "2781 FCF (x%x) failed connection " |
1795 | "connection list check, fcf_avail:x%x, " | 1952 | "list check: (x%x/x%x)\n", |
1796 | "fcf_valid:x%x\n", | ||
1797 | bf_get(lpfc_fcf_record_fcf_index, | 1953 | bf_get(lpfc_fcf_record_fcf_index, |
1798 | new_fcf_record), | 1954 | new_fcf_record), |
1799 | bf_get(lpfc_fcf_record_fcf_avail, | 1955 | bf_get(lpfc_fcf_record_fcf_avail, |
@@ -1823,9 +1979,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1823 | !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { | 1979 | !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { |
1824 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, | 1980 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
1825 | "2835 Invalid in-use FCF " | 1981 | "2835 Invalid in-use FCF " |
1826 | "record (x%x) reported, " | 1982 | "(x%x), enter FCF failover " |
1827 | "entering fast FCF failover " | 1983 | "table scan.\n", |
1828 | "mode scanning.\n", | ||
1829 | phba->fcf.current_rec.fcf_indx); | 1984 | phba->fcf.current_rec.fcf_indx); |
1830 | spin_lock_irq(&phba->hbalock); | 1985 | spin_lock_irq(&phba->hbalock); |
1831 | phba->fcf.fcf_flag |= FCF_REDISC_FOV; | 1986 | phba->fcf.fcf_flag |= FCF_REDISC_FOV; |
@@ -1970,8 +2125,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1970 | */ | 2125 | */ |
1971 | if (fcf_rec) { | 2126 | if (fcf_rec) { |
1972 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 2127 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
1973 | "2840 Update current FCF record " | 2128 | "2840 Update initial FCF candidate " |
1974 | "with initial FCF record (x%x)\n", | 2129 | "with FCF (x%x)\n", |
1975 | bf_get(lpfc_fcf_record_fcf_index, | 2130 | bf_get(lpfc_fcf_record_fcf_index, |
1976 | new_fcf_record)); | 2131 | new_fcf_record)); |
1977 | __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, | 2132 | __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, |
@@ -2001,20 +2156,28 @@ read_next_fcf: | |||
2001 | */ | 2156 | */ |
2002 | if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { | 2157 | if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { |
2003 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, | 2158 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
2004 | "2782 No suitable FCF record " | 2159 | "2782 No suitable FCF found: " |
2005 | "found during this round of " | 2160 | "(x%x/x%x)\n", |
2006 | "post FCF rediscovery scan: " | ||
2007 | "fcf_evt_tag:x%x, fcf_index: " | ||
2008 | "x%x\n", | ||
2009 | phba->fcoe_eventtag_at_fcf_scan, | 2161 | phba->fcoe_eventtag_at_fcf_scan, |
2010 | bf_get(lpfc_fcf_record_fcf_index, | 2162 | bf_get(lpfc_fcf_record_fcf_index, |
2011 | new_fcf_record)); | 2163 | new_fcf_record)); |
2164 | spin_lock_irq(&phba->hbalock); | ||
2165 | if (phba->hba_flag & HBA_DEVLOSS_TMO) { | ||
2166 | phba->hba_flag &= ~FCF_TS_INPROG; | ||
2167 | spin_unlock_irq(&phba->hbalock); | ||
2168 | /* Unregister in-use FCF and rescan */ | ||
2169 | lpfc_printf_log(phba, KERN_INFO, | ||
2170 | LOG_FIP, | ||
2171 | "2864 On devloss tmo " | ||
2172 | "unreg in-use FCF and " | ||
2173 | "rescan FCF table\n"); | ||
2174 | lpfc_unregister_fcf_rescan(phba); | ||
2175 | return; | ||
2176 | } | ||
2012 | /* | 2177 | /* |
2013 | * Let next new FCF event trigger fast | 2178 | * Let next new FCF event trigger fast failover |
2014 | * failover | ||
2015 | */ | 2179 | */ |
2016 | spin_lock_irq(&phba->hbalock); | 2180 | phba->hba_flag &= ~FCF_TS_INPROG; |
2017 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | ||
2018 | spin_unlock_irq(&phba->hbalock); | 2181 | spin_unlock_irq(&phba->hbalock); |
2019 | return; | 2182 | return; |
2020 | } | 2183 | } |
@@ -2032,9 +2195,8 @@ read_next_fcf: | |||
2032 | 2195 | ||
2033 | /* Replace in-use record with the new record */ | 2196 | /* Replace in-use record with the new record */ |
2034 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 2197 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2035 | "2842 Replace the current in-use " | 2198 | "2842 Replace in-use FCF (x%x) " |
2036 | "FCF record (x%x) with failover FCF " | 2199 | "with failover FCF (x%x)\n", |
2037 | "record (x%x)\n", | ||
2038 | phba->fcf.current_rec.fcf_indx, | 2200 | phba->fcf.current_rec.fcf_indx, |
2039 | phba->fcf.failover_rec.fcf_indx); | 2201 | phba->fcf.failover_rec.fcf_indx); |
2040 | memcpy(&phba->fcf.current_rec, | 2202 | memcpy(&phba->fcf.current_rec, |
@@ -2046,15 +2208,8 @@ read_next_fcf: | |||
2046 | * FCF failover. | 2208 | * FCF failover. |
2047 | */ | 2209 | */ |
2048 | spin_lock_irq(&phba->hbalock); | 2210 | spin_lock_irq(&phba->hbalock); |
2049 | phba->fcf.fcf_flag &= | 2211 | phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; |
2050 | ~(FCF_REDISC_FOV | FCF_REDISC_RRU); | ||
2051 | spin_unlock_irq(&phba->hbalock); | 2212 | spin_unlock_irq(&phba->hbalock); |
2052 | /* | ||
2053 | * Set up the initial registered FCF index for FLOGI | ||
2054 | * round robin FCF failover. | ||
2055 | */ | ||
2056 | phba->fcf.fcf_rr_init_indx = | ||
2057 | phba->fcf.failover_rec.fcf_indx; | ||
2058 | /* Register to the new FCF record */ | 2213 | /* Register to the new FCF record */ |
2059 | lpfc_register_fcf(phba); | 2214 | lpfc_register_fcf(phba); |
2060 | } else { | 2215 | } else { |
@@ -2101,11 +2256,11 @@ out: | |||
2101 | } | 2256 | } |
2102 | 2257 | ||
2103 | /** | 2258 | /** |
2104 | * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler | 2259 | * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler |
2105 | * @phba: pointer to lpfc hba data structure. | 2260 | * @phba: pointer to lpfc hba data structure. |
2106 | * @mboxq: pointer to mailbox object. | 2261 | * @mboxq: pointer to mailbox object. |
2107 | * | 2262 | * |
2108 | * This is the callback function for FLOGI failure round robin FCF failover | 2263 | * This is the callback function for FLOGI failure roundrobin FCF failover |
2109 | * read FCF record mailbox command from the eligible FCF record bmask for | 2264 | * read FCF record mailbox command from the eligible FCF record bmask for |
2110 | * performing the failover. If the FCF read back is not valid/available, it | 2265 | * performing the failover. If the FCF read back is not valid/available, it |
2111 | * fails through to retrying FLOGI to the currently registered FCF again. | 2266 | * fails through to retrying FLOGI to the currently registered FCF again. |
@@ -2120,17 +2275,18 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
2120 | { | 2275 | { |
2121 | struct fcf_record *new_fcf_record; | 2276 | struct fcf_record *new_fcf_record; |
2122 | uint32_t boot_flag, addr_mode; | 2277 | uint32_t boot_flag, addr_mode; |
2123 | uint16_t next_fcf_index; | 2278 | uint16_t next_fcf_index, fcf_index; |
2124 | uint16_t current_fcf_index; | 2279 | uint16_t current_fcf_index; |
2125 | uint16_t vlan_id; | 2280 | uint16_t vlan_id; |
2281 | int rc; | ||
2126 | 2282 | ||
2127 | /* If link state is not up, stop the round robin failover process */ | 2283 | /* If link state is not up, stop the roundrobin failover process */ |
2128 | if (phba->link_state < LPFC_LINK_UP) { | 2284 | if (phba->link_state < LPFC_LINK_UP) { |
2129 | spin_lock_irq(&phba->hbalock); | 2285 | spin_lock_irq(&phba->hbalock); |
2130 | phba->fcf.fcf_flag &= ~FCF_DISCOVERY; | 2286 | phba->fcf.fcf_flag &= ~FCF_DISCOVERY; |
2287 | phba->hba_flag &= ~FCF_RR_INPROG; | ||
2131 | spin_unlock_irq(&phba->hbalock); | 2288 | spin_unlock_irq(&phba->hbalock); |
2132 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | 2289 | goto out; |
2133 | return; | ||
2134 | } | 2290 | } |
2135 | 2291 | ||
2136 | /* Parse the FCF record from the non-embedded mailbox command */ | 2292 | /* Parse the FCF record from the non-embedded mailbox command */ |
@@ -2140,23 +2296,47 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
2140 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, | 2296 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, |
2141 | "2766 Mailbox command READ_FCF_RECORD " | 2297 | "2766 Mailbox command READ_FCF_RECORD " |
2142 | "failed to retrieve a FCF record.\n"); | 2298 | "failed to retrieve a FCF record.\n"); |
2143 | goto out; | 2299 | goto error_out; |
2144 | } | 2300 | } |
2145 | 2301 | ||
2146 | /* Get the needed parameters from FCF record */ | 2302 | /* Get the needed parameters from FCF record */ |
2147 | lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, | 2303 | rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, |
2148 | &addr_mode, &vlan_id); | 2304 | &addr_mode, &vlan_id); |
2149 | 2305 | ||
2150 | /* Log the FCF record information if turned on */ | 2306 | /* Log the FCF record information if turned on */ |
2151 | lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, | 2307 | lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, |
2152 | next_fcf_index); | 2308 | next_fcf_index); |
2153 | 2309 | ||
2310 | fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); | ||
2311 | if (!rc) { | ||
2312 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | ||
2313 | "2848 Remove ineligible FCF (x%x) from " | ||
2314 | "from roundrobin bmask\n", fcf_index); | ||
2315 | /* Clear roundrobin bmask bit for ineligible FCF */ | ||
2316 | lpfc_sli4_fcf_rr_index_clear(phba, fcf_index); | ||
2317 | /* Perform next round of roundrobin FCF failover */ | ||
2318 | fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); | ||
2319 | rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index); | ||
2320 | if (rc) | ||
2321 | goto out; | ||
2322 | goto error_out; | ||
2323 | } | ||
2324 | |||
2325 | if (fcf_index == phba->fcf.current_rec.fcf_indx) { | ||
2326 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | ||
2327 | "2760 Perform FLOGI roundrobin FCF failover: " | ||
2328 | "FCF (x%x) back to FCF (x%x)\n", | ||
2329 | phba->fcf.current_rec.fcf_indx, fcf_index); | ||
2330 | /* Wait 500 ms before retrying FLOGI to current FCF */ | ||
2331 | msleep(500); | ||
2332 | lpfc_initial_flogi(phba->pport); | ||
2333 | goto out; | ||
2334 | } | ||
2335 | |||
2154 | /* Upload new FCF record to the failover FCF record */ | 2336 | /* Upload new FCF record to the failover FCF record */ |
2155 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 2337 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2156 | "2834 Update the current FCF record (x%x) " | 2338 | "2834 Update current FCF (x%x) with new FCF (x%x)\n", |
2157 | "with the next FCF record (x%x)\n", | 2339 | phba->fcf.failover_rec.fcf_indx, fcf_index); |
2158 | phba->fcf.failover_rec.fcf_indx, | ||
2159 | bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); | ||
2160 | spin_lock_irq(&phba->hbalock); | 2340 | spin_lock_irq(&phba->hbalock); |
2161 | __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, | 2341 | __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, |
2162 | new_fcf_record, addr_mode, vlan_id, | 2342 | new_fcf_record, addr_mode, vlan_id, |
@@ -2173,14 +2353,13 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
2173 | sizeof(struct lpfc_fcf_rec)); | 2353 | sizeof(struct lpfc_fcf_rec)); |
2174 | 2354 | ||
2175 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 2355 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2176 | "2783 FLOGI round robin FCF failover from FCF " | 2356 | "2783 Perform FLOGI roundrobin FCF failover: FCF " |
2177 | "(x%x) to FCF (x%x).\n", | 2357 | "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index); |
2178 | current_fcf_index, | ||
2179 | bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); | ||
2180 | 2358 | ||
2359 | error_out: | ||
2360 | lpfc_register_fcf(phba); | ||
2181 | out: | 2361 | out: |
2182 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | 2362 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
2183 | lpfc_register_fcf(phba); | ||
2184 | } | 2363 | } |
2185 | 2364 | ||
2186 | /** | 2365 | /** |
@@ -2189,10 +2368,10 @@ out: | |||
2189 | * @mboxq: pointer to mailbox object. | 2368 | * @mboxq: pointer to mailbox object. |
2190 | * | 2369 | * |
2191 | * This is the callback function of read FCF record mailbox command for | 2370 | * This is the callback function of read FCF record mailbox command for |
2192 | * updating the eligible FCF bmask for FLOGI failure round robin FCF | 2371 | * updating the eligible FCF bmask for FLOGI failure roundrobin FCF |
2193 | * failover when a new FCF event happened. If the FCF read back is | 2372 | * failover when a new FCF event happened. If the FCF read back is |
2194 | * valid/available and it passes the connection list check, it updates | 2373 | * valid/available and it passes the connection list check, it updates |
2195 | * the bmask for the eligible FCF record for round robin failover. | 2374 | * the bmask for the eligible FCF record for roundrobin failover. |
2196 | */ | 2375 | */ |
2197 | void | 2376 | void |
2198 | lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | 2377 | lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) |
@@ -2634,7 +2813,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) | |||
2634 | * and get the FCF Table. | 2813 | * and get the FCF Table. |
2635 | */ | 2814 | */ |
2636 | spin_lock_irq(&phba->hbalock); | 2815 | spin_lock_irq(&phba->hbalock); |
2637 | if (phba->hba_flag & FCF_DISC_INPROGRESS) { | 2816 | if (phba->hba_flag & FCF_TS_INPROG) { |
2638 | spin_unlock_irq(&phba->hbalock); | 2817 | spin_unlock_irq(&phba->hbalock); |
2639 | return; | 2818 | return; |
2640 | } | 2819 | } |
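The new lpfc_sli4_post_dev_loss_tmo_handler() above runs only after the devloss work item has dropped its ndlp reference, and it only acts when the timed-out node was the last user of the FCF. The stand-alone sketch below models its decision order; the FCF_DISCOVERY/FCF_REDISC_PROG values and the prints are illustrative, only the hba_flag bit values come from the patch.

#include <stdio.h>
#include <stdbool.h>

#define FCF_TS_INPROG   0x200	/* from the lpfc.h hunk */
#define FCF_RR_INPROG   0x400
#define HBA_DEVLOSS_TMO 0x2000
#define FCF_DISCOVERY   0x01	/* illustrative value; real ones live in lpfc_sli4.h */
#define FCF_REDISC_PROG 0x02	/* illustrative value */

static void post_dev_loss_tmo(unsigned int hba_flag, unsigned int fcf_flag,
			      bool fcf_inuse, bool fip, bool fcf_still_used)
{
	if (!fcf_inuse)
		return;				/* FCF was already out of use: nothing to do */
	if (!fip || fcf_still_used)
		return;				/* another node still depends on the FCF */

	if (fcf_flag & FCF_DISCOVERY) {
		if (hba_flag & HBA_DEVLOSS_TMO)
			return;			/* devloss already noted by an earlier node */
		hba_flag |= HBA_DEVLOSS_TMO;	/* let the discovery path unregister the FCF */
	}
	if (fcf_flag & FCF_REDISC_PROG)
		return;				/* rediscovery will pick a new FCF anyway */
	if (!(hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
		printf("FIP engine idle: unregister in-use FCF and rescan\n");
		return;
	}
	printf("table scan or roundrobin failover in progress: defer to it\n");
}

int main(void)
{
	post_dev_loss_tmo(0, 0, true, true, false);
	return 0;
}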
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 9a2e2c792876..814d0b324d70 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2936,8 +2936,7 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2936 | phba->fcf.fcf_flag |= FCF_REDISC_EVT; | 2936 | phba->fcf.fcf_flag |= FCF_REDISC_EVT; |
2937 | spin_unlock_irq(&phba->hbalock); | 2937 | spin_unlock_irq(&phba->hbalock); |
2938 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 2938 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
2939 | "2776 FCF rediscover wait timer expired, post " | 2939 | "2776 FCF rediscover quiescent timer expired\n"); |
2940 | "a worker thread event for FCF table scan\n"); | ||
2941 | /* wake up worker thread */ | 2940 | /* wake up worker thread */ |
2942 | lpfc_worker_wake_up(phba); | 2941 | lpfc_worker_wake_up(phba); |
2943 | } | 2942 | } |
@@ -3312,35 +3311,34 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3312 | if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) | 3311 | if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) |
3313 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | | 3312 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | |
3314 | LOG_DISCOVERY, | 3313 | LOG_DISCOVERY, |
3315 | "2546 New FCF found event: " | 3314 | "2546 New FCF event, evt_tag:x%x, " |
3316 | "evt_tag:x%x, fcf_index:x%x\n", | 3315 | "index:x%x\n", |
3317 | acqe_fcoe->event_tag, | 3316 | acqe_fcoe->event_tag, |
3318 | acqe_fcoe->index); | 3317 | acqe_fcoe->index); |
3319 | else | 3318 | else |
3320 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | | 3319 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | |
3321 | LOG_DISCOVERY, | 3320 | LOG_DISCOVERY, |
3322 | "2788 FCF parameter modified event: " | 3321 | "2788 FCF param modified event, " |
3323 | "evt_tag:x%x, fcf_index:x%x\n", | 3322 | "evt_tag:x%x, index:x%x\n", |
3324 | acqe_fcoe->event_tag, | 3323 | acqe_fcoe->event_tag, |
3325 | acqe_fcoe->index); | 3324 | acqe_fcoe->index); |
3326 | if (phba->fcf.fcf_flag & FCF_DISCOVERY) { | 3325 | if (phba->fcf.fcf_flag & FCF_DISCOVERY) { |
3327 | /* | 3326 | /* |
3328 | * During period of FCF discovery, read the FCF | 3327 | * During period of FCF discovery, read the FCF |
3329 | * table record indexed by the event to update | 3328 | * table record indexed by the event to update |
3330 | * FCF round robin failover eligible FCF bmask. | 3329 | * FCF roundrobin failover eligible FCF bmask. |
3331 | */ | 3330 | */ |
3332 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | | 3331 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | |
3333 | LOG_DISCOVERY, | 3332 | LOG_DISCOVERY, |
3334 | "2779 Read new FCF record with " | 3333 | "2779 Read FCF (x%x) for updating " |
3335 | "fcf_index:x%x for updating FCF " | 3334 | "roundrobin FCF failover bmask\n", |
3336 | "round robin failover bmask\n", | ||
3337 | acqe_fcoe->index); | 3335 | acqe_fcoe->index); |
3338 | rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); | 3336 | rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); |
3339 | } | 3337 | } |
3340 | 3338 | ||
3341 | /* If the FCF discovery is in progress, do nothing. */ | 3339 | /* If the FCF discovery is in progress, do nothing. */ |
3342 | spin_lock_irq(&phba->hbalock); | 3340 | spin_lock_irq(&phba->hbalock); |
3343 | if (phba->hba_flag & FCF_DISC_INPROGRESS) { | 3341 | if (phba->hba_flag & FCF_TS_INPROG) { |
3344 | spin_unlock_irq(&phba->hbalock); | 3342 | spin_unlock_irq(&phba->hbalock); |
3345 | break; | 3343 | break; |
3346 | } | 3344 | } |
@@ -3359,15 +3357,15 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3359 | 3357 | ||
3360 | /* Otherwise, scan the entire FCF table and re-discover SAN */ | 3358 | /* Otherwise, scan the entire FCF table and re-discover SAN */ |
3361 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, | 3359 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, |
3362 | "2770 Start FCF table scan due to new FCF " | 3360 | "2770 Start FCF table scan per async FCF " |
3363 | "event: evt_tag:x%x, fcf_index:x%x\n", | 3361 | "event, evt_tag:x%x, index:x%x\n", |
3364 | acqe_fcoe->event_tag, acqe_fcoe->index); | 3362 | acqe_fcoe->event_tag, acqe_fcoe->index); |
3365 | rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, | 3363 | rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, |
3366 | LPFC_FCOE_FCF_GET_FIRST); | 3364 | LPFC_FCOE_FCF_GET_FIRST); |
3367 | if (rc) | 3365 | if (rc) |
3368 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, | 3366 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, |
3369 | "2547 Issue FCF scan read FCF mailbox " | 3367 | "2547 Issue FCF scan read FCF mailbox " |
3370 | "command failed 0x%x\n", rc); | 3368 | "command failed (x%x)\n", rc); |
3371 | break; | 3369 | break; |
3372 | 3370 | ||
3373 | case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: | 3371 | case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: |
@@ -3379,9 +3377,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3379 | 3377 | ||
3380 | case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: | 3378 | case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: |
3381 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, | 3379 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, |
3382 | "2549 FCF disconnected from network index 0x%x" | 3380 | "2549 FCF (x%x) disconnected from network, " |
3383 | " tag 0x%x\n", acqe_fcoe->index, | 3381 | "tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); |
3384 | acqe_fcoe->event_tag); | ||
3385 | /* | 3382 | /* |
3386 | * If we are in the middle of FCF failover process, clear | 3383 | * If we are in the middle of FCF failover process, clear |
3387 | * the corresponding FCF bit in the roundrobin bitmap. | 3384 | * the corresponding FCF bit in the roundrobin bitmap. |
@@ -3495,9 +3492,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3495 | spin_unlock_irq(&phba->hbalock); | 3492 | spin_unlock_irq(&phba->hbalock); |
3496 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | | 3493 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | |
3497 | LOG_DISCOVERY, | 3494 | LOG_DISCOVERY, |
3498 | "2773 Start FCF fast failover due " | 3495 | "2773 Start FCF failover per CVL, " |
3499 | "to CVL event: evt_tag:x%x\n", | 3496 | "evt_tag:x%x\n", acqe_fcoe->event_tag); |
3500 | acqe_fcoe->event_tag); | ||
3501 | rc = lpfc_sli4_redisc_fcf_table(phba); | 3497 | rc = lpfc_sli4_redisc_fcf_table(phba); |
3502 | if (rc) { | 3498 | if (rc) { |
3503 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | | 3499 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | |
@@ -3647,8 +3643,7 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) | |||
3647 | 3643 | ||
3648 | /* Scan FCF table from the first entry to re-discover SAN */ | 3644 | /* Scan FCF table from the first entry to re-discover SAN */ |
3649 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, | 3645 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, |
3650 | "2777 Start FCF table scan after FCF " | 3646 | "2777 Start post-quiescent FCF table scan\n"); |
3651 | "rediscovery quiescent period over\n"); | ||
3652 | rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); | 3647 | rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); |
3653 | if (rc) | 3648 | if (rc) |
3654 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, | 3649 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, |
@@ -4166,7 +4161,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
4166 | goto out_free_active_sgl; | 4161 | goto out_free_active_sgl; |
4167 | } | 4162 | } |
4168 | 4163 | ||
4169 | /* Allocate eligible FCF bmask memory for FCF round robin failover */ | 4164 | /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ |
4170 | longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; | 4165 | longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; |
4171 | phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), | 4166 | phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), |
4172 | GFP_KERNEL); | 4167 | GFP_KERNEL); |
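For reference, the roundrobin bmask allocation above sizes the bitmap by rounding the FCF table index range up to whole unsigned longs. A minimal sketch of the same round-up arithmetic (the table size used here is illustrative, not the driver's LPFC_SLI4_FCF_TBL_INDX_MAX):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define FCF_TBL_INDX_MAX 64	/* illustrative table size */

int main(void)
{
	/* Same round-up used before the kzalloc() in the hunk above. */
	unsigned long longs = (FCF_TBL_INDX_MAX + BITS_PER_LONG - 1) / BITS_PER_LONG;

	printf("%d FCF indexes -> %lu unsigned long(s) = %lu bytes of bmask\n",
	       FCF_TBL_INDX_MAX, longs,
	       (unsigned long)(longs * sizeof(unsigned long)));
	return 0;
}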
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 0d1e187b005d..9d2e1347cb1d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5921,7 +5921,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5921 | * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution | 5921 | * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution |
5922 | * @phba: Pointer to HBA context object. | 5922 | * @phba: Pointer to HBA context object. |
5923 | * | 5923 | * |
5924 | * This routine performs a round robin SCSI command to SLI4 FCP WQ index | 5924 | * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index |
5925 | * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock | 5925 | * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock |
5926 | * held. | 5926 | * held. |
5927 | * | 5927 | * |
@@ -12242,13 +12242,15 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) | |||
12242 | /* Issue the mailbox command asynchronously */ | 12242 | /* Issue the mailbox command asynchronously */ |
12243 | mboxq->vport = phba->pport; | 12243 | mboxq->vport = phba->pport; |
12244 | mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; | 12244 | mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; |
12245 | |||
12246 | spin_lock_irq(&phba->hbalock); | ||
12247 | phba->hba_flag |= FCF_TS_INPROG; | ||
12248 | spin_unlock_irq(&phba->hbalock); | ||
12249 | |||
12245 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); | 12250 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); |
12246 | if (rc == MBX_NOT_FINISHED) | 12251 | if (rc == MBX_NOT_FINISHED) |
12247 | error = -EIO; | 12252 | error = -EIO; |
12248 | else { | 12253 | else { |
12249 | spin_lock_irq(&phba->hbalock); | ||
12250 | phba->hba_flag |= FCF_DISC_INPROGRESS; | ||
12251 | spin_unlock_irq(&phba->hbalock); | ||
12252 | /* Reset eligible FCF count for new scan */ | 12254 | /* Reset eligible FCF count for new scan */ |
12253 | if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) | 12255 | if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) |
12254 | phba->fcf.eligible_fcf_cnt = 0; | 12256 | phba->fcf.eligible_fcf_cnt = 0; |
@@ -12258,21 +12260,21 @@ fail_fcf_scan: | |||
12258 | if (error) { | 12260 | if (error) { |
12259 | if (mboxq) | 12261 | if (mboxq) |
12260 | lpfc_sli4_mbox_cmd_free(phba, mboxq); | 12262 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
12261 | /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */ | 12263 | /* FCF scan failed, clear FCF_TS_INPROG flag */ |
12262 | spin_lock_irq(&phba->hbalock); | 12264 | spin_lock_irq(&phba->hbalock); |
12263 | phba->hba_flag &= ~FCF_DISC_INPROGRESS; | 12265 | phba->hba_flag &= ~FCF_TS_INPROG; |
12264 | spin_unlock_irq(&phba->hbalock); | 12266 | spin_unlock_irq(&phba->hbalock); |
12265 | } | 12267 | } |
12266 | return error; | 12268 | return error; |
12267 | } | 12269 | } |
12268 | 12270 | ||
12269 | /** | 12271 | /** |
12270 | * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf. | 12272 | * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. |
12271 | * @phba: pointer to lpfc hba data structure. | 12273 | * @phba: pointer to lpfc hba data structure. |
12272 | * @fcf_index: FCF table entry offset. | 12274 | * @fcf_index: FCF table entry offset. |
12273 | * | 12275 | * |
12274 | * This routine is invoked to read an FCF record indicated by @fcf_index | 12276 | * This routine is invoked to read an FCF record indicated by @fcf_index |
12275 | * and to use it for FLOGI round robin FCF failover. | 12277 | * and to use it for FLOGI roundrobin FCF failover. |
12276 | * | 12278 | * |
12277 | * Return 0 if the mailbox command is submitted successfully, non-zero | 12279 | * Return 0 if the mailbox command is submitted successfully, non-zero |
12278 | * otherwise. | 12280 | * otherwise. |
@@ -12318,7 +12320,7 @@ fail_fcf_read: | |||
12318 | * @fcf_index: FCF table entry offset. | 12320 | * @fcf_index: FCF table entry offset. |
12319 | * | 12321 | * |
12320 | * This routine is invoked to read an FCF record indicated by @fcf_index to | 12322 | * This routine is invoked to read an FCF record indicated by @fcf_index to |
12321 | * determine whether it's eligible for FLOGI round robin failover list. | 12323 | * determine whether it's eligible for FLOGI roundrobin failover list. |
12322 | * | 12324 | * |
12323 | * Return 0 if the mailbox command is submitted successfully, non-zero | 12325 | * Return 0 if the mailbox command is submitted successfully, non-zero |
12324 | * otherwise. | 12326 | * otherwise. |
@@ -12364,7 +12366,7 @@ fail_fcf_read: | |||
12364 | * | 12366 | * |
12365 | * This routine is to get the next eligible FCF record index in a round | 12367 | * This routine is to get the next eligible FCF record index in a round |
12366 | * robin fashion. If the next eligible FCF record index equals to the | 12368 | * robin fashion. If the next eligible FCF record index equals to the |
12367 | * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) | 12369 | * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) |
12368 | * shall be returned, otherwise, the next eligible FCF record's index | 12370 | * shall be returned, otherwise, the next eligible FCF record's index |
12369 | * shall be returned. | 12371 | * shall be returned. |
12370 | **/ | 12372 | **/ |
@@ -12392,28 +12394,10 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) | |||
12392 | return LPFC_FCOE_FCF_NEXT_NONE; | 12394 | return LPFC_FCOE_FCF_NEXT_NONE; |
12393 | } | 12395 | } |
12394 | 12396 | ||
12395 | /* Check roundrobin failover index bmask stop condition */ | ||
12396 | if (next_fcf_index == phba->fcf.fcf_rr_init_indx) { | ||
12397 | if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) { | ||
12398 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, | ||
12399 | "2847 Round robin failover FCF index " | ||
12400 | "search hit stop condition:x%x\n", | ||
12401 | next_fcf_index); | ||
12402 | return LPFC_FCOE_FCF_NEXT_NONE; | ||
12403 | } | ||
12404 | /* The roundrobin failover index bmask updated, start over */ | ||
12405 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | ||
12406 | "2848 Round robin failover FCF index bmask " | ||
12407 | "updated, start over\n"); | ||
12408 | spin_lock_irq(&phba->hbalock); | ||
12409 | phba->fcf.fcf_flag &= ~FCF_REDISC_RRU; | ||
12410 | spin_unlock_irq(&phba->hbalock); | ||
12411 | return phba->fcf.fcf_rr_init_indx; | ||
12412 | } | ||
12413 | |||
12414 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 12397 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
12415 | "2845 Get next round robin failover " | 12398 | "2845 Get next roundrobin failover FCF (x%x)\n", |
12416 | "FCF index x%x\n", next_fcf_index); | 12399 | next_fcf_index); |
12400 | |||
12417 | return next_fcf_index; | 12401 | return next_fcf_index; |
12418 | } | 12402 | } |
12419 | 12403 | ||
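Aside (editor's note, not part of the patch): with the stop-condition block removed, the roundrobin lookup reduces to a wrapping find_next_bit() scan over the eligibility bmask. A rough sketch of that search; the current_rec.fcf_indx field name is an assumption made for illustration:

/* Sketch only: search forward from the current FCF index, wrap to bit 0,
 * and report "none" when no eligible index remains set in the bmask. */
uint16_t next_fcf_index;

next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
			       LPFC_SLI4_FCF_TBL_INDX_MAX,
			       phba->fcf.current_rec.fcf_indx + 1);
if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
	/* ran off the end of the table, wrap around to bit 0 */
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
	/* no eligible FCF index left in the bmask */
	return LPFC_FCOE_FCF_NEXT_NONE;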
@@ -12422,7 +12406,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) | |||
12422 | * @phba: pointer to lpfc hba data structure. | 12406 | * @phba: pointer to lpfc hba data structure. |
12423 | * | 12407 | * |
12424 | * This routine sets the FCF record index into the eligible bmask for | 12408 | * This routine sets the FCF record index into the eligible bmask for |
12425 | * round robin failover search. It checks to make sure that the index | 12409 | * roundrobin failover search. It checks to make sure that the index |
12426 | * does not go beyond the range of the driver allocated bmask dimension | 12410 | * does not go beyond the range of the driver allocated bmask dimension |
12427 | * before setting the bit. | 12411 | * before setting the bit. |
12428 | * | 12412 | * |
@@ -12434,22 +12418,16 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) | |||
12434 | { | 12418 | { |
12435 | if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { | 12419 | if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { |
12436 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, | 12420 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, |
12437 | "2610 HBA FCF index reached driver's " | 12421 | "2610 FCF (x%x) reached driver's book " |
12438 | "book keeping dimension: fcf_index:%d, " | 12422 | "keeping dimension:x%x\n", |
12439 | "driver_bmask_max:%d\n", | ||
12440 | fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); | 12423 | fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); |
12441 | return -EINVAL; | 12424 | return -EINVAL; |
12442 | } | 12425 | } |
12443 | /* Set the eligible FCF record index bmask */ | 12426 | /* Set the eligible FCF record index bmask */ |
12444 | set_bit(fcf_index, phba->fcf.fcf_rr_bmask); | 12427 | set_bit(fcf_index, phba->fcf.fcf_rr_bmask); |
12445 | 12428 | ||
12446 | /* Set the roundrobin index bmask updated */ | ||
12447 | spin_lock_irq(&phba->hbalock); | ||
12448 | phba->fcf.fcf_flag |= FCF_REDISC_RRU; | ||
12449 | spin_unlock_irq(&phba->hbalock); | ||
12450 | |||
12451 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 12429 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
12452 | "2790 Set FCF index x%x to round robin failover " | 12430 | "2790 Set FCF (x%x) to roundrobin FCF failover " |
12453 | "bmask\n", fcf_index); | 12431 | "bmask\n", fcf_index); |
12454 | 12432 | ||
12455 | return 0; | 12433 | return 0; |
@@ -12460,7 +12438,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) | |||
12460 | * @phba: pointer to lpfc hba data structure. | 12438 | * @phba: pointer to lpfc hba data structure. |
12461 | * | 12439 | * |
12462 | * This routine clears the FCF record index from the eligible bmask for | 12440 | * This routine clears the FCF record index from the eligible bmask for |
12463 | * round robin failover search. It checks to make sure that the index | 12441 | * roundrobin failover search. It checks to make sure that the index |
12464 | * does not go beyond the range of the driver allocated bmask dimension | 12442 | * does not go beyond the range of the driver allocated bmask dimension |
12465 | * before clearing the bit. | 12443 | * before clearing the bit. |
12466 | **/ | 12444 | **/ |
@@ -12469,9 +12447,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) | |||
12469 | { | 12447 | { |
12470 | if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { | 12448 | if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { |
12471 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, | 12449 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, |
12472 | "2762 HBA FCF index goes beyond driver's " | 12450 | "2762 FCF (x%x) reached driver's book " |
12473 | "book keeping dimension: fcf_index:%d, " | 12451 | "keeping dimension:x%x\n", |
12474 | "driver_bmask_max:%d\n", | ||
12475 | fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); | 12452 | fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); |
12476 | return; | 12453 | return; |
12477 | } | 12454 | } |
@@ -12479,7 +12456,7 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) | |||
12479 | clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); | 12456 | clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); |
12480 | 12457 | ||
12481 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 12458 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
12482 | "2791 Clear FCF index x%x from round robin failover " | 12459 | "2791 Clear FCF (x%x) from roundrobin failover " |
12483 | "bmask\n", fcf_index); | 12460 | "bmask\n", fcf_index); |
12484 | } | 12461 | } |
12485 | 12462 | ||
@@ -12530,8 +12507,7 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) | |||
12530 | } | 12507 | } |
12531 | } else { | 12508 | } else { |
12532 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, | 12509 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP, |
12533 | "2775 Start FCF rediscovery quiescent period " | 12510 | "2775 Start FCF rediscover quiescent timer\n"); |
12534 | "wait timer before scaning FCF table\n"); | ||
12535 | /* | 12511 | /* |
12536 | * Start FCF rediscovery wait timer for pending FCF | 12512 | * Start FCF rediscovery wait timer for pending FCF |
12537 | * before rescanning the FCF record table. | 12513 | * before rescanning the FCF record table. |
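Aside (editor's note, not part of the patch): arming the quiescent-period wait before the rescan is an ordinary mod_timer() operation. A sketch in which the redisc_wait timer field name is an assumption; LPFC_FCF_REDISCOVER_WAIT_TMO is the 2000 msec value defined in lpfc_sli4.h below:

/* Sketch only: mark rediscovery pending and start the wait timer. */
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= FCF_REDISC_PEND;
spin_unlock_irq(&phba->hbalock);
mod_timer(&phba->fcf.redisc_wait,
	  jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));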
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index a0ca572ec28b..98da223e19e0 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
@@ -23,6 +23,9 @@ | |||
23 | #define LPFC_GET_QE_REL_INT 32 | 23 | #define LPFC_GET_QE_REL_INT 32 |
24 | #define LPFC_RPI_LOW_WATER_MARK 10 | 24 | #define LPFC_RPI_LOW_WATER_MARK 10 |
25 | 25 | ||
26 | #define LPFC_UNREG_FCF 1 | ||
27 | #define LPFC_SKIP_UNREG_FCF 0 | ||
28 | |||
26 | /* Amount of time in seconds for waiting FCF rediscovery to complete */ | 29 | /* Amount of time in seconds for waiting FCF rediscovery to complete */ |
27 | #define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */ | 30 | #define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */ |
28 | 31 | ||
@@ -163,9 +166,8 @@ struct lpfc_fcf { | |||
163 | #define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */ | 166 | #define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */ |
164 | #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ | 167 | #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ |
165 | #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ | 168 | #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ |
166 | #define FCF_REDISC_RRU 0x400 /* Roundrobin bitmap updated */ | 169 | #define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT) |
167 | uint32_t addr_mode; | 170 | uint32_t addr_mode; |
168 | uint16_t fcf_rr_init_indx; | ||
169 | uint32_t eligible_fcf_cnt; | 171 | uint32_t eligible_fcf_cnt; |
170 | struct lpfc_fcf_rec current_rec; | 172 | struct lpfc_fcf_rec current_rec; |
171 | struct lpfc_fcf_rec failover_rec; | 173 | struct lpfc_fcf_rec failover_rec; |