about summary refs log tree commit diff stats
path: root/drivers/scsi/lpfc/lpfc_hbadisc.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_hbadisc.c')
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 220
1 file changed, 132 insertions, 88 deletions
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a1f751e79405..5c396171ebe8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -283,16 +283,18 @@ lpfc_linkdown(struct lpfc_hba * phba)
283{ 283{
284 struct lpfc_sli *psli; 284 struct lpfc_sli *psli;
285 struct lpfc_nodelist *ndlp, *next_ndlp; 285 struct lpfc_nodelist *ndlp, *next_ndlp;
286 struct list_head *listp; 286 struct list_head *listp, *node_list[7];
287 struct list_head *node_list[7];
288 LPFC_MBOXQ_t *mb; 287 LPFC_MBOXQ_t *mb;
289 int rc, i; 288 int rc, i;
290 289
291 psli = &phba->sli; 290 psli = &phba->sli;
292 291
293 spin_lock_irq(phba->host->host_lock); 292 /* sysfs or selective reset may call this routine to clean up */
294 phba->hba_state = LPFC_LINK_DOWN; 293 if (phba->hba_state > LPFC_LINK_DOWN) {
295 spin_unlock_irq(phba->host->host_lock); 294 spin_lock_irq(phba->host->host_lock);
295 phba->hba_state = LPFC_LINK_DOWN;
296 spin_unlock_irq(phba->host->host_lock);
297 }
296 298
297 /* Clean up any firmware default rpi's */ 299 /* Clean up any firmware default rpi's */
298 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { 300 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
@@ -324,32 +326,20 @@ lpfc_linkdown(struct lpfc_hba * phba)
324 continue; 326 continue;
325 327
326 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) { 328 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
327 /* Fabric nodes are not handled thru state machine for 329
328 link down */ 330 rc = lpfc_disc_state_machine(phba, ndlp, NULL,
329 if (ndlp->nlp_type & NLP_FABRIC) { 331 NLP_EVT_DEVICE_RECOVERY);
330 /* Remove ALL Fabric nodes except Fabric_DID */ 332
331 if (ndlp->nlp_DID != Fabric_DID) { 333 /* Check config parameter use-adisc or FCP-2 */
332 /* Take it off current list and free */ 334 if ((rc != NLP_STE_FREED_NODE) &&
333 lpfc_nlp_list(phba, ndlp, 335 (phba->cfg_use_adisc == 0) &&
334 NLP_NO_LIST); 336 !(ndlp->nlp_fcp_info &
335 } 337 NLP_FCP_2_DEVICE)) {
336 } 338 /* We know we will have to relogin, so
337 else { 339 * unreglogin the rpi right now to fail
338 340 * any outstanding I/Os quickly.
339 rc = lpfc_disc_state_machine(phba, ndlp, NULL, 341 */
340 NLP_EVT_DEVICE_RECOVERY); 342 lpfc_unreg_rpi(phba, ndlp);
341
342 /* Check config parameter use-adisc or FCP-2 */
343 if ((rc != NLP_STE_FREED_NODE) &&
344 (phba->cfg_use_adisc == 0) &&
345 !(ndlp->nlp_fcp_info &
346 NLP_FCP_2_DEVICE)) {
347 /* We know we will have to relogin, so
348 * unreglogin the rpi right now to fail
349 * any outstanding I/Os quickly.
350 */
351 lpfc_unreg_rpi(phba, ndlp);
352 }
353 } 343 }
354 } 344 }
355 } 345 }
@@ -391,6 +381,8 @@ static int
391lpfc_linkup(struct lpfc_hba * phba) 381lpfc_linkup(struct lpfc_hba * phba)
392{ 382{
393 struct lpfc_nodelist *ndlp, *next_ndlp; 383 struct lpfc_nodelist *ndlp, *next_ndlp;
384 struct list_head *listp, *node_list[7];
385 int i;
394 386
395 spin_lock_irq(phba->host->host_lock); 387 spin_lock_irq(phba->host->host_lock);
396 phba->hba_state = LPFC_LINK_UP; 388 phba->hba_state = LPFC_LINK_UP;
@@ -401,14 +393,33 @@ lpfc_linkup(struct lpfc_hba * phba)
401 spin_unlock_irq(phba->host->host_lock); 393 spin_unlock_irq(phba->host->host_lock);
402 394
403 395
404 /* 396 node_list[0] = &phba->fc_plogi_list;
405 * Clean up old Fabric NLP_FABRIC logins. 397 node_list[1] = &phba->fc_adisc_list;
406 */ 398 node_list[2] = &phba->fc_reglogin_list;
407 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list, 399 node_list[3] = &phba->fc_prli_list;
408 nlp_listp) { 400 node_list[4] = &phba->fc_nlpunmap_list;
409 if (ndlp->nlp_DID == Fabric_DID) { 401 node_list[5] = &phba->fc_nlpmap_list;
410 /* Take it off current list and free */ 402 node_list[6] = &phba->fc_npr_list;
411 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 403 for (i = 0; i < 7; i++) {
404 listp = node_list[i];
405 if (list_empty(listp))
406 continue;
407
408 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
409 if (phba->fc_flag & FC_LBIT) {
410 if (ndlp->nlp_type & NLP_FABRIC) {
411 /* On Linkup its safe to clean up the
412 * ndlp from Fabric connections.
413 */
414 lpfc_nlp_list(phba, ndlp,
415 NLP_UNUSED_LIST);
416 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
417 /* Fail outstanding IO now since device
418 * is marked for PLOGI.
419 */
420 lpfc_unreg_rpi(phba, ndlp);
421 }
422 }
412 } 423 }
413 } 424 }
414 425
@@ -784,6 +795,13 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
784 795
785 memcpy(&phba->alpa_map[0], mp->virt, 128); 796 memcpy(&phba->alpa_map[0], mp->virt, 128);
786 797
798 spin_lock_irq(phba->host->host_lock);
799 if (la->pb)
800 phba->fc_flag |= FC_BYPASSED_MODE;
801 else
802 phba->fc_flag &= ~FC_BYPASSED_MODE;
803 spin_unlock_irq(phba->host->host_lock);
804
787 if (((phba->fc_eventTag + 1) < la->eventTag) || 805 if (((phba->fc_eventTag + 1) < la->eventTag) ||
788 (phba->fc_eventTag == la->eventTag)) { 806 (phba->fc_eventTag == la->eventTag)) {
789 phba->fc_stat.LinkMultiEvent++; 807 phba->fc_stat.LinkMultiEvent++;
@@ -904,32 +922,36 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
904 */ 922 */
905 lpfc_issue_els_scr(phba, SCR_DID, 0); 923 lpfc_issue_els_scr(phba, SCR_DID, 0);
906 924
907 /* Allocate a new node instance. If the pool is empty, just 925 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
908 * start the discovery process and skip the Nameserver login 926 if (!ndlp) {
909 * process. This is attempted again later on. Otherwise, issue 927 /* Allocate a new node instance. If the pool is empty,
910 * a Port Login (PLOGI) to the NameServer 928 * start the discovery process and skip the Nameserver
911 */ 929 * login process. This is attempted again later on.
912 if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL)) 930 * Otherwise, issue a Port Login (PLOGI) to NameServer.
913 == 0) { 931 */
914 lpfc_disc_start(phba); 932 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
915 } else { 933 if (!ndlp) {
916 lpfc_nlp_init(phba, ndlp, NameServer_DID); 934 lpfc_disc_start(phba);
917 ndlp->nlp_type |= NLP_FABRIC; 935 lpfc_mbuf_free(phba, mp->virt, mp->phys);
918 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 936 kfree(mp);
919 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST); 937 mempool_free( pmb, phba->mbox_mem_pool);
920 lpfc_issue_els_plogi(phba, ndlp, 0); 938 return;
921 if (phba->cfg_fdmi_on) { 939 } else {
922 if ((ndlp_fdmi = mempool_alloc( 940 lpfc_nlp_init(phba, ndlp, NameServer_DID);
923 phba->nlp_mem_pool, 941 ndlp->nlp_type |= NLP_FABRIC;
924 GFP_KERNEL))) { 942 }
925 lpfc_nlp_init(phba, ndlp_fdmi, 943 }
926 FDMI_DID); 944 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
927 ndlp_fdmi->nlp_type |= NLP_FABRIC; 945 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
928 ndlp_fdmi->nlp_state = 946 lpfc_issue_els_plogi(phba, ndlp, 0);
929 NLP_STE_PLOGI_ISSUE; 947 if (phba->cfg_fdmi_on) {
930 lpfc_issue_els_plogi(phba, ndlp_fdmi, 948 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
931 0); 949 GFP_KERNEL);
932 } 950 if (ndlp_fdmi) {
951 lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID);
952 ndlp_fdmi->nlp_type |= NLP_FABRIC;
953 ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
954 lpfc_issue_els_plogi(phba, ndlp_fdmi, 0);
933 } 955 }
934 } 956 }
935 } 957 }
@@ -937,7 +959,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
937 lpfc_mbuf_free(phba, mp->virt, mp->phys); 959 lpfc_mbuf_free(phba, mp->virt, mp->phys);
938 kfree(mp); 960 kfree(mp);
939 mempool_free( pmb, phba->mbox_mem_pool); 961 mempool_free( pmb, phba->mbox_mem_pool);
940
941 return; 962 return;
942} 963}
943 964
@@ -1241,16 +1262,9 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1241 list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list); 1262 list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
1242 phba->fc_npr_cnt++; 1263 phba->fc_npr_cnt++;
1243 1264
1244 /* 1265 if (!(nlp->nlp_flag & NLP_NODEV_TMO)) {
1245 * Sanity check for Fabric entity.
1246 * Set nodev_tmo for NPR state, for Fabric use 1 sec.
1247 */
1248 if (nlp->nlp_type & NLP_FABRIC) {
1249 mod_timer(&nlp->nlp_tmofunc, jiffies + HZ);
1250 }
1251 else {
1252 mod_timer(&nlp->nlp_tmofunc, 1266 mod_timer(&nlp->nlp_tmofunc,
1253 jiffies + HZ * phba->cfg_nodev_tmo); 1267 jiffies + HZ * phba->cfg_nodev_tmo);
1254 } 1268 }
1255 spin_lock_irq(phba->host->host_lock); 1269 spin_lock_irq(phba->host->host_lock);
1256 nlp->nlp_flag |= NLP_NODEV_TMO; 1270 nlp->nlp_flag |= NLP_NODEV_TMO;
@@ -1314,7 +1328,15 @@ lpfc_set_disctmo(struct lpfc_hba * phba)
1314{ 1328{
1315 uint32_t tmo; 1329 uint32_t tmo;
1316 1330
1317 tmo = ((phba->fc_ratov * 2) + 1); 1331 if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
1332 /* For FAN, timeout should be greater then edtov */
1333 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1334 } else {
1335 /* Normal discovery timeout should be > then ELS/CT timeout
1336 * FC spec states we need 3 * ratov for CT requests
1337 */
1338 tmo = ((phba->fc_ratov * 3) + 3);
1339 }
1318 1340
1319 mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo); 1341 mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
1320 spin_lock_irq(phba->host->host_lock); 1342 spin_lock_irq(phba->host->host_lock);
@@ -1846,8 +1868,9 @@ lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
1846 struct lpfc_nodelist *ndlp; 1868 struct lpfc_nodelist *ndlp;
1847 uint32_t flg; 1869 uint32_t flg;
1848 1870
1849 if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did)) == 0) { 1871 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did);
1850 if ((phba->hba_state == LPFC_HBA_READY) && 1872 if (!ndlp) {
1873 if ((phba->fc_flag & FC_RSCN_MODE) &&
1851 ((lpfc_rscn_payload_check(phba, did) == 0))) 1874 ((lpfc_rscn_payload_check(phba, did) == 0)))
1852 return NULL; 1875 return NULL;
1853 ndlp = (struct lpfc_nodelist *) 1876 ndlp = (struct lpfc_nodelist *)
@@ -1860,10 +1883,23 @@ lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
1860 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1883 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1861 return ndlp; 1884 return ndlp;
1862 } 1885 }
1863 if ((phba->hba_state == LPFC_HBA_READY) && 1886 if (phba->fc_flag & FC_RSCN_MODE) {
1864 (phba->fc_flag & FC_RSCN_MODE)) {
1865 if (lpfc_rscn_payload_check(phba, did)) { 1887 if (lpfc_rscn_payload_check(phba, did)) {
1866 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1888 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1889
1890 /* Since this node is marked for discovery,
1891 * delay timeout is not needed.
1892 */
1893 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1894 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1895 spin_unlock_irq(phba->host->host_lock);
1896 del_timer_sync(&ndlp->nlp_delayfunc);
1897 spin_lock_irq(phba->host->host_lock);
1898 if (!list_empty(&ndlp->els_retry_evt.
1899 evt_listp))
1900 list_del_init(&ndlp->els_retry_evt.
1901 evt_listp);
1902 }
1867 } 1903 }
1868 else { 1904 else {
1869 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1905 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
@@ -1872,10 +1908,8 @@ lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
1872 } 1908 }
1873 else { 1909 else {
1874 flg = ndlp->nlp_flag & NLP_LIST_MASK; 1910 flg = ndlp->nlp_flag & NLP_LIST_MASK;
1875 if ((flg == NLP_ADISC_LIST) || 1911 if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST))
1876 (flg == NLP_PLOGI_LIST)) {
1877 return NULL; 1912 return NULL;
1878 }
1879 ndlp->nlp_state = NLP_STE_NPR_NODE; 1913 ndlp->nlp_state = NLP_STE_NPR_NODE;
1880 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 1914 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1881 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1915 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -2174,7 +2208,7 @@ static void
2174lpfc_disc_timeout_handler(struct lpfc_hba *phba) 2208lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2175{ 2209{
2176 struct lpfc_sli *psli; 2210 struct lpfc_sli *psli;
2177 struct lpfc_nodelist *ndlp; 2211 struct lpfc_nodelist *ndlp, *next_ndlp;
2178 LPFC_MBOXQ_t *clearlambox, *initlinkmbox; 2212 LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
2179 int rc, clrlaerr = 0; 2213 int rc, clrlaerr = 0;
2180 2214
@@ -2201,10 +2235,20 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2201 "%d:0221 FAN timeout\n", 2235 "%d:0221 FAN timeout\n",
2202 phba->brd_no); 2236 phba->brd_no);
2203 2237
2204 /* Forget about FAN, Start discovery by sending a FLOGI 2238 /* Start discovery by sending FLOGI, clean up old rpis */
2205 * hba_state is identically LPFC_FLOGI while waiting for FLOGI 2239 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
2206 * cmpl 2240 nlp_listp) {
2207 */ 2241 if (ndlp->nlp_type & NLP_FABRIC) {
2242 /* Clean up the ndlp on Fabric connections */
2243 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
2244 }
2245 else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2246 /* Fail outstanding IO now since device
2247 * is marked for PLOGI.
2248 */
2249 lpfc_unreg_rpi(phba, ndlp);
2250 }
2251 }
2208 phba->hba_state = LPFC_FLOGI; 2252 phba->hba_state = LPFC_FLOGI;
2209 lpfc_set_disctmo(phba); 2253 lpfc_set_disctmo(phba);
2210 lpfc_initial_flogi(phba); 2254 lpfc_initial_flogi(phba);