Diffstat (limited to 'drivers/scsi/lpfc/lpfc_hbadisc.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  296
1 file changed, 254 insertions(+), 42 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index dc042bd97baa..1ee3e62c78a7 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2007 Emulex. All rights reserved.            *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved.            *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -272,9 +272,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 	if (!(vport->load_flag & FC_UNLOADING) &&
 	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
 	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
-	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) {
+	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
-	}
 }
 
 
@@ -566,9 +565,10 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 	int rc;
 
 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
 		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
 			continue;
-
 		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
 		    ((vport->port_type == LPFC_NPIV_PORT) &&
 		     (ndlp->nlp_DID == NameServer_DID)))
@@ -684,20 +684,21 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
 	struct lpfc_nodelist *ndlp;
 
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
 		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
 			continue;
-
 		if (ndlp->nlp_type & NLP_FABRIC) {
 			/* On Linkup its safe to clean up the ndlp
 			 * from Fabric connections.
 			 */
 			if (ndlp->nlp_DID != Fabric_DID)
 				lpfc_unreg_rpi(vport, ndlp);
 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
 			/* Fail outstanding IO now since device is
 			 * marked for PLOGI.
 			 */
 			lpfc_unreg_rpi(vport, ndlp);
 		}
 	}
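
Both hunks above add the same guard to walks of the vport->fc_nodes list: a node whose active bit is clear may be halfway through release, so it is skipped before any of its state is examined. Below is a minimal standalone sketch of that pattern; the struct, the flag name, and the plain singly linked list are simplifications for illustration, not the driver's types.

    #include <stdio.h>

    #define NODE_ACT 0x1    /* stand-in for the usage-map "active" bit */

    struct node {
        unsigned int usg_map;
        int state;
        struct node *next;
    };

    /* Touch only nodes still marked active; an inactive node may be
     * mid-release, so skip it before any state check. */
    static void walk_nodes(struct node *head)
    {
        for (struct node *n = head; n; n = n->next) {
            if (!(n->usg_map & NODE_ACT))
                continue;    /* mirrors: if (!NLP_CHK_NODE_ACT(ndlp)) */
            printf("node state %d\n", n->state);
        }
    }

    int main(void)
    {
        struct node b = { 0, 2, NULL };         /* inactive: skipped */
        struct node a = { NODE_ACT, 1, &b };    /* active: visited   */
        walk_nodes(&a);
        return 0;
    }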
@@ -1305,7 +1306,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
 		kfree(mp);
 		mempool_free(pmb, phba->mbox_mem_pool);
-		lpfc_nlp_put(ndlp);
 
 		if (phba->fc_topology == TOPOLOGY_LOOP) {
 			/* FLOGI failed, use loop map to make discovery list */
@@ -1313,6 +1313,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
 			/* Start discovery */
 			lpfc_disc_start(vport);
+			/* Decrement the reference count to ndlp after the
+			 * reference to the ndlp are done.
+			 */
+			lpfc_nlp_put(ndlp);
 			return;
 		}
 
@@ -1320,6 +1324,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
 				 "0258 Register Fabric login error: 0x%x\n",
 				 mb->mbxStatus);
+		/* Decrement the reference count to ndlp after the reference
+		 * to the ndlp are done.
+		 */
+		lpfc_nlp_put(ndlp);
 		return;
 	}
 
@@ -1327,8 +1335,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
-	lpfc_nlp_put(ndlp);	/* Drop the reference from the mbox */
-
 	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
 		vports = lpfc_create_vport_work_array(phba);
 		if (vports != NULL)
@@ -1356,6 +1362,11 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
 	mempool_free(pmb, phba->mbox_mem_pool);
+
+	/* Drop the reference count from the mbox at the end after
+	 * all the current reference to the ndlp have been done.
+	 */
+	lpfc_nlp_put(ndlp);
 	return;
 }
 
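
The five hunks above all make the same change to lpfc_mbx_cmpl_fabric_reg_login(): the lpfc_nlp_put() that drops the mailbox's node reference moves from the top of the handler to the tail of every exit path, so the node cannot be freed while the handler is still using it. A minimal refcount sketch of the hazard and the fix follows; the names are hypothetical, not the lpfc API.

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int refs;
        int payload;
    };

    static void obj_put(struct obj *o)
    {
        if (--o->refs == 0)
            free(o);    /* the release callback runs here */
    }

    /* Old shape (buggy): obj_put(o) happened first; if that put dropped
     * the last reference, every later o->payload read is use-after-free.
     * New shape (these hunks): each path finishes using the object, then
     * drops the reference as its final action. */
    static void completion(struct obj *o, int err)
    {
        if (err) {
            fprintf(stderr, "error %d on obj %d\n", err, o->payload);
            obj_put(o);    /* last action on the error path */
            return;
        }
        printf("payload %d\n", o->payload);    /* all uses first... */
        obj_put(o);                            /* ...then the put   */
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));
        o->refs = 1;
        o->payload = 42;
        completion(o, 0);
        return 0;
    }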
@@ -1463,9 +1474,8 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	 * registered the port.
 	 */
 	if (ndlp->rport && ndlp->rport->dd_data &&
-	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
+	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
 		lpfc_nlp_put(ndlp);
-	}
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
 		"rport add: did:x%x flg:x%x type x%x",
@@ -1660,6 +1670,18 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 }
 
 void
+lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (list_empty(&ndlp->nlp_listp)) {
+		spin_lock_irq(shost->host_lock);
+		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
+		spin_unlock_irq(shost->host_lock);
+	}
+}
+
+void
 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
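
The new lpfc_enqueue_node() links a node onto vport->fc_nodes only when its list head is empty, so a node that is re-enabled rather than freshly allocated can never be double-inserted. A standalone sketch of the idiom, with a minimal doubly linked list and a pthread mutex standing in for the kernel list API and shost->host_lock:

    #include <pthread.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_INIT(name) { &(name), &(name) }

    static int list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add_tail(struct list_head *n, struct list_head *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    struct node { struct list_head listp; };

    static struct list_head fc_nodes = LIST_INIT(fc_nodes);
    static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Mirrors lpfc_enqueue_node: link the node only if it is not already
     * on a list; listp must start self-pointing (the "empty" state) for
     * the check to be meaningful. */
    static void enqueue_node(struct node *n)
    {
        if (list_empty(&n->listp)) {
            pthread_mutex_lock(&host_lock);
            list_add_tail(&n->listp, &fc_nodes);
            pthread_mutex_unlock(&host_lock);
        }
    }

    int main(void)
    {
        struct node a = { LIST_INIT(a.listp) };
        enqueue_node(&a);
        enqueue_node(&a);    /* second call is a no-op: already linked */
        return 0;
    }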
@@ -1672,7 +1694,80 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	list_del_init(&ndlp->nlp_listp);
 	spin_unlock_irq(shost->host_lock);
 	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
 			       NLP_STE_UNUSED_NODE);
+}
+
+void
+lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
+		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
+	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
+			       NLP_STE_UNUSED_NODE);
+}
+
+struct lpfc_nodelist *
+lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		 int state)
+{
+	struct lpfc_hba *phba = vport->phba;
+	uint32_t did;
+	unsigned long flags;
+
+	if (!ndlp)
+		return NULL;
+
+	spin_lock_irqsave(&phba->ndlp_lock, flags);
+	/* The ndlp should not be in memory free mode */
+	if (NLP_CHK_FREE_REQ(ndlp)) {
+		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+				 "0277 lpfc_enable_node: ndlp:x%p "
+				 "usgmap:x%x refcnt:%d\n",
+				 (void *)ndlp, ndlp->nlp_usg_map,
+				 atomic_read(&ndlp->kref.refcount));
+		return NULL;
+	}
+	/* The ndlp should not already be in active mode */
+	if (NLP_CHK_NODE_ACT(ndlp)) {
+		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+				 "0278 lpfc_enable_node: ndlp:x%p "
+				 "usgmap:x%x refcnt:%d\n",
+				 (void *)ndlp, ndlp->nlp_usg_map,
+				 atomic_read(&ndlp->kref.refcount));
+		return NULL;
+	}
+
+	/* Keep the original DID */
+	did = ndlp->nlp_DID;
+
+	/* re-initialize ndlp except of ndlp linked list pointer */
+	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
+		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
+	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
+	init_timer(&ndlp->nlp_delayfunc);
+	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
+	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+	ndlp->nlp_DID = did;
+	ndlp->vport = vport;
+	ndlp->nlp_sid = NLP_NO_SID;
+	/* ndlp management re-initialize */
+	kref_init(&ndlp->kref);
+	NLP_INT_NODE_ACT(ndlp);
+
+	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+
+	if (state != NLP_STE_UNUSED_NODE)
+		lpfc_nlp_set_state(vport, ndlp, state);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+		"node enable: did:x%x",
+		ndlp->nlp_DID, 0, 0);
+	return ndlp;
 }
 
 void
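
lpfc_enable_node() recycles an inactive ndlp in place: memset() wipes everything past the embedded list head, so the node's linkage on vport->fc_nodes survives, then the DID is restored and kref and the usage map are re-armed. The offset arithmetic only works because nlp_listp is the first member of struct lpfc_nodelist. A reduced sketch of the trick, with illustrative field names:

    #include <string.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    /* The list linkage MUST be the first member: reinit_node() skips
     * exactly sizeof(struct list_head) bytes from the start. */
    struct node {
        struct list_head listp;    /* preserved across re-initialization */
        unsigned int did;
        int state;
        int refs;
    };

    /* Mirrors the lpfc_enable_node() re-init: zero everything after the
     * list head, keep the original DID, restart the reference count. */
    static void reinit_node(struct node *n)
    {
        unsigned int did = n->did;

        memset((char *)n + sizeof(struct list_head), 0,
               sizeof(struct node) - sizeof(struct list_head));
        n->did = did;
        n->refs = 1;
    }

    int main(void)
    {
        struct node n = { { 0, 0 }, 0xFFFFFA, 7, 0 };
        reinit_node(&n);
        printf("did=0x%x state=%d refs=%d\n", n.did, n.state, n.refs);
        return 0;
    }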
@@ -1972,7 +2067,21 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 			 "Data: x%x x%x x%x\n",
 			 ndlp->nlp_DID, ndlp->nlp_flag,
 			 ndlp->nlp_state, ndlp->nlp_rpi);
-	lpfc_dequeue_node(vport, ndlp);
+	if (NLP_CHK_FREE_REQ(ndlp)) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+				 "0280 lpfc_cleanup_node: ndlp:x%p "
+				 "usgmap:x%x refcnt:%d\n",
+				 (void *)ndlp, ndlp->nlp_usg_map,
+				 atomic_read(&ndlp->kref.refcount));
+		lpfc_dequeue_node(vport, ndlp);
+	} else {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+				 "0281 lpfc_cleanup_node: ndlp:x%p "
+				 "usgmap:x%x refcnt:%d\n",
+				 (void *)ndlp, ndlp->nlp_usg_map,
+				 atomic_read(&ndlp->kref.refcount));
+		lpfc_disable_node(vport, ndlp);
+	}
 
 	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
 	if ((mb = phba->sli.mbox_active)) {
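
The hunk above splits lpfc_cleanup_node() teardown into two cases: a node whose free has actually been requested is unlinked from the vport list (lpfc_dequeue_node), while any other node is merely parked inactive with its linkage intact (lpfc_disable_node) so a later lpfc_enable_node() can find and recycle it. A toy version of the branch, with simplified predicates in place of the driver's usage-map macros:

    #include <stdbool.h>
    #include <stdio.h>

    #define FREE_REQ 0x2    /* stand-in for the free-request bit */

    struct node { unsigned int usg_map; bool linked; int state; };

    static void dequeue_node(struct node *n) { n->linked = false; n->state = 0; }
    static void disable_node(struct node *n) { n->state = 0; }

    /* Mirrors the new lpfc_cleanup_node() branch: only a node headed for
     * the mempool is unlinked; anything else stays on the vport list,
     * inactive, so an enable call can revive it later. */
    static void cleanup_node(struct node *n)
    {
        if (n->usg_map & FREE_REQ)
            dequeue_node(n);    /* final release: drop list linkage */
        else
            disable_node(n);    /* park inactive, keep linkage      */
    }

    int main(void)
    {
        struct node n = { 0, true, 5 };
        cleanup_node(&n);
        printf("linked=%d state=%d\n", n.linked, n.state);
        return 0;
    }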
@@ -1994,12 +2103,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 			}
 			list_del(&mb->list);
 			mempool_free(mb, phba->mbox_mem_pool);
-			lpfc_nlp_put(ndlp);
+			/* We shall not invoke the lpfc_nlp_put to decrement
+			 * the ndlp reference count as we are in the process
+			 * of lpfc_nlp_release.
+			 */
 		}
 	}
 	spin_unlock_irq(&phba->hbalock);
 
-	lpfc_els_abort(phba,ndlp);
+	lpfc_els_abort(phba, ndlp);
+
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
 	spin_unlock_irq(shost->host_lock);
@@ -2057,7 +2170,6 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 			}
 		}
 	}
-
 	lpfc_cleanup_node(vport, ndlp);
 
 	/*
@@ -2182,7 +2294,16 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 		spin_unlock_irq(shost->host_lock);
 		return ndlp;
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
+		if (!ndlp)
+			return NULL;
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+		return ndlp;
 	}
+
 	if (vport->fc_flag & FC_RSCN_MODE) {
 		if (lpfc_rscn_payload_check(vport, did)) {
 			/* If we've already recieved a PLOGI from this NPort
@@ -2485,6 +2606,8 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
 	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
 		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
 					 nlp_listp) {
+			if (!NLP_CHK_NODE_ACT(ndlp))
+				continue;
 			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
 			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
 				lpfc_free_tx(phba, ndlp);
@@ -2572,6 +2695,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
 		/* Start discovery by sending FLOGI, clean up old rpis */
 		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
 					 nlp_listp) {
+			if (!NLP_CHK_NODE_ACT(ndlp))
+				continue;
 			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
 				continue;
 			if (ndlp->nlp_type & NLP_FABRIC) {
@@ -2618,7 +2743,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
 				 "NameServer login\n");
 		/* Next look for NameServer ndlp */
 		ndlp = lpfc_findnode_did(vport, NameServer_DID);
-		if (ndlp)
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
 			lpfc_els_abort(phba, ndlp);
 
 		/* ReStart discovery */
@@ -2897,6 +3022,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	ndlp->nlp_sid = NLP_NO_SID;
 	INIT_LIST_HEAD(&ndlp->nlp_listp);
 	kref_init(&ndlp->kref);
+	NLP_INT_NODE_ACT(ndlp);
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
 		"node init: did:x%x",
@@ -2911,6 +3037,8 @@
 static void
 lpfc_nlp_release(struct kref *kref)
 {
+	struct lpfc_hba *phba;
+	unsigned long flags;
 	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
 						  kref);
 
@@ -2918,8 +3046,24 @@ lpfc_nlp_release(struct kref *kref)
 		"node release: did:x%x flg:x%x type:x%x",
 		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
 
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+			 "0279 lpfc_nlp_release: ndlp:x%p "
+			 "usgmap:x%x refcnt:%d\n",
+			 (void *)ndlp, ndlp->nlp_usg_map,
+			 atomic_read(&ndlp->kref.refcount));
+
+	/* remove ndlp from action. */
 	lpfc_nlp_remove(ndlp->vport, ndlp);
-	mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
+
+	/* clear the ndlp active flag for all release cases */
+	phba = ndlp->vport->phba;
+	spin_lock_irqsave(&phba->ndlp_lock, flags);
+	NLP_CLR_NODE_ACT(ndlp);
+	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+
+	/* free ndlp memory for final ndlp release */
+	if (NLP_CHK_FREE_REQ(ndlp))
+		mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
 }
 
 /* This routine bumps the reference count for a ndlp structure to ensure
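
The release path now always clears the node's active bit under phba->ndlp_lock, but returns the memory to the mempool only when the final lpfc_nlp_put() set the free-request bit. A compressed sketch of that ordering; a pthread mutex stands in for the spinlock and the flag names are illustrative:

    #include <pthread.h>
    #include <stdlib.h>

    #define NODE_ACT 0x1
    #define FREE_REQ 0x2

    struct node { unsigned int usg_map; };

    static pthread_mutex_t ndlp_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Mirrors the new release path: deactivate first (so concurrent
     * lookups and gets start failing), then free only if a free was
     * actually requested. */
    static void node_release(struct node *n)
    {
        pthread_mutex_lock(&ndlp_lock);
        n->usg_map &= ~NODE_ACT;       /* like NLP_CLR_NODE_ACT  */
        pthread_mutex_unlock(&ndlp_lock);

        if (n->usg_map & FREE_REQ)     /* like NLP_CHK_FREE_REQ  */
            free(n);                   /* mempool_free in driver */
    }

    int main(void)
    {
        struct node *n = malloc(sizeof(*n));
        n->usg_map = NODE_ACT | FREE_REQ;
        node_release(n);
        return 0;
    }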
@@ -2929,37 +3073,108 @@ lpfc_nlp_release(kref)
 struct lpfc_nodelist *
 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
 {
+	struct lpfc_hba *phba;
+	unsigned long flags;
+
 	if (ndlp) {
 		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
 			"node get: did:x%x flg:x%x refcnt:x%x",
 			ndlp->nlp_DID, ndlp->nlp_flag,
 			atomic_read(&ndlp->kref.refcount));
-		kref_get(&ndlp->kref);
+		/* The check of ndlp usage to prevent incrementing the
+		 * ndlp reference count that is in the process of being
+		 * released.
+		 */
+		phba = ndlp->vport->phba;
+		spin_lock_irqsave(&phba->ndlp_lock, flags);
+		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
+			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+					 "0276 lpfc_nlp_get: ndlp:x%p "
+					 "usgmap:x%x refcnt:%d\n",
+					 (void *)ndlp, ndlp->nlp_usg_map,
+					 atomic_read(&ndlp->kref.refcount));
+			return NULL;
+		} else
+			kref_get(&ndlp->kref);
+		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
 	}
 	return ndlp;
 }
 
-
 /* This routine decrements the reference count for a ndlp structure. If the
- * count goes to 0, this indicates the the associated nodelist should be freed.
+ * count goes to 0, this indicates the the associated nodelist should be
+ * freed. Returning 1 indicates the ndlp resource has been released; on the
+ * other hand, returning 0 indicates the ndlp resource has not been released
+ * yet.
  */
 int
 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
 {
-	if (ndlp) {
-		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
-			"node put: did:x%x flg:x%x refcnt:x%x",
-			ndlp->nlp_DID, ndlp->nlp_flag,
-			atomic_read(&ndlp->kref.refcount));
+	struct lpfc_hba *phba;
+	unsigned long flags;
+
+	if (!ndlp)
+		return 1;
+
+	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+		"node put: did:x%x flg:x%x refcnt:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag,
+		atomic_read(&ndlp->kref.refcount));
+	phba = ndlp->vport->phba;
+	spin_lock_irqsave(&phba->ndlp_lock, flags);
+	/* Check the ndlp memory free acknowledge flag to avoid the
+	 * possible race condition that kref_put got invoked again
+	 * after previous one has done ndlp memory free.
+	 */
+	if (NLP_CHK_FREE_ACK(ndlp)) {
+		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+				 "0274 lpfc_nlp_put: ndlp:x%p "
+				 "usgmap:x%x refcnt:%d\n",
+				 (void *)ndlp, ndlp->nlp_usg_map,
+				 atomic_read(&ndlp->kref.refcount));
+		return 1;
+	}
+	/* Check the ndlp inactivate log flag to avoid the possible
+	 * race condition that kref_put got invoked again after ndlp
+	 * is already in inactivating state.
+	 */
+	if (NLP_CHK_IACT_REQ(ndlp)) {
+		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+				 "0275 lpfc_nlp_put: ndlp:x%p "
+				 "usgmap:x%x refcnt:%d\n",
+				 (void *)ndlp, ndlp->nlp_usg_map,
+				 atomic_read(&ndlp->kref.refcount));
+		return 1;
 	}
-	return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
+	/* For last put, mark the ndlp usage flags to make sure no
+	 * other kref_get and kref_put on the same ndlp shall get
+	 * in between the process when the final kref_put has been
+	 * invoked on this ndlp.
+	 */
+	if (atomic_read(&ndlp->kref.refcount) == 1) {
+		/* Indicate ndlp is put to inactive state. */
+		NLP_SET_IACT_REQ(ndlp);
+		/* Acknowledge ndlp memory free has been seen. */
+		if (NLP_CHK_FREE_REQ(ndlp))
+			NLP_SET_FREE_ACK(ndlp);
+	}
+	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+	/* Note, the kref_put returns 1 when decrementing a reference
+	 * count that was 1, it invokes the release callback function,
+	 * but it still left the reference count as 1 (not actually
+	 * performs the last decrementation). Otherwise, it actually
+	 * decrements the reference count and returns 0.
+	 */
+	return kref_put(&ndlp->kref, lpfc_nlp_release);
 }
 
 /* This routine free's the specified nodelist if it is not in use
- * by any other discovery thread. This routine returns 1 if the ndlp
- * is not being used by anyone and has been freed. A return value of
- * 0 indicates it is being used by another discovery thread and the
- * refcount is left unchanged.
+ * by any other discovery thread. This routine returns 1 if the
+ * ndlp has been freed. A return value of 0 indicates the ndlp is
+ * not yet been released.
  */
 int
 lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
@@ -2968,11 +3183,8 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
 		"node not used: did:x%x flg:x%x refcnt:x%x",
 		ndlp->nlp_DID, ndlp->nlp_flag,
 		atomic_read(&ndlp->kref.refcount));
-
-	if (atomic_read(&ndlp->kref.refcount) == 1) {
-		lpfc_nlp_put(ndlp);
-		return 1;
-	}
+	if (atomic_read(&ndlp->kref.refcount) == 1)
+		if (lpfc_nlp_put(ndlp))
+			return 1;
 	return 0;
 }
-
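
Taken together, the reworked lpfc_nlp_get()/lpfc_nlp_put() pair turns the plain kref into a small state machine guarded by phba->ndlp_lock: a get fails on an inactive or free-acknowledged node, and the final put latches the inactivation flag (and the free-acknowledge flag, when a free was requested) before the release callback runs, so a racing get or a double put becomes a logged no-op instead of a use-after-free. A self-contained sketch of that state machine, with simplified flags, a mutex instead of the spinlock, and a bare counter instead of kref:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NODE_ACT 0x1    /* node is active / usable          */
    #define IACT_REQ 0x2    /* final put seen: inactivating     */
    #define FREE_REQ 0x4    /* caller asked for the memory free */
    #define FREE_ACK 0x8    /* free has been acknowledged       */

    struct node { unsigned int usg_map; int refs; };

    static pthread_mutex_t ndlp_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Like lpfc_nlp_get(): refuse to revive a node that is inactive or
     * whose free has already been acknowledged. */
    static struct node *node_get(struct node *n)
    {
        pthread_mutex_lock(&ndlp_lock);
        if (!(n->usg_map & NODE_ACT) || (n->usg_map & FREE_ACK)) {
            pthread_mutex_unlock(&ndlp_lock);
            return NULL;
        }
        n->refs++;
        pthread_mutex_unlock(&ndlp_lock);
        return n;
    }

    /* Like lpfc_nlp_put(): a put racing a completed or in-flight final
     * put is a no-op; the genuine final put latches the inactivation
     * flags before the release logic runs. Returns 1 once released. */
    static int node_put(struct node *n)
    {
        pthread_mutex_lock(&ndlp_lock);
        if (n->usg_map & (FREE_ACK | IACT_REQ)) {
            pthread_mutex_unlock(&ndlp_lock);
            return 1;    /* already released or releasing */
        }
        if (n->refs == 1) {
            n->usg_map |= IACT_REQ;
            if (n->usg_map & FREE_REQ)
                n->usg_map |= FREE_ACK;
        }
        pthread_mutex_unlock(&ndlp_lock);
        if (--n->refs == 0) {
            n->usg_map &= ~NODE_ACT;    /* "release callback" */
            if (n->usg_map & FREE_ACK)
                free(n);
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct node *n = malloc(sizeof(*n));
        n->usg_map = NODE_ACT | FREE_REQ;
        n->refs = 1;
        printf("get -> %p\n", (void *)node_get(n));    /* succeeds      */
        printf("put -> %d\n", node_put(n));            /* refs 2 -> 1   */
        printf("put -> %d\n", node_put(n));            /* final: frees  */
        return 0;
    }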