author     Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-02-13 19:23:44 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-02-13 19:23:44 -0500
commit     e760e716d47b48caf98da348368fd41b4a9b9e7e
tree       92d401fdbc618a4bdf4afe7ae5ee509e09dda0e6
parent     b2e3e658b344c6bcfb8fb694100ab2f2b5b2edb0
parent     99109301d103fbf0de43fc5a580a406c12a501e0
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6:
[SCSI] gdth: update deprecated pci_find_device
[SCSI] gdth: scan for scsi devices
[SCSI] sym53c416: fix module parameters
[SCSI] lpfc 8.2.5 : Update lpfc driver version to 8.2.5
[SCSI] lpfc 8.2.5 : Fix buffer leaks
[SCSI] lpfc 8.2.5 : Miscellaneous discovery Fixes
[SCSI] lpfc 8.2.5 : Add MSI-X single message support
[SCSI] lpfc 8.2.5 : Miscellaneous Fixes
[SCSI] lpfc 8.2.5 : Correct ndlp referencing issues
[SCSI] update SG_ALL to avoid causing chaining
[SCSI] aic94xx: fix ABORT_TASK define conflict
[SCSI] fas216: Use scsi_eh API for REQUEST_SENSE invocation
[SCSI] ses: fix memory leaks
[SCSI] aacraid: informational sysfs value corrections
[SCSI] mpt fusion: Request I/O resources only when required
[SCSI] aacraid: ignore adapter reset check polarity
[SCSI] aacraid: add optional MSI support
[SCSI] mpt fusion: Avoid racing when mptsas and mptcl module are loaded in parallel
[SCSI] MegaRAID driver management char device moved to misc
[SCSI] advansys: fix overrun_buf aligned bug
36 files changed, 1232 insertions, 370 deletions
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 425f60c21fdd..bfda731696f7 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1470,9 +1470,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1470 | if (mpt_debug_level) | 1470 | if (mpt_debug_level) |
1471 | printk(KERN_INFO MYNAM ": mpt_debug_level=%xh\n", mpt_debug_level); | 1471 | printk(KERN_INFO MYNAM ": mpt_debug_level=%xh\n", mpt_debug_level); |
1472 | 1472 | ||
1473 | if (pci_enable_device(pdev)) | ||
1474 | return r; | ||
1475 | |||
1476 | ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC); | 1473 | ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC); |
1477 | if (ioc == NULL) { | 1474 | if (ioc == NULL) { |
1478 | printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n"); | 1475 | printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n"); |
@@ -1482,6 +1479,20 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1482 | ioc->id = mpt_ids++; | 1479 | ioc->id = mpt_ids++; |
1483 | sprintf(ioc->name, "ioc%d", ioc->id); | 1480 | sprintf(ioc->name, "ioc%d", ioc->id); |
1484 | 1481 | ||
1482 | ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); | ||
1483 | if (pci_enable_device_mem(pdev)) { | ||
1484 | kfree(ioc); | ||
1485 | printk(MYIOC_s_ERR_FMT "pci_enable_device_mem() " | ||
1486 | "failed\n", ioc->name); | ||
1487 | return r; | ||
1488 | } | ||
1489 | if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) { | ||
1490 | kfree(ioc); | ||
1491 | printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with " | ||
1492 | "MEM failed\n", ioc->name); | ||
1493 | return r; | ||
1494 | } | ||
1495 | |||
1485 | dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": mpt_adapter_install\n", ioc->name)); | 1496 | dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": mpt_adapter_install\n", ioc->name)); |
1486 | 1497 | ||
1487 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { | 1498 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { |
@@ -1658,6 +1669,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1658 | ioc->active = 0; | 1669 | ioc->active = 0; |
1659 | CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); | 1670 | CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); |
1660 | 1671 | ||
1672 | /* Set IOC ptr in the pcidev's driver data. */ | ||
1673 | pci_set_drvdata(ioc->pcidev, ioc); | ||
1674 | |||
1661 | /* Set lookup ptr. */ | 1675 | /* Set lookup ptr. */ |
1662 | list_add_tail(&ioc->list, &ioc_list); | 1676 | list_add_tail(&ioc->list, &ioc_list); |
1663 | 1677 | ||
@@ -1791,6 +1805,7 @@ mpt_suspend(struct pci_dev *pdev, pm_message_t state) | |||
1791 | CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); | 1805 | CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); |
1792 | 1806 | ||
1793 | pci_disable_device(pdev); | 1807 | pci_disable_device(pdev); |
1808 | pci_release_selected_regions(pdev, ioc->bars); | ||
1794 | pci_set_power_state(pdev, device_state); | 1809 | pci_set_power_state(pdev, device_state); |
1795 | 1810 | ||
1796 | return 0; | 1811 | return 0; |
@@ -1807,7 +1822,6 @@ mpt_resume(struct pci_dev *pdev) | |||
1807 | MPT_ADAPTER *ioc = pci_get_drvdata(pdev); | 1822 | MPT_ADAPTER *ioc = pci_get_drvdata(pdev); |
1808 | u32 device_state = pdev->current_state; | 1823 | u32 device_state = pdev->current_state; |
1809 | int recovery_state; | 1824 | int recovery_state; |
1810 | int err; | ||
1811 | 1825 | ||
1812 | printk(MYIOC_s_INFO_FMT | 1826 | printk(MYIOC_s_INFO_FMT |
1813 | "pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n", | 1827 | "pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n", |
@@ -1815,9 +1829,18 @@ mpt_resume(struct pci_dev *pdev) | |||
1815 | 1829 | ||
1816 | pci_set_power_state(pdev, 0); | 1830 | pci_set_power_state(pdev, 0); |
1817 | pci_restore_state(pdev); | 1831 | pci_restore_state(pdev); |
1818 | err = pci_enable_device(pdev); | 1832 | if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) { |
1819 | if (err) | 1833 | ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM | |
1820 | return err; | 1834 | IORESOURCE_IO); |
1835 | if (pci_enable_device(pdev)) | ||
1836 | return 0; | ||
1837 | } else { | ||
1838 | ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); | ||
1839 | if (pci_enable_device_mem(pdev)) | ||
1840 | return 0; | ||
1841 | } | ||
1842 | if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) | ||
1843 | return 0; | ||
1821 | 1844 | ||
1822 | /* enable interrupts */ | 1845 | /* enable interrupts */ |
1823 | CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM); | 1846 | CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM); |
@@ -1878,6 +1901,7 @@ mpt_signal_reset(u8 index, MPT_ADAPTER *ioc, int reset_phase) | |||
1878 | * -2 if READY but IOCFacts Failed | 1901 | * -2 if READY but IOCFacts Failed |
1879 | * -3 if READY but PrimeIOCFifos Failed | 1902 | * -3 if READY but PrimeIOCFifos Failed |
1880 | * -4 if READY but IOCInit Failed | 1903 | * -4 if READY but IOCInit Failed |
1904 | * -5 if failed to enable_device and/or request_selected_regions | ||
1881 | */ | 1905 | */ |
1882 | static int | 1906 | static int |
1883 | mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) | 1907 | mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) |
@@ -1976,6 +2000,18 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) | |||
1976 | } | 2000 | } |
1977 | } | 2001 | } |
1978 | 2002 | ||
2003 | if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP) && | ||
2004 | (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)) { | ||
2005 | pci_release_selected_regions(ioc->pcidev, ioc->bars); | ||
2006 | ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM | | ||
2007 | IORESOURCE_IO); | ||
2008 | if (pci_enable_device(ioc->pcidev)) | ||
2009 | return -5; | ||
2010 | if (pci_request_selected_regions(ioc->pcidev, ioc->bars, | ||
2011 | "mpt")) | ||
2012 | return -5; | ||
2013 | } | ||
2014 | |||
1979 | /* | 2015 | /* |
1980 | * Device is reset now. It must have de-asserted the interrupt line | 2016 | * Device is reset now. It must have de-asserted the interrupt line |
1981 | * (if it was asserted) and it should be safe to register for the | 2017 | * (if it was asserted) and it should be safe to register for the |
@@ -1999,7 +2035,6 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) | |||
1999 | irq_allocated = 1; | 2035 | irq_allocated = 1; |
2000 | ioc->pci_irq = ioc->pcidev->irq; | 2036 | ioc->pci_irq = ioc->pcidev->irq; |
2001 | pci_set_master(ioc->pcidev); /* ?? */ | 2037 | pci_set_master(ioc->pcidev); /* ?? */ |
2002 | pci_set_drvdata(ioc->pcidev, ioc); | ||
2003 | dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt " | 2038 | dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt " |
2004 | "%d\n", ioc->name, ioc->pcidev->irq)); | 2039 | "%d\n", ioc->name, ioc->pcidev->irq)); |
2005 | } | 2040 | } |
@@ -2381,6 +2416,9 @@ mpt_adapter_dispose(MPT_ADAPTER *ioc) | |||
2381 | ioc->memmap = NULL; | 2416 | ioc->memmap = NULL; |
2382 | } | 2417 | } |
2383 | 2418 | ||
2419 | pci_disable_device(ioc->pcidev); | ||
2420 | pci_release_selected_regions(ioc->pcidev, ioc->bars); | ||
2421 | |||
2384 | #if defined(CONFIG_MTRR) && 0 | 2422 | #if defined(CONFIG_MTRR) && 0 |
2385 | if (ioc->mtrr_reg > 0) { | 2423 | if (ioc->mtrr_reg > 0) { |
2386 | mtrr_del(ioc->mtrr_reg, 0, 0); | 2424 | mtrr_del(ioc->mtrr_reg, 0, 0); |
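The mptbase.c hunks above replace a blanket pci_enable_device() call with the select/enable/request trio, so only the memory BARs the driver maps are enabled and reserved, and suspend/dispose release exactly the same set. Below is a minimal generic sketch of that pattern, not the Fusion MPT code itself; foo_probe, foo_remove and the region name "foo" are placeholders, and keeping the BAR mask in a single static is a simplification of the ioc->bars field the patch adds.

#include <linux/pci.h>

static int foo_bars;	/* bitmask of BARs we claimed */

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* Pick only the memory-space BARs; I/O BARs stay untouched. */
	foo_bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* Enable the device for MMIO decoding only. */
	if (pci_enable_device_mem(pdev))
		return -ENODEV;

	/* Reserve just the selected BARs under the driver's name. */
	if (pci_request_selected_regions(pdev, foo_bars, "foo")) {
		pci_disable_device(pdev);
		return -EBUSY;
	}
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	/* Teardown mirrors setup: release the same BAR set, then disable. */
	pci_release_selected_regions(pdev, foo_bars);
	pci_disable_device(pdev);
}

When I/O space is also needed, as in the mpt_do_ioc_recovery() hunk for firmware-download boot, the same sequence is repeated with IORESOURCE_MEM | IORESOURCE_IO after releasing the previously selected regions.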
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b49b706c0020..d83ea96fe135 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -629,6 +629,7 @@ typedef struct _MPT_ADAPTER | |||
629 | dma_addr_t HostPageBuffer_dma; | 629 | dma_addr_t HostPageBuffer_dma; |
630 | int mtrr_reg; | 630 | int mtrr_reg; |
631 | struct pci_dev *pcidev; /* struct pci_dev pointer */ | 631 | struct pci_dev *pcidev; /* struct pci_dev pointer */ |
632 | int bars; /* bitmask of BAR's that must be configured */ | ||
632 | u8 __iomem *memmap; /* mmap address */ | 633 | u8 __iomem *memmap; /* mmap address */ |
633 | struct Scsi_Host *sh; /* Scsi Host pointer */ | 634 | struct Scsi_Host *sh; /* Scsi Host pointer */ |
634 | SpiCfgData spi_data; /* Scsi config. data */ | 635 | SpiCfgData spi_data; /* Scsi config. data */ |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a5f0aaaf0dd4..a7a0813b24cb 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -722,7 +722,7 @@ config SCSI_FD_MCS | |||
722 | 722 | ||
723 | config SCSI_GDTH | 723 | config SCSI_GDTH |
724 | tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support" | 724 | tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support" |
725 | depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API && PCI_LEGACY | 725 | depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API |
726 | ---help--- | 726 | ---help--- |
727 | Formerly called GDT SCSI Disk Array Controller Support. | 727 | Formerly called GDT SCSI Disk Array Controller Support. |
728 | 728 | ||
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index bfd0e64964ac..c05092fd3a9d 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -144,51 +144,77 @@ static char *aac_get_status_string(u32 status); | |||
144 | */ | 144 | */ |
145 | 145 | ||
146 | static int nondasd = -1; | 146 | static int nondasd = -1; |
147 | static int aac_cache = 0; | 147 | static int aac_cache; |
148 | static int dacmode = -1; | 148 | static int dacmode = -1; |
149 | 149 | int aac_msi; | |
150 | int aac_commit = -1; | 150 | int aac_commit = -1; |
151 | int startup_timeout = 180; | 151 | int startup_timeout = 180; |
152 | int aif_timeout = 120; | 152 | int aif_timeout = 120; |
153 | 153 | ||
154 | module_param(nondasd, int, S_IRUGO|S_IWUSR); | 154 | module_param(nondasd, int, S_IRUGO|S_IWUSR); |
155 | MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on"); | 155 | MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices." |
156 | " 0=off, 1=on"); | ||
156 | module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR); | 157 | module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR); |
157 | MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n\tbit 0 - Disable FUA in WRITE SCSI commands\n\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n\tbit 2 - Disable only if Battery not protecting Cache"); | 158 | MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n" |
159 | "\tbit 0 - Disable FUA in WRITE SCSI commands\n" | ||
160 | "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n" | ||
161 | "\tbit 2 - Disable only if Battery not protecting Cache"); | ||
158 | module_param(dacmode, int, S_IRUGO|S_IWUSR); | 162 | module_param(dacmode, int, S_IRUGO|S_IWUSR); |
159 | MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on"); | 163 | MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC." |
164 | " 0=off, 1=on"); | ||
160 | module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR); | 165 | module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR); |
161 | MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on"); | 166 | MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the" |
167 | " adapter for foreign arrays.\n" | ||
168 | "This is typically needed in systems that do not have a BIOS." | ||
169 | " 0=off, 1=on"); | ||
170 | module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR); | ||
171 | MODULE_PARM_DESC(msi, "IRQ handling." | ||
172 | " 0=PIC(default), 1=MSI, 2=MSI-X(unsupported, uses MSI)"); | ||
162 | module_param(startup_timeout, int, S_IRUGO|S_IWUSR); | 173 | module_param(startup_timeout, int, S_IRUGO|S_IWUSR); |
163 | MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for adapter to have it's kernel up and\nrunning. This is typically adjusted for large systems that do not have a BIOS."); | 174 | MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for" |
175 | " adapter to have it's kernel up and\n" | ||
176 | "running. This is typically adjusted for large systems that do not" | ||
177 | " have a BIOS."); | ||
164 | module_param(aif_timeout, int, S_IRUGO|S_IWUSR); | 178 | module_param(aif_timeout, int, S_IRUGO|S_IWUSR); |
165 | MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for applications to pick up AIFs before\nderegistering them. This is typically adjusted for heavily burdened systems."); | 179 | MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for" |
180 | " applications to pick up AIFs before\n" | ||
181 | "deregistering them. This is typically adjusted for heavily burdened" | ||
182 | " systems."); | ||
166 | 183 | ||
167 | int numacb = -1; | 184 | int numacb = -1; |
168 | module_param(numacb, int, S_IRUGO|S_IWUSR); | 185 | module_param(numacb, int, S_IRUGO|S_IWUSR); |
169 | MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control blocks (FIB) allocated. Valid values are 512 and down. Default is to use suggestion from Firmware."); | 186 | MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control" |
187 | " blocks (FIB) allocated. Valid values are 512 and down. Default is" | ||
188 | " to use suggestion from Firmware."); | ||
170 | 189 | ||
171 | int acbsize = -1; | 190 | int acbsize = -1; |
172 | module_param(acbsize, int, S_IRUGO|S_IWUSR); | 191 | module_param(acbsize, int, S_IRUGO|S_IWUSR); |
173 | MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware."); | 192 | MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)" |
193 | " size. Valid values are 512, 2048, 4096 and 8192. Default is to use" | ||
194 | " suggestion from Firmware."); | ||
174 | 195 | ||
175 | int update_interval = 30 * 60; | 196 | int update_interval = 30 * 60; |
176 | module_param(update_interval, int, S_IRUGO|S_IWUSR); | 197 | module_param(update_interval, int, S_IRUGO|S_IWUSR); |
177 | MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync updates issued to adapter."); | 198 | MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync" |
199 | " updates issued to adapter."); | ||
178 | 200 | ||
179 | int check_interval = 24 * 60 * 60; | 201 | int check_interval = 24 * 60 * 60; |
180 | module_param(check_interval, int, S_IRUGO|S_IWUSR); | 202 | module_param(check_interval, int, S_IRUGO|S_IWUSR); |
181 | MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health checks."); | 203 | MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health" |
204 | " checks."); | ||
182 | 205 | ||
183 | int aac_check_reset = 1; | 206 | int aac_check_reset = 1; |
184 | module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR); | 207 | module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR); |
185 | MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the adapter. a value of -1 forces the reset to adapters programmed to ignore it."); | 208 | MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the" |
209 | " adapter. a value of -1 forces the reset to adapters programmed to" | ||
210 | " ignore it."); | ||
186 | 211 | ||
187 | int expose_physicals = -1; | 212 | int expose_physicals = -1; |
188 | module_param(expose_physicals, int, S_IRUGO|S_IWUSR); | 213 | module_param(expose_physicals, int, S_IRUGO|S_IWUSR); |
189 | MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on"); | 214 | MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays." |
215 | " -1=protect 0=off, 1=on"); | ||
190 | 216 | ||
191 | int aac_reset_devices = 0; | 217 | int aac_reset_devices; |
192 | module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR); | 218 | module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR); |
193 | MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization."); | 219 | MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization."); |
194 | 220 | ||
@@ -1315,7 +1341,7 @@ int aac_get_adapter_info(struct aac_dev* dev) | |||
1315 | (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid), | 1341 | (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid), |
1316 | dev->supplement_adapter_info.VpdInfo.Tsid); | 1342 | dev->supplement_adapter_info.VpdInfo.Tsid); |
1317 | } | 1343 | } |
1318 | if (!aac_check_reset || ((aac_check_reset != 1) && | 1344 | if (!aac_check_reset || ((aac_check_reset == 1) && |
1319 | (dev->supplement_adapter_info.SupportedOptions2 & | 1345 | (dev->supplement_adapter_info.SupportedOptions2 & |
1320 | AAC_OPTION_IGNORE_RESET))) { | 1346 | AAC_OPTION_IGNORE_RESET))) { |
1321 | printk(KERN_INFO "%s%d: Reset Adapter Ignored\n", | 1347 | printk(KERN_INFO "%s%d: Reset Adapter Ignored\n", |
@@ -1353,13 +1379,14 @@ int aac_get_adapter_info(struct aac_dev* dev) | |||
1353 | 1379 | ||
1354 | if (nondasd != -1) | 1380 | if (nondasd != -1) |
1355 | dev->nondasd_support = (nondasd!=0); | 1381 | dev->nondasd_support = (nondasd!=0); |
1356 | if(dev->nondasd_support != 0) { | 1382 | if (dev->nondasd_support && !dev->in_reset) |
1357 | printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id); | 1383 | printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id); |
1358 | } | ||
1359 | 1384 | ||
1360 | dev->dac_support = 0; | 1385 | dev->dac_support = 0; |
1361 | if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){ | 1386 | if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){ |
1362 | printk(KERN_INFO "%s%d: 64bit support enabled.\n", dev->name, dev->id); | 1387 | if (!dev->in_reset) |
1388 | printk(KERN_INFO "%s%d: 64bit support enabled.\n", | ||
1389 | dev->name, dev->id); | ||
1363 | dev->dac_support = 1; | 1390 | dev->dac_support = 1; |
1364 | } | 1391 | } |
1365 | 1392 | ||
@@ -1369,8 +1396,9 @@ int aac_get_adapter_info(struct aac_dev* dev) | |||
1369 | if(dev->dac_support != 0) { | 1396 | if(dev->dac_support != 0) { |
1370 | if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) && | 1397 | if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) && |
1371 | !pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) { | 1398 | !pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) { |
1372 | printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n", | 1399 | if (!dev->in_reset) |
1373 | dev->name, dev->id); | 1400 | printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n", |
1401 | dev->name, dev->id); | ||
1374 | } else if (!pci_set_dma_mask(dev->pdev, DMA_32BIT_MASK) && | 1402 | } else if (!pci_set_dma_mask(dev->pdev, DMA_32BIT_MASK) && |
1375 | !pci_set_consistent_dma_mask(dev->pdev, DMA_32BIT_MASK)) { | 1403 | !pci_set_consistent_dma_mask(dev->pdev, DMA_32BIT_MASK)) { |
1376 | printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n", | 1404 | printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n", |
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 3195d29f2177..ace0b751c131 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1026,6 +1026,7 @@ struct aac_dev | |||
1026 | u8 raw_io_64; | 1026 | u8 raw_io_64; |
1027 | u8 printf_enabled; | 1027 | u8 printf_enabled; |
1028 | u8 in_reset; | 1028 | u8 in_reset; |
1029 | u8 msi; | ||
1029 | }; | 1030 | }; |
1030 | 1031 | ||
1031 | #define aac_adapter_interrupt(dev) \ | 1032 | #define aac_adapter_interrupt(dev) \ |
@@ -1881,6 +1882,7 @@ extern int startup_timeout; | |||
1881 | extern int aif_timeout; | 1882 | extern int aif_timeout; |
1882 | extern int expose_physicals; | 1883 | extern int expose_physicals; |
1883 | extern int aac_reset_devices; | 1884 | extern int aac_reset_devices; |
1885 | extern int aac_msi; | ||
1884 | extern int aac_commit; | 1886 | extern int aac_commit; |
1885 | extern int update_interval; | 1887 | extern int update_interval; |
1886 | extern int check_interval; | 1888 | extern int check_interval; |
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 81b36923e0ef..47434499e82b 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1458,7 +1458,7 @@ int aac_check_health(struct aac_dev * aac) | |||
1458 | 1458 | ||
1459 | printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); | 1459 | printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); |
1460 | 1460 | ||
1461 | if (!aac_check_reset || ((aac_check_reset != 1) && | 1461 | if (!aac_check_reset || ((aac_check_reset == 1) && |
1462 | (aac->supplement_adapter_info.SupportedOptions2 & | 1462 | (aac->supplement_adapter_info.SupportedOptions2 & |
1463 | AAC_OPTION_IGNORE_RESET))) | 1463 | AAC_OPTION_IGNORE_RESET))) |
1464 | goto out; | 1464 | goto out; |
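The aacraid hunks that touch aac_check_reset fix the polarity of the "ignore reset" test: after the change a failed health check (or BlinkLED) skips the adapter reset only when check_reset is 0, or when it is left at its default of 1 and the firmware advertises AAC_OPTION_IGNORE_RESET; a value of -1 still forces the reset, matching the module parameter description. A standalone sketch of that decision, with an illustrative helper name and a plain flag standing in for the SupportedOptions2 bit test:

/* Hypothetical helper mirroring the corrected condition above:
 * returns nonzero when the adapter reset should be skipped.
 */
static int skip_adapter_reset(int check_reset, int fw_ignores_reset)
{
	if (!check_reset)		/* 0: never reset on health-check failure */
		return 1;
	if (check_reset == 1 && fw_ignores_reset)
		return 1;		/* default: honour the firmware opt-out */
	return 0;			/* -1 (or anything else): force the reset */
}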
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index e80d2a0c46af..ae5f74fb62d5 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -275,9 +275,9 @@ static const char *aac_info(struct Scsi_Host *shost) | |||
275 | 275 | ||
276 | /** | 276 | /** |
277 | * aac_get_driver_ident | 277 | * aac_get_driver_ident |
278 | * @devtype: index into lookup table | 278 | * @devtype: index into lookup table |
279 | * | 279 | * |
280 | * Returns a pointer to the entry in the driver lookup table. | 280 | * Returns a pointer to the entry in the driver lookup table. |
281 | */ | 281 | */ |
282 | 282 | ||
283 | struct aac_driver_ident* aac_get_driver_ident(int devtype) | 283 | struct aac_driver_ident* aac_get_driver_ident(int devtype) |
@@ -494,13 +494,14 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth) | |||
494 | 494 | ||
495 | static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) | 495 | static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) |
496 | { | 496 | { |
497 | struct scsi_device * sdev = to_scsi_device(dev); | 497 | struct scsi_device *sdev = to_scsi_device(dev); |
498 | struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata); | ||
498 | if (sdev_channel(sdev) != CONTAINER_CHANNEL) | 499 | if (sdev_channel(sdev) != CONTAINER_CHANNEL) |
499 | return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach | 500 | return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach |
500 | ? "Hidden\n" : "JBOD"); | 501 | ? "Hidden\n" : |
502 | ((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : "")); | ||
501 | return snprintf(buf, PAGE_SIZE, "%s\n", | 503 | return snprintf(buf, PAGE_SIZE, "%s\n", |
502 | get_container_type(((struct aac_dev *)(sdev->host->hostdata)) | 504 | get_container_type(aac->fsa_dev[sdev_id(sdev)].type)); |
503 | ->fsa_dev[sdev_id(sdev)].type)); | ||
504 | } | 505 | } |
505 | 506 | ||
506 | static struct device_attribute aac_raid_level_attr = { | 507 | static struct device_attribute aac_raid_level_attr = { |
@@ -641,7 +642,7 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) | |||
641 | AAC_OPTION_MU_RESET) && | 642 | AAC_OPTION_MU_RESET) && |
642 | aac_check_reset && | 643 | aac_check_reset && |
643 | ((aac_check_reset != 1) || | 644 | ((aac_check_reset != 1) || |
644 | (aac->supplement_adapter_info.SupportedOptions2 & | 645 | !(aac->supplement_adapter_info.SupportedOptions2 & |
645 | AAC_OPTION_IGNORE_RESET))) | 646 | AAC_OPTION_IGNORE_RESET))) |
646 | aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */ | 647 | aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */ |
647 | return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */ | 648 | return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */ |
@@ -860,8 +861,8 @@ ssize_t aac_show_serial_number(struct class_device *class_dev, char *buf) | |||
860 | le32_to_cpu(dev->adapter_info.serial[0])); | 861 | le32_to_cpu(dev->adapter_info.serial[0])); |
861 | if (len && | 862 | if (len && |
862 | !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[ | 863 | !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[ |
863 | sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)+2-len], | 864 | sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len], |
864 | buf, len)) | 865 | buf, len-1)) |
865 | len = snprintf(buf, PAGE_SIZE, "%.*s\n", | 866 | len = snprintf(buf, PAGE_SIZE, "%.*s\n", |
866 | (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo), | 867 | (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo), |
867 | dev->supplement_adapter_info.MfgPcbaSerialNo); | 868 | dev->supplement_adapter_info.MfgPcbaSerialNo); |
@@ -1004,32 +1005,32 @@ static const struct file_operations aac_cfg_fops = { | |||
1004 | 1005 | ||
1005 | static struct scsi_host_template aac_driver_template = { | 1006 | static struct scsi_host_template aac_driver_template = { |
1006 | .module = THIS_MODULE, | 1007 | .module = THIS_MODULE, |
1007 | .name = "AAC", | 1008 | .name = "AAC", |
1008 | .proc_name = AAC_DRIVERNAME, | 1009 | .proc_name = AAC_DRIVERNAME, |
1009 | .info = aac_info, | 1010 | .info = aac_info, |
1010 | .ioctl = aac_ioctl, | 1011 | .ioctl = aac_ioctl, |
1011 | #ifdef CONFIG_COMPAT | 1012 | #ifdef CONFIG_COMPAT |
1012 | .compat_ioctl = aac_compat_ioctl, | 1013 | .compat_ioctl = aac_compat_ioctl, |
1013 | #endif | 1014 | #endif |
1014 | .queuecommand = aac_queuecommand, | 1015 | .queuecommand = aac_queuecommand, |
1015 | .bios_param = aac_biosparm, | 1016 | .bios_param = aac_biosparm, |
1016 | .shost_attrs = aac_attrs, | 1017 | .shost_attrs = aac_attrs, |
1017 | .slave_configure = aac_slave_configure, | 1018 | .slave_configure = aac_slave_configure, |
1018 | .change_queue_depth = aac_change_queue_depth, | 1019 | .change_queue_depth = aac_change_queue_depth, |
1019 | .sdev_attrs = aac_dev_attrs, | 1020 | .sdev_attrs = aac_dev_attrs, |
1020 | .eh_abort_handler = aac_eh_abort, | 1021 | .eh_abort_handler = aac_eh_abort, |
1021 | .eh_host_reset_handler = aac_eh_reset, | 1022 | .eh_host_reset_handler = aac_eh_reset, |
1022 | .can_queue = AAC_NUM_IO_FIB, | 1023 | .can_queue = AAC_NUM_IO_FIB, |
1023 | .this_id = MAXIMUM_NUM_CONTAINERS, | 1024 | .this_id = MAXIMUM_NUM_CONTAINERS, |
1024 | .sg_tablesize = 16, | 1025 | .sg_tablesize = 16, |
1025 | .max_sectors = 128, | 1026 | .max_sectors = 128, |
1026 | #if (AAC_NUM_IO_FIB > 256) | 1027 | #if (AAC_NUM_IO_FIB > 256) |
1027 | .cmd_per_lun = 256, | 1028 | .cmd_per_lun = 256, |
1028 | #else | 1029 | #else |
1029 | .cmd_per_lun = AAC_NUM_IO_FIB, | 1030 | .cmd_per_lun = AAC_NUM_IO_FIB, |
1030 | #endif | 1031 | #endif |
1031 | .use_clustering = ENABLE_CLUSTERING, | 1032 | .use_clustering = ENABLE_CLUSTERING, |
1032 | .emulated = 1, | 1033 | .emulated = 1, |
1033 | }; | 1034 | }; |
1034 | 1035 | ||
1035 | static void __aac_shutdown(struct aac_dev * aac) | 1036 | static void __aac_shutdown(struct aac_dev * aac) |
@@ -1039,6 +1040,8 @@ static void __aac_shutdown(struct aac_dev * aac) | |||
1039 | aac_send_shutdown(aac); | 1040 | aac_send_shutdown(aac); |
1040 | aac_adapter_disable_int(aac); | 1041 | aac_adapter_disable_int(aac); |
1041 | free_irq(aac->pdev->irq, aac); | 1042 | free_irq(aac->pdev->irq, aac); |
1043 | if (aac->msi) | ||
1044 | pci_disable_msi(aac->pdev); | ||
1042 | } | 1045 | } |
1043 | 1046 | ||
1044 | static int __devinit aac_probe_one(struct pci_dev *pdev, | 1047 | static int __devinit aac_probe_one(struct pci_dev *pdev, |
@@ -1254,7 +1257,7 @@ static struct pci_driver aac_pci_driver = { | |||
1254 | .id_table = aac_pci_tbl, | 1257 | .id_table = aac_pci_tbl, |
1255 | .probe = aac_probe_one, | 1258 | .probe = aac_probe_one, |
1256 | .remove = __devexit_p(aac_remove_one), | 1259 | .remove = __devexit_p(aac_remove_one), |
1257 | .shutdown = aac_shutdown, | 1260 | .shutdown = aac_shutdown, |
1258 | }; | 1261 | }; |
1259 | 1262 | ||
1260 | static int __init aac_init(void) | 1263 | static int __init aac_init(void) |
@@ -1271,7 +1274,7 @@ static int __init aac_init(void) | |||
1271 | aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops); | 1274 | aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops); |
1272 | if (aac_cfg_major < 0) { | 1275 | if (aac_cfg_major < 0) { |
1273 | printk(KERN_WARNING | 1276 | printk(KERN_WARNING |
1274 | "aacraid: unable to register \"aac\" device.\n"); | 1277 | "aacraid: unable to register \"aac\" device.\n"); |
1275 | } | 1278 | } |
1276 | 1279 | ||
1277 | return 0; | 1280 | return 0; |
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index a08bbf1fd76c..1f18b83e1e02 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -625,8 +625,11 @@ int _aac_rx_init(struct aac_dev *dev) | |||
625 | if (aac_init_adapter(dev) == NULL) | 625 | if (aac_init_adapter(dev) == NULL) |
626 | goto error_iounmap; | 626 | goto error_iounmap; |
627 | aac_adapter_comm(dev, dev->comm_interface); | 627 | aac_adapter_comm(dev, dev->comm_interface); |
628 | if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr, | 628 | dev->msi = aac_msi && !pci_enable_msi(dev->pdev); |
629 | if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, | ||
629 | IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { | 630 | IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { |
631 | if (dev->msi) | ||
632 | pci_disable_msi(dev->pdev); | ||
630 | printk(KERN_ERR "%s%d: Interrupt unavailable.\n", | 633 | printk(KERN_ERR "%s%d: Interrupt unavailable.\n", |
631 | name, instance); | 634 | name, instance); |
632 | goto error_iounmap; | 635 | goto error_iounmap; |
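The rx.c change switches interrupt setup to dev->pdev->irq and enables MSI opportunistically: pci_enable_msi() returns 0 on success and rewrites pdev->irq to the MSI vector, so the same request_irq() call works in both modes, and the MSI enable is undone if request_irq() fails (and again in __aac_shutdown() after free_irq()). A generic sketch of that fallback pattern, with hypothetical foo_* names:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_intr(int irq, void *dev_id)
{
	/* a real handler would inspect and service the device here */
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct pci_dev *pdev, void *dev, int want_msi)
{
	int msi = 0;

	/* Try MSI first; on success pdev->irq now refers to the MSI vector. */
	if (want_msi && !pci_enable_msi(pdev))
		msi = 1;

	if (request_irq(pdev->irq, foo_intr, IRQF_SHARED, "foo", dev)) {
		if (msi)
			pci_disable_msi(pdev);	/* undo MSI before giving up */
		return -EBUSY;
	}
	return 0;
}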
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 85b91bc578c9..cfc3410ec073 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/types.h> | 33 | #include <linux/types.h> |
34 | #include <linux/pci.h> | ||
34 | #include <linux/spinlock.h> | 35 | #include <linux/spinlock.h> |
35 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
36 | #include <linux/blkdev.h> | 37 | #include <linux/blkdev.h> |
@@ -385,7 +386,7 @@ int aac_sa_init(struct aac_dev *dev) | |||
385 | 386 | ||
386 | if(aac_init_adapter(dev) == NULL) | 387 | if(aac_init_adapter(dev) == NULL) |
387 | goto error_irq; | 388 | goto error_irq; |
388 | if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr, | 389 | if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, |
389 | IRQF_SHARED|IRQF_DISABLED, | 390 | IRQF_SHARED|IRQF_DISABLED, |
390 | "aacraid", (void *)dev ) < 0) { | 391 | "aacraid", (void *)dev ) < 0) { |
391 | printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", | 392 | printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", |
@@ -403,7 +404,7 @@ int aac_sa_init(struct aac_dev *dev) | |||
403 | 404 | ||
404 | error_irq: | 405 | error_irq: |
405 | aac_sa_disable_interrupt(dev); | 406 | aac_sa_disable_interrupt(dev); |
406 | free_irq(dev->scsi_host_ptr->irq, (void *)dev); | 407 | free_irq(dev->pdev->irq, (void *)dev); |
407 | 408 | ||
408 | error_iounmap: | 409 | error_iounmap: |
409 | 410 | ||
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index ccef891d642f..3c2d6888bb8c 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -566,7 +566,7 @@ typedef struct asc_dvc_var { | |||
566 | ASC_SCSI_BIT_ID_TYPE unit_not_ready; | 566 | ASC_SCSI_BIT_ID_TYPE unit_not_ready; |
567 | ASC_SCSI_BIT_ID_TYPE queue_full_or_busy; | 567 | ASC_SCSI_BIT_ID_TYPE queue_full_or_busy; |
568 | ASC_SCSI_BIT_ID_TYPE start_motor; | 568 | ASC_SCSI_BIT_ID_TYPE start_motor; |
569 | uchar overrun_buf[ASC_OVERRUN_BSIZE] __aligned(8); | 569 | uchar *overrun_buf; |
570 | dma_addr_t overrun_dma; | 570 | dma_addr_t overrun_dma; |
571 | uchar scsi_reset_wait; | 571 | uchar scsi_reset_wait; |
572 | uchar chip_no; | 572 | uchar chip_no; |
@@ -13833,6 +13833,12 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost, | |||
13833 | */ | 13833 | */ |
13834 | if (ASC_NARROW_BOARD(boardp)) { | 13834 | if (ASC_NARROW_BOARD(boardp)) { |
13835 | ASC_DBG(2, "AscInitAsc1000Driver()\n"); | 13835 | ASC_DBG(2, "AscInitAsc1000Driver()\n"); |
13836 | |||
13837 | asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL); | ||
13838 | if (!asc_dvc_varp->overrun_buf) { | ||
13839 | ret = -ENOMEM; | ||
13840 | goto err_free_wide_mem; | ||
13841 | } | ||
13836 | warn_code = AscInitAsc1000Driver(asc_dvc_varp); | 13842 | warn_code = AscInitAsc1000Driver(asc_dvc_varp); |
13837 | 13843 | ||
13838 | if (warn_code || asc_dvc_varp->err_code) { | 13844 | if (warn_code || asc_dvc_varp->err_code) { |
@@ -13840,8 +13846,10 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost, | |||
13840 | "warn 0x%x, error 0x%x\n", | 13846 | "warn 0x%x, error 0x%x\n", |
13841 | asc_dvc_varp->init_state, warn_code, | 13847 | asc_dvc_varp->init_state, warn_code, |
13842 | asc_dvc_varp->err_code); | 13848 | asc_dvc_varp->err_code); |
13843 | if (asc_dvc_varp->err_code) | 13849 | if (asc_dvc_varp->err_code) { |
13844 | ret = -ENODEV; | 13850 | ret = -ENODEV; |
13851 | kfree(asc_dvc_varp->overrun_buf); | ||
13852 | } | ||
13845 | } | 13853 | } |
13846 | } else { | 13854 | } else { |
13847 | if (advansys_wide_init_chip(shost)) | 13855 | if (advansys_wide_init_chip(shost)) |
@@ -13894,6 +13902,7 @@ static int advansys_release(struct Scsi_Host *shost) | |||
13894 | dma_unmap_single(board->dev, | 13902 | dma_unmap_single(board->dev, |
13895 | board->dvc_var.asc_dvc_var.overrun_dma, | 13903 | board->dvc_var.asc_dvc_var.overrun_dma, |
13896 | ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); | 13904 | ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); |
13905 | kfree(board->dvc_var.asc_dvc_var.overrun_buf); | ||
13897 | } else { | 13906 | } else { |
13898 | iounmap(board->ioremap_addr); | 13907 | iounmap(board->ioremap_addr); |
13899 | advansys_wide_free_mem(board); | 13908 | advansys_wide_free_mem(board); |
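The advansys fix stops using an overrun buffer embedded in asc_dvc_var (which only guaranteed 8-byte alignment) and allocates it separately, since memory handed to dma_map_single() is safer as its own kmalloc-style allocation than as a slice of a larger structure whose neighbouring fields the CPU keeps touching. A stripped-down sketch of that allocate/map/unmap/free lifecycle; the struct, names and buffer size here are illustrative, not the driver's:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define FOO_OVERRUN_BSIZE 64	/* illustrative size only */

struct foo_board {
	struct device *dev;
	unsigned char *overrun_buf;
	dma_addr_t overrun_dma;
};

static int foo_alloc_overrun(struct foo_board *b)
{
	b->overrun_buf = kzalloc(FOO_OVERRUN_BSIZE, GFP_KERNEL);
	if (!b->overrun_buf)
		return -ENOMEM;

	b->overrun_dma = dma_map_single(b->dev, b->overrun_buf,
					FOO_OVERRUN_BSIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(b->dev, b->overrun_dma)) {
		kfree(b->overrun_buf);
		return -EIO;
	}
	return 0;
}

static void foo_free_overrun(struct foo_board *b)
{
	dma_unmap_single(b->dev, b->overrun_dma,
			 FOO_OVERRUN_BSIZE, DMA_FROM_DEVICE);
	kfree(b->overrun_buf);
}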
diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h
index fa7c5290257d..912e6b755f74 100644
--- a/drivers/scsi/aic94xx/aic94xx_sas.h
+++ b/drivers/scsi/aic94xx/aic94xx_sas.h
@@ -292,7 +292,7 @@ struct scb_header { | |||
292 | #define INITIATE_SSP_TASK 0x00 | 292 | #define INITIATE_SSP_TASK 0x00 |
293 | #define INITIATE_LONG_SSP_TASK 0x01 | 293 | #define INITIATE_LONG_SSP_TASK 0x01 |
294 | #define INITIATE_BIDIR_SSP_TASK 0x02 | 294 | #define INITIATE_BIDIR_SSP_TASK 0x02 |
295 | #define ABORT_TASK 0x03 | 295 | #define SCB_ABORT_TASK 0x03 |
296 | #define INITIATE_SSP_TMF 0x04 | 296 | #define INITIATE_SSP_TMF 0x04 |
297 | #define SSP_TARG_GET_DATA 0x05 | 297 | #define SSP_TARG_GET_DATA 0x05 |
298 | #define SSP_TARG_GET_DATA_GOOD 0x06 | 298 | #define SSP_TARG_GET_DATA_GOOD 0x06 |
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 87b2f6e6adfe..b52124f3d3ac 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -369,7 +369,7 @@ int asd_abort_task(struct sas_task *task) | |||
369 | return -ENOMEM; | 369 | return -ENOMEM; |
370 | scb = ascb->scb; | 370 | scb = ascb->scb; |
371 | 371 | ||
372 | scb->header.opcode = ABORT_TASK; | 372 | scb->header.opcode = SCB_ABORT_TASK; |
373 | 373 | ||
374 | switch (task->task_proto) { | 374 | switch (task->task_proto) { |
375 | case SAS_PROTOCOL_SATA: | 375 | case SAS_PROTOCOL_SATA: |
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index fb5f20284389..a715632e19d4 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2018,6 +2018,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, | |||
2018 | * the upper layers to process. This would have been set | 2018 | * the upper layers to process. This would have been set |
2019 | * correctly by fas216_std_done. | 2019 | * correctly by fas216_std_done. |
2020 | */ | 2020 | */ |
2021 | scsi_eh_restore_cmnd(SCpnt, &info->ses); | ||
2021 | SCpnt->scsi_done(SCpnt); | 2022 | SCpnt->scsi_done(SCpnt); |
2022 | } | 2023 | } |
2023 | 2024 | ||
@@ -2103,23 +2104,12 @@ request_sense: | |||
2103 | if (SCpnt->cmnd[0] == REQUEST_SENSE) | 2104 | if (SCpnt->cmnd[0] == REQUEST_SENSE) |
2104 | goto done; | 2105 | goto done; |
2105 | 2106 | ||
2107 | scsi_eh_prep_cmnd(SCpnt, &info->ses, NULL, 0, ~0); | ||
2106 | fas216_log_target(info, LOG_CONNECT, SCpnt->device->id, | 2108 | fas216_log_target(info, LOG_CONNECT, SCpnt->device->id, |
2107 | "requesting sense"); | 2109 | "requesting sense"); |
2108 | memset(SCpnt->cmnd, 0, sizeof (SCpnt->cmnd)); | 2110 | init_SCp(SCpnt); |
2109 | SCpnt->cmnd[0] = REQUEST_SENSE; | ||
2110 | SCpnt->cmnd[1] = SCpnt->device->lun << 5; | ||
2111 | SCpnt->cmnd[4] = sizeof(SCpnt->sense_buffer); | ||
2112 | SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); | ||
2113 | SCpnt->SCp.buffer = NULL; | ||
2114 | SCpnt->SCp.buffers_residual = 0; | ||
2115 | SCpnt->SCp.ptr = (char *)SCpnt->sense_buffer; | ||
2116 | SCpnt->SCp.this_residual = sizeof(SCpnt->sense_buffer); | ||
2117 | SCpnt->SCp.phase = sizeof(SCpnt->sense_buffer); | ||
2118 | SCpnt->SCp.Message = 0; | 2111 | SCpnt->SCp.Message = 0; |
2119 | SCpnt->SCp.Status = 0; | 2112 | SCpnt->SCp.Status = 0; |
2120 | SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer); | ||
2121 | SCpnt->sc_data_direction = DMA_FROM_DEVICE; | ||
2122 | SCpnt->use_sg = 0; | ||
2123 | SCpnt->tag = 0; | 2113 | SCpnt->tag = 0; |
2124 | SCpnt->host_scribble = (void *)fas216_rq_sns_done; | 2114 | SCpnt->host_scribble = (void *)fas216_rq_sns_done; |
2125 | 2115 | ||
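The fas216 change replaces roughly a dozen lines of hand-built REQUEST SENSE setup with the scsi_eh helpers: scsi_eh_prep_cmnd() saves the command's CDB, data buffer and status into a struct scsi_eh_save and, when called with a NULL cdb and ~0 sense_bytes, rewrites the command as a REQUEST SENSE aimed at the sense buffer; scsi_eh_restore_cmnd() undoes all of that before the command is completed, as the fas216_rq_sns_done() hunk shows. A minimal sketch of the pairing, with hypothetical names and the queuing/completion details elided:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

static struct scsi_eh_save foo_ses;	/* kept per-host in a real driver */

/* Convert the just-failed command into an internal REQUEST SENSE. */
static void foo_issue_autosense(struct scsi_cmnd *cmd)
{
	/* NULL cmnd plus ~0 sense_bytes asks the helper to build the
	 * REQUEST SENSE CDB and point the data buffer at cmd's sense data.
	 */
	scsi_eh_prep_cmnd(cmd, &foo_ses, NULL, 0, ~0);
	/* ... hand 'cmd' to the hardware as a normal command ... */
}

/* Completion path for the internal REQUEST SENSE. */
static void foo_autosense_done(struct scsi_cmnd *cmd)
{
	/* Put the original CDB, buffers and status back, then run the
	 * midlayer completion as the driver above does.
	 */
	scsi_eh_restore_cmnd(cmd, &foo_ses);
}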
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index 00e5f055afdc..3e73e264972e 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -16,6 +16,8 @@ | |||
16 | #define NO_IRQ 255 | 16 | #define NO_IRQ 255 |
17 | #endif | 17 | #endif |
18 | 18 | ||
19 | #include <scsi/scsi_eh.h> | ||
20 | |||
19 | #include "queue.h" | 21 | #include "queue.h" |
20 | #include "msgqueue.h" | 22 | #include "msgqueue.h" |
21 | 23 | ||
@@ -311,6 +313,7 @@ typedef struct { | |||
311 | 313 | ||
312 | /* miscellaneous */ | 314 | /* miscellaneous */ |
313 | int internal_done; /* flag to indicate request done */ | 315 | int internal_done; /* flag to indicate request done */ |
316 | struct scsi_eh_save *ses; /* holds request sense restore info */ | ||
314 | unsigned long magic_end; | 317 | unsigned long magic_end; |
315 | } FAS216_Info; | 318 | } FAS216_Info; |
316 | 319 | ||
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index c82523908c2e..6d67f5c0eb8e 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -642,12 +642,15 @@ static void __init gdth_search_dev(gdth_pci_str *pcistr, ushort *cnt, | |||
642 | *cnt, vendor, device)); | 642 | *cnt, vendor, device)); |
643 | 643 | ||
644 | pdev = NULL; | 644 | pdev = NULL; |
645 | while ((pdev = pci_find_device(vendor, device, pdev)) | 645 | while ((pdev = pci_get_device(vendor, device, pdev)) |
646 | != NULL) { | 646 | != NULL) { |
647 | if (pci_enable_device(pdev)) | 647 | if (pci_enable_device(pdev)) |
648 | continue; | 648 | continue; |
649 | if (*cnt >= MAXHA) | 649 | if (*cnt >= MAXHA) { |
650 | pci_dev_put(pdev); | ||
650 | return; | 651 | return; |
652 | } | ||
653 | |||
651 | /* GDT PCI controller found, resources are already in pdev */ | 654 | /* GDT PCI controller found, resources are already in pdev */ |
652 | pcistr[*cnt].pdev = pdev; | 655 | pcistr[*cnt].pdev = pdev; |
653 | pcistr[*cnt].irq = pdev->irq; | 656 | pcistr[*cnt].irq = pdev->irq; |
@@ -4836,6 +4839,9 @@ static int __init gdth_isa_probe_one(ulong32 isa_bios) | |||
4836 | if (error) | 4839 | if (error) |
4837 | goto out_free_coal_stat; | 4840 | goto out_free_coal_stat; |
4838 | list_add_tail(&ha->list, &gdth_instances); | 4841 | list_add_tail(&ha->list, &gdth_instances); |
4842 | |||
4843 | scsi_scan_host(shp); | ||
4844 | |||
4839 | return 0; | 4845 | return 0; |
4840 | 4846 | ||
4841 | out_free_coal_stat: | 4847 | out_free_coal_stat: |
@@ -4963,6 +4969,9 @@ static int __init gdth_eisa_probe_one(ushort eisa_slot) | |||
4963 | if (error) | 4969 | if (error) |
4964 | goto out_free_coal_stat; | 4970 | goto out_free_coal_stat; |
4965 | list_add_tail(&ha->list, &gdth_instances); | 4971 | list_add_tail(&ha->list, &gdth_instances); |
4972 | |||
4973 | scsi_scan_host(shp); | ||
4974 | |||
4966 | return 0; | 4975 | return 0; |
4967 | 4976 | ||
4968 | out_free_ccb_phys: | 4977 | out_free_ccb_phys: |
@@ -5100,6 +5109,9 @@ static int __init gdth_pci_probe_one(gdth_pci_str *pcistr, int ctr) | |||
5100 | if (error) | 5109 | if (error) |
5101 | goto out_free_coal_stat; | 5110 | goto out_free_coal_stat; |
5102 | list_add_tail(&ha->list, &gdth_instances); | 5111 | list_add_tail(&ha->list, &gdth_instances); |
5112 | |||
5113 | scsi_scan_host(shp); | ||
5114 | |||
5103 | return 0; | 5115 | return 0; |
5104 | 5116 | ||
5105 | out_free_coal_stat: | 5117 | out_free_coal_stat: |
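The gdth change moves from the deprecated pci_find_device() to pci_get_device(), which takes a reference on each device it returns and releases the reference on the previous one when used as an iterator; the consequence, visible in the hunk, is that any early exit from the loop must drop the current reference with pci_dev_put(). A generic sketch of the iteration with placeholder names and limits:

#include <linux/pci.h>

static void foo_scan(unsigned int vendor, unsigned int device, int max)
{
	struct pci_dev *pdev = NULL;
	int cnt = 0;

	/* Each call drops the reference on the 'from' device and returns
	 * the next match with its refcount raised.
	 */
	while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
		if (cnt++ >= max) {
			pci_dev_put(pdev);	/* leaving early: drop our ref */
			return;
		}
		/* use 'pdev' here; the next iteration hands this reference
		 * back, so call pci_dev_get(pdev) if it is stored for later
		 */
	}
	/* loop finished with pdev == NULL, so nothing is left to release */
}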
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 83567b9755b4..2ab2d24dcc15 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -307,6 +307,7 @@ struct lpfc_vport { | |||
307 | 307 | ||
308 | uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */ | 308 | uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */ |
309 | uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */ | 309 | uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */ |
310 | uint32_t fc_rscn_flush; /* flag use of fc_rscn_id_list */ | ||
310 | struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN]; | 311 | struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN]; |
311 | struct lpfc_name fc_nodename; /* fc nodename */ | 312 | struct lpfc_name fc_nodename; /* fc nodename */ |
312 | struct lpfc_name fc_portname; /* fc portname */ | 313 | struct lpfc_name fc_portname; /* fc portname */ |
@@ -392,6 +393,13 @@ enum hba_temp_state { | |||
392 | HBA_OVER_TEMP | 393 | HBA_OVER_TEMP |
393 | }; | 394 | }; |
394 | 395 | ||
396 | enum intr_type_t { | ||
397 | NONE = 0, | ||
398 | INTx, | ||
399 | MSI, | ||
400 | MSIX, | ||
401 | }; | ||
402 | |||
395 | struct lpfc_hba { | 403 | struct lpfc_hba { |
396 | struct lpfc_sli sli; | 404 | struct lpfc_sli sli; |
397 | uint32_t sli_rev; /* SLI2 or SLI3 */ | 405 | uint32_t sli_rev; /* SLI2 or SLI3 */ |
@@ -409,7 +417,7 @@ struct lpfc_hba { | |||
409 | /* This flag is set while issuing */ | 417 | /* This flag is set while issuing */ |
410 | /* INIT_LINK mailbox command */ | 418 | /* INIT_LINK mailbox command */ |
411 | #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ | 419 | #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ |
412 | #define LS_IGNORE_ERATT 0x3 /* intr handler should ignore ERATT */ | 420 | #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ |
413 | 421 | ||
414 | struct lpfc_sli2_slim *slim2p; | 422 | struct lpfc_sli2_slim *slim2p; |
415 | struct lpfc_dmabuf hbqslimp; | 423 | struct lpfc_dmabuf hbqslimp; |
@@ -487,6 +495,8 @@ struct lpfc_hba { | |||
487 | wait_queue_head_t *work_wait; | 495 | wait_queue_head_t *work_wait; |
488 | struct task_struct *worker_thread; | 496 | struct task_struct *worker_thread; |
489 | 497 | ||
498 | uint32_t hbq_in_use; /* HBQs in use flag */ | ||
499 | struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ | ||
490 | uint32_t hbq_count; /* Count of configured HBQs */ | 500 | uint32_t hbq_count; /* Count of configured HBQs */ |
491 | struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ | 501 | struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ |
492 | 502 | ||
@@ -555,7 +565,8 @@ struct lpfc_hba { | |||
555 | mempool_t *nlp_mem_pool; | 565 | mempool_t *nlp_mem_pool; |
556 | 566 | ||
557 | struct fc_host_statistics link_stats; | 567 | struct fc_host_statistics link_stats; |
558 | uint8_t using_msi; | 568 | enum intr_type_t intr_type; |
569 | struct msix_entry msix_entries[1]; | ||
559 | 570 | ||
560 | struct list_head port_list; | 571 | struct list_head port_list; |
561 | struct lpfc_vport *pport; /* physical lpfc_vport pointer */ | 572 | struct lpfc_vport *pport; /* physical lpfc_vport pointer */ |
@@ -595,6 +606,8 @@ struct lpfc_hba { | |||
595 | unsigned long last_completion_time; | 606 | unsigned long last_completion_time; |
596 | struct timer_list hb_tmofunc; | 607 | struct timer_list hb_tmofunc; |
597 | uint8_t hb_outstanding; | 608 | uint8_t hb_outstanding; |
609 | /* ndlp reference management */ | ||
610 | spinlock_t ndlp_lock; | ||
598 | /* | 611 | /* |
599 | * Following bit will be set for all buffer tags which are not | 612 | * Following bit will be set for all buffer tags which are not |
600 | * associated with any HBQ. | 613 | * associated with any HBQ. |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4bae4a2ed2f1..b12a841703ca 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1191,7 +1191,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) | |||
1191 | shost = lpfc_shost_from_vport(vport); | 1191 | shost = lpfc_shost_from_vport(vport); |
1192 | spin_lock_irq(shost->host_lock); | 1192 | spin_lock_irq(shost->host_lock); |
1193 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) | 1193 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) |
1194 | if (ndlp->rport) | 1194 | if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport) |
1195 | ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; | 1195 | ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; |
1196 | spin_unlock_irq(shost->host_lock); | 1196 | spin_unlock_irq(shost->host_lock); |
1197 | } | 1197 | } |
@@ -1592,9 +1592,11 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, | |||
1592 | # support this feature | 1592 | # support this feature |
1593 | # 0 = MSI disabled (default) | 1593 | # 0 = MSI disabled (default) |
1594 | # 1 = MSI enabled | 1594 | # 1 = MSI enabled |
1595 | # Value range is [0,1]. Default value is 0. | 1595 | # 2 = MSI-X enabled |
1596 | # Value range is [0,2]. Default value is 0. | ||
1596 | */ | 1597 | */ |
1597 | LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible"); | 1598 | LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or " |
1599 | "MSI-X (2), if possible"); | ||
1598 | 1600 | ||
1599 | /* | 1601 | /* |
1600 | # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. | 1602 | # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. |
@@ -1946,11 +1948,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
1946 | } | 1948 | } |
1947 | 1949 | ||
1948 | /* If HBA encountered an error attention, allow only DUMP | 1950 | /* If HBA encountered an error attention, allow only DUMP |
1949 | * mailbox command until the HBA is restarted. | 1951 | * or RESTART mailbox commands until the HBA is restarted. |
1950 | */ | 1952 | */ |
1951 | if ((phba->pport->stopped) && | 1953 | if ((phba->pport->stopped) && |
1952 | (phba->sysfs_mbox.mbox->mb.mbxCommand | 1954 | (phba->sysfs_mbox.mbox->mb.mbxCommand != |
1953 | != MBX_DUMP_MEMORY)) { | 1955 | MBX_DUMP_MEMORY && |
1956 | phba->sysfs_mbox.mbox->mb.mbxCommand != | ||
1957 | MBX_RESTART)) { | ||
1954 | sysfs_mbox_idle(phba); | 1958 | sysfs_mbox_idle(phba); |
1955 | spin_unlock_irq(&phba->hbalock); | 1959 | spin_unlock_irq(&phba->hbalock); |
1956 | return -EPERM; | 1960 | return -EPERM; |
@@ -2384,7 +2388,8 @@ lpfc_get_node_by_target(struct scsi_target *starget) | |||
2384 | spin_lock_irq(shost->host_lock); | 2388 | spin_lock_irq(shost->host_lock); |
2385 | /* Search for this, mapped, target ID */ | 2389 | /* Search for this, mapped, target ID */ |
2386 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | 2390 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
2387 | if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && | 2391 | if (NLP_CHK_NODE_ACT(ndlp) && |
2392 | ndlp->nlp_state == NLP_STE_MAPPED_NODE && | ||
2388 | starget->id == ndlp->nlp_sid) { | 2393 | starget->id == ndlp->nlp_sid) { |
2389 | spin_unlock_irq(shost->host_lock); | 2394 | spin_unlock_irq(shost->host_lock); |
2390 | return ndlp; | 2395 | return ndlp; |
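The widened lpfc_use_msi range (0 = INTx, 1 = MSI, 2 = MSI-X) pairs with the new intr_type enum and the single-element msix_entries[] array added to struct lpfc_hba: "MSI-X single message" amounts to allocating one MSI-X vector and passing its .vector to request_irq(). A rough sketch of that setup using the pci_enable_msix() interface of this era (illustrative names; recent kernels would use pci_alloc_irq_vectors() instead):

#include <linux/interrupt.h>
#include <linux/pci.h>

static struct msix_entry foo_msix[1] = { { .entry = 0 } };

static int foo_enable_single_msix(struct pci_dev *pdev,
				  irq_handler_t handler, void *dev)
{
	/* Request exactly one vector; a return of 0 means success. */
	if (pci_enable_msix(pdev, foo_msix, 1))
		return -ENODEV;		/* caller falls back to MSI or INTx */

	if (request_irq(foo_msix[0].vector, handler, 0, "foo", dev)) {
		pci_disable_msix(pdev);
		return -EBUSY;
	}
	return 0;
}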
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 50fcb7c930bc..848d97744b4d 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -53,7 +53,11 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); | |||
53 | void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); | 53 | void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); |
54 | void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); | 54 | void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); |
55 | void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); | 55 | void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); |
56 | void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); | ||
56 | void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); | 57 | void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); |
58 | void lpfc_disable_node(struct lpfc_vport *, struct lpfc_nodelist *); | ||
59 | struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, | ||
60 | struct lpfc_nodelist *, int); | ||
57 | void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int); | 61 | void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int); |
58 | void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *); | 62 | void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *); |
59 | void lpfc_set_disctmo(struct lpfc_vport *); | 63 | void lpfc_set_disctmo(struct lpfc_vport *); |
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 92441ce610ed..3d0ccd9b341d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -294,7 +294,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, | |||
294 | /* Save for completion so we can release these resources */ | 294 | /* Save for completion so we can release these resources */ |
295 | geniocb->context1 = (uint8_t *) inp; | 295 | geniocb->context1 = (uint8_t *) inp; |
296 | geniocb->context2 = (uint8_t *) outp; | 296 | geniocb->context2 = (uint8_t *) outp; |
297 | geniocb->context_un.ndlp = ndlp; | 297 | geniocb->context_un.ndlp = lpfc_nlp_get(ndlp); |
298 | 298 | ||
299 | /* Fill in payload, bp points to frame payload */ | 299 | /* Fill in payload, bp points to frame payload */ |
300 | icmd->ulpCommand = CMD_GEN_REQUEST64_CR; | 300 | icmd->ulpCommand = CMD_GEN_REQUEST64_CR; |
@@ -489,8 +489,10 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) | |||
489 | */ | 489 | */ |
490 | ndlp = lpfc_findnode_did(vport, | 490 | ndlp = lpfc_findnode_did(vport, |
491 | Did); | 491 | Did); |
492 | if (ndlp && (ndlp->nlp_type & | 492 | if (ndlp && |
493 | NLP_FCP_TARGET)) | 493 | NLP_CHK_NODE_ACT(ndlp) |
494 | && (ndlp->nlp_type & | ||
495 | NLP_FCP_TARGET)) | ||
494 | lpfc_setup_disc_node | 496 | lpfc_setup_disc_node |
495 | (vport, Did); | 497 | (vport, Did); |
496 | else if (lpfc_ns_cmd(vport, | 498 | else if (lpfc_ns_cmd(vport, |
@@ -773,7 +775,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
773 | "0267 NameServer GFF Rsp " | 775 | "0267 NameServer GFF Rsp " |
774 | "x%x Error (%d %d) Data: x%x x%x\n", | 776 | "x%x Error (%d %d) Data: x%x x%x\n", |
775 | did, irsp->ulpStatus, irsp->un.ulpWord[4], | 777 | did, irsp->ulpStatus, irsp->un.ulpWord[4], |
776 | vport->fc_flag, vport->fc_rscn_id_cnt) | 778 | vport->fc_flag, vport->fc_rscn_id_cnt); |
777 | } | 779 | } |
778 | 780 | ||
779 | /* This is a target port, unregistered port, or the GFF_ID failed */ | 781 | /* This is a target port, unregistered port, or the GFF_ID failed */ |
@@ -1064,7 +1066,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, | |||
1064 | int rc = 0; | 1066 | int rc = 0; |
1065 | 1067 | ||
1066 | ndlp = lpfc_findnode_did(vport, NameServer_DID); | 1068 | ndlp = lpfc_findnode_did(vport, NameServer_DID); |
1067 | if (ndlp == NULL || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) { | 1069 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) |
1070 | || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) { | ||
1068 | rc=1; | 1071 | rc=1; |
1069 | goto ns_cmd_exit; | 1072 | goto ns_cmd_exit; |
1070 | } | 1073 | } |
@@ -1213,8 +1216,9 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, | |||
1213 | cmpl = lpfc_cmpl_ct_cmd_rff_id; | 1216 | cmpl = lpfc_cmpl_ct_cmd_rff_id; |
1214 | break; | 1217 | break; |
1215 | } | 1218 | } |
1216 | lpfc_nlp_get(ndlp); | 1219 | /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count |
1217 | 1220 | * to hold ndlp reference for the corresponding callback function. | |
1221 | */ | ||
1218 | if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) { | 1222 | if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) { |
1219 | /* On success, The cmpl function will free the buffers */ | 1223 | /* On success, The cmpl function will free the buffers */ |
1220 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, | 1224 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, |
@@ -1222,9 +1226,13 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, | |||
1222 | cmdcode, ndlp->nlp_DID, 0); | 1226 | cmdcode, ndlp->nlp_DID, 0); |
1223 | return 0; | 1227 | return 0; |
1224 | } | 1228 | } |
1225 | |||
1226 | rc=6; | 1229 | rc=6; |
1230 | |||
1231 | /* Decrement ndlp reference count to release ndlp reference held | ||
1232 | * for the failed command's callback function. | ||
1233 | */ | ||
1227 | lpfc_nlp_put(ndlp); | 1234 | lpfc_nlp_put(ndlp); |
1235 | |||
1228 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | 1236 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); |
1229 | ns_cmd_free_bmp: | 1237 | ns_cmd_free_bmp: |
1230 | kfree(bmp); | 1238 | kfree(bmp); |
@@ -1271,6 +1279,9 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1271 | } | 1279 | } |
1272 | 1280 | ||
1273 | ndlp = lpfc_findnode_did(vport, FDMI_DID); | 1281 | ndlp = lpfc_findnode_did(vport, FDMI_DID); |
1282 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) | ||
1283 | goto fail_out; | ||
1284 | |||
1274 | if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { | 1285 | if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { |
1275 | /* FDMI rsp failed */ | 1286 | /* FDMI rsp failed */ |
1276 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 1287 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
@@ -1294,6 +1305,8 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1294 | lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA); | 1305 | lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA); |
1295 | break; | 1306 | break; |
1296 | } | 1307 | } |
1308 | |||
1309 | fail_out: | ||
1297 | lpfc_ct_free_iocb(phba, cmdiocb); | 1310 | lpfc_ct_free_iocb(phba, cmdiocb); |
1298 | return; | 1311 | return; |
1299 | } | 1312 | } |
@@ -1650,12 +1663,18 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) | |||
1650 | bpl->tus.w = le32_to_cpu(bpl->tus.w); | 1663 | bpl->tus.w = le32_to_cpu(bpl->tus.w); |
1651 | 1664 | ||
1652 | cmpl = lpfc_cmpl_ct_cmd_fdmi; | 1665 | cmpl = lpfc_cmpl_ct_cmd_fdmi; |
1653 | lpfc_nlp_get(ndlp); | ||
1654 | 1666 | ||
1667 | /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count | ||
1668 | * to hold ndlp reference for the corresponding callback function. | ||
1669 | */ | ||
1655 | if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0)) | 1670 | if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0)) |
1656 | return 0; | 1671 | return 0; |
1657 | 1672 | ||
1673 | /* Decrement ndlp reference count to release ndlp reference held | ||
1674 | * for the failed command's callback function. | ||
1675 | */ | ||
1658 | lpfc_nlp_put(ndlp); | 1676 | lpfc_nlp_put(ndlp); |
1677 | |||
1659 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | 1678 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); |
1660 | fdmi_cmd_free_bmp: | 1679 | fdmi_cmd_free_bmp: |
1661 | kfree(bmp); | 1680 | kfree(bmp); |
@@ -1698,7 +1717,7 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport) | |||
1698 | struct lpfc_nodelist *ndlp; | 1717 | struct lpfc_nodelist *ndlp; |
1699 | 1718 | ||
1700 | ndlp = lpfc_findnode_did(vport, FDMI_DID); | 1719 | ndlp = lpfc_findnode_did(vport, FDMI_DID); |
1701 | if (ndlp) { | 1720 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { |
1702 | if (init_utsname()->nodename[0] != '\0') | 1721 | if (init_utsname()->nodename[0] != '\0') |
1703 | lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); | 1722 | lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); |
1704 | else | 1723 | else |
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index cfe81c50529a..2db0b74b6fad 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -73,6 +73,12 @@ struct lpfc_nodelist { | |||
73 | uint8_t nlp_fcp_info; /* class info, bits 0-3 */ | 73 | uint8_t nlp_fcp_info; /* class info, bits 0-3 */ |
74 | #define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ | 74 | #define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ |
75 | 75 | ||
76 | uint16_t nlp_usg_map; /* ndlp management usage bitmap */ | ||
77 | #define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */ | ||
78 | #define NLP_USG_IACT_REQ_BIT 0x2 /* Request to inactivate ndlp */ | ||
79 | #define NLP_USG_FREE_REQ_BIT 0x4 /* Request to invoke ndlp memory free */ | ||
80 | #define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */ | ||
81 | |||
76 | struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ | 82 | struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ |
77 | struct fc_rport *rport; /* Corresponding FC transport | 83 | struct fc_rport *rport; /* Corresponding FC transport |
78 | port structure */ | 84 | port structure */ |
@@ -85,25 +91,51 @@ struct lpfc_nodelist { | |||
85 | }; | 91 | }; |
86 | 92 | ||
87 | /* Defines for nlp_flag (uint32) */ | 93 | /* Defines for nlp_flag (uint32) */ |
88 | #define NLP_PLOGI_SND 0x20 /* sent PLOGI request for this entry */ | 94 | #define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ |
89 | #define NLP_PRLI_SND 0x40 /* sent PRLI request for this entry */ | 95 | #define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ |
90 | #define NLP_ADISC_SND 0x80 /* sent ADISC request for this entry */ | 96 | #define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */ |
91 | #define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */ | 97 | #define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */ |
92 | #define NLP_RNID_SND 0x400 /* sent RNID request for this entry */ | 98 | #define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */ |
93 | #define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */ | 99 | #define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ |
94 | #define NLP_DEFER_RM 0x10000 /* Remove this ndlp if no longer used */ | 100 | #define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */ |
95 | #define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */ | 101 | #define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ |
96 | #define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */ | 102 | #define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ |
97 | #define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */ | 103 | #define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */ |
98 | #define NLP_LOGO_ACC 0x100000 /* Process LOGO after ACC completes */ | 104 | #define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */ |
99 | #define NLP_TGT_NO_SCSIID 0x200000 /* good PRLI but no binding for scsid */ | 105 | #define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */ |
100 | #define NLP_ACC_REGLOGIN 0x1000000 /* Issue Reg Login after successful | 106 | #define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful |
101 | ACC */ | 107 | ACC */ |
102 | #define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from | 108 | #define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from |
103 | NPR list */ | 109 | NPR list */ |
104 | #define NLP_RM_DFLT_RPI 0x4000000 /* need to remove leftover dflt RPI */ | 110 | #define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */ |
105 | #define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */ | 111 | #define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ |
106 | #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ | 112 | #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ |
113 | #define NLP_SC_REQ 0x20000000 /* Target requires authentication */ | ||
114 | |||
115 | /* ndlp usage management macros */ | ||
116 | #define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ | ||
117 | & NLP_USG_NODE_ACT_BIT) \ | ||
118 | && \ | ||
119 | !((ndlp)->nlp_usg_map \ | ||
120 | & NLP_USG_FREE_ACK_BIT)) | ||
121 | #define NLP_SET_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \ | ||
122 | |= NLP_USG_NODE_ACT_BIT) | ||
123 | #define NLP_INT_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \ | ||
124 | = NLP_USG_NODE_ACT_BIT) | ||
125 | #define NLP_CLR_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \ | ||
126 | &= ~NLP_USG_NODE_ACT_BIT) | ||
127 | #define NLP_CHK_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \ | ||
128 | & NLP_USG_IACT_REQ_BIT) | ||
129 | #define NLP_SET_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \ | ||
130 | |= NLP_USG_IACT_REQ_BIT) | ||
131 | #define NLP_CHK_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \ | ||
132 | & NLP_USG_FREE_REQ_BIT) | ||
133 | #define NLP_SET_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \ | ||
134 | |= NLP_USG_FREE_REQ_BIT) | ||
135 | #define NLP_CHK_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \ | ||
136 | & NLP_USG_FREE_ACK_BIT) | ||
137 | #define NLP_SET_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \ | ||
138 | |= NLP_USG_FREE_ACK_BIT) | ||
107 | 139 | ||
108 | /* There are 4 different double linked lists nodelist entries can reside on. | 140 | /* There are 4 different double linked lists nodelist entries can reside on. |
109 | * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used | 141 | * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used |
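Note on the lpfc_disc.h hunk above: the new nlp_usg_map bitmap tracks a node's lifecycle with four bits, and NLP_CHK_NODE_ACT() treats a node as usable only while the ACT bit is set and the FREE_ACK bit is clear. A minimal sketch of that test follows; struct fake_node and the renamed macros are stand-ins, not the real struct lpfc_nodelist or lpfc macros.

/* Sketch only: how the usage-map bits compose in the active-node test. */
#include <stdio.h>
#include <stdint.h>

#define USG_NODE_ACT_BIT        0x1     /* node is actively used */
#define USG_IACT_REQ_BIT        0x2     /* request to inactivate node */
#define USG_FREE_REQ_BIT        0x4     /* request to free node memory */
#define USG_FREE_ACK_BIT        0x8     /* node memory free has been invoked */

struct fake_node {
        uint16_t usg_map;
};

#define CHK_NODE_ACT(n) (((n)->usg_map & USG_NODE_ACT_BIT) && \
                         !((n)->usg_map & USG_FREE_ACK_BIT))

int main(void)
{
        struct fake_node n = { .usg_map = 0 };

        n.usg_map |= USG_NODE_ACT_BIT;          /* like NLP_SET_NODE_ACT() */
        printf("active after set:      %d\n", CHK_NODE_ACT(&n));   /* prints 1 */

        n.usg_map |= USG_FREE_ACK_BIT;          /* like NLP_SET_FREE_ACK() */
        printf("active after free ack: %d\n", CHK_NODE_ACT(&n));   /* prints 0 */
        return 0;
}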
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index c6b739dc6bc3..cbb68a942255 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -113,6 +113,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, | |||
113 | 113 | ||
114 | if (elsiocb == NULL) | 114 | if (elsiocb == NULL) |
115 | return NULL; | 115 | return NULL; |
116 | |||
116 | icmd = &elsiocb->iocb; | 117 | icmd = &elsiocb->iocb; |
117 | 118 | ||
118 | /* fill in BDEs for command */ | 119 | /* fill in BDEs for command */ |
@@ -134,9 +135,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, | |||
134 | if (!prsp || !prsp->virt) | 135 | if (!prsp || !prsp->virt) |
135 | goto els_iocb_free_prsp_exit; | 136 | goto els_iocb_free_prsp_exit; |
136 | INIT_LIST_HEAD(&prsp->list); | 137 | INIT_LIST_HEAD(&prsp->list); |
137 | } else { | 138 | } else |
138 | prsp = NULL; | 139 | prsp = NULL; |
139 | } | ||
140 | 140 | ||
141 | /* Allocate buffer for Buffer ptr list */ | 141 | /* Allocate buffer for Buffer ptr list */ |
142 | pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | 142 | pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); |
@@ -246,7 +246,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) | |||
246 | 246 | ||
247 | sp = &phba->fc_fabparam; | 247 | sp = &phba->fc_fabparam; |
248 | ndlp = lpfc_findnode_did(vport, Fabric_DID); | 248 | ndlp = lpfc_findnode_did(vport, Fabric_DID); |
249 | if (!ndlp) { | 249 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
250 | err = 1; | 250 | err = 1; |
251 | goto fail; | 251 | goto fail; |
252 | } | 252 | } |
@@ -282,6 +282,9 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) | |||
282 | 282 | ||
283 | mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; | 283 | mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; |
284 | mbox->vport = vport; | 284 | mbox->vport = vport; |
285 | /* increment the reference count on ndlp to hold reference | ||
286 | * for the callback routine. | ||
287 | */ | ||
285 | mbox->context2 = lpfc_nlp_get(ndlp); | 288 | mbox->context2 = lpfc_nlp_get(ndlp); |
286 | 289 | ||
287 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); | 290 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
@@ -293,6 +296,9 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) | |||
293 | return 0; | 296 | return 0; |
294 | 297 | ||
295 | fail_issue_reg_login: | 298 | fail_issue_reg_login: |
299 | /* decrement the reference count on ndlp just incremented | ||
300 | * for the failed mbox command. | ||
301 | */ | ||
296 | lpfc_nlp_put(ndlp); | 302 | lpfc_nlp_put(ndlp); |
297 | mp = (struct lpfc_dmabuf *) mbox->context1; | 303 | mp = (struct lpfc_dmabuf *) mbox->context1; |
298 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 304 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
@@ -381,6 +387,8 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
381 | */ | 387 | */ |
382 | list_for_each_entry_safe(np, next_np, | 388 | list_for_each_entry_safe(np, next_np, |
383 | &vport->fc_nodes, nlp_listp) { | 389 | &vport->fc_nodes, nlp_listp) { |
390 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
391 | continue; | ||
384 | if ((np->nlp_state != NLP_STE_NPR_NODE) || | 392 | if ((np->nlp_state != NLP_STE_NPR_NODE) || |
385 | !(np->nlp_flag & NLP_NPR_ADISC)) | 393 | !(np->nlp_flag & NLP_NPR_ADISC)) |
386 | continue; | 394 | continue; |
@@ -456,6 +464,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
456 | mempool_free(mbox, phba->mbox_mem_pool); | 464 | mempool_free(mbox, phba->mbox_mem_pool); |
457 | goto fail; | 465 | goto fail; |
458 | } | 466 | } |
467 | /* Decrement ndlp reference count indicating that ndlp can be | ||
468 | * safely released when other references to it are done. | ||
469 | */ | ||
459 | lpfc_nlp_put(ndlp); | 470 | lpfc_nlp_put(ndlp); |
460 | 471 | ||
461 | ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); | 472 | ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); |
@@ -467,22 +478,29 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
467 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 478 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); |
468 | if (!ndlp) | 479 | if (!ndlp) |
469 | goto fail; | 480 | goto fail; |
470 | |||
471 | lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID); | 481 | lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID); |
482 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
483 | ndlp = lpfc_enable_node(vport, ndlp, | ||
484 | NLP_STE_UNUSED_NODE); | ||
485 | if (!ndlp) | ||
486 | goto fail; | ||
472 | } | 487 | } |
473 | 488 | ||
474 | memcpy(&ndlp->nlp_portname, &sp->portName, | 489 | memcpy(&ndlp->nlp_portname, &sp->portName, |
475 | sizeof(struct lpfc_name)); | 490 | sizeof(struct lpfc_name)); |
476 | memcpy(&ndlp->nlp_nodename, &sp->nodeName, | 491 | memcpy(&ndlp->nlp_nodename, &sp->nodeName, |
477 | sizeof(struct lpfc_name)); | 492 | sizeof(struct lpfc_name)); |
493 | /* Set state will put ndlp onto node list if not already done */ | ||
478 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | 494 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
479 | spin_lock_irq(shost->host_lock); | 495 | spin_lock_irq(shost->host_lock); |
480 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; | 496 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
481 | spin_unlock_irq(shost->host_lock); | 497 | spin_unlock_irq(shost->host_lock); |
482 | } else { | 498 | } else |
483 | /* This side will wait for the PLOGI */ | 499 | /* This side will wait for the PLOGI, decrement ndlp reference |
500 | * count indicating that ndlp can be released when other | ||
501 | * references to it are done. | ||
502 | */ | ||
484 | lpfc_nlp_put(ndlp); | 503 | lpfc_nlp_put(ndlp); |
485 | } | ||
486 | 504 | ||
487 | /* If we are pt2pt with another NPort, force NPIV off! */ | 505 | /* If we are pt2pt with another NPort, force NPIV off! */ |
488 | phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; | 506 | phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; |
@@ -728,16 +746,21 @@ lpfc_initial_flogi(struct lpfc_vport *vport) | |||
728 | if (!ndlp) | 746 | if (!ndlp) |
729 | return 0; | 747 | return 0; |
730 | lpfc_nlp_init(vport, ndlp, Fabric_DID); | 748 | lpfc_nlp_init(vport, ndlp, Fabric_DID); |
731 | } else { | 749 | /* Put ndlp onto node list */ |
732 | lpfc_dequeue_node(vport, ndlp); | 750 | lpfc_enqueue_node(vport, ndlp); |
751 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
752 | /* re-setup ndlp without removing from node list */ | ||
753 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); | ||
754 | if (!ndlp) | ||
755 | return 0; | ||
733 | } | 756 | } |
734 | 757 | ||
735 | if (lpfc_issue_els_flogi(vport, ndlp, 0)) { | 758 | if (lpfc_issue_els_flogi(vport, ndlp, 0)) |
736 | /* This decrement of reference count to node shall kick off | 759 | /* This decrement of reference count to node shall kick off |
737 | * the release of the node. | 760 | * the release of the node. |
738 | */ | 761 | */ |
739 | lpfc_nlp_put(ndlp); | 762 | lpfc_nlp_put(ndlp); |
740 | } | 763 | |
741 | return 1; | 764 | return 1; |
742 | } | 765 | } |
743 | 766 | ||
@@ -755,9 +778,15 @@ lpfc_initial_fdisc(struct lpfc_vport *vport) | |||
755 | if (!ndlp) | 778 | if (!ndlp) |
756 | return 0; | 779 | return 0; |
757 | lpfc_nlp_init(vport, ndlp, Fabric_DID); | 780 | lpfc_nlp_init(vport, ndlp, Fabric_DID); |
758 | } else { | 781 | /* Put ndlp onto node list */ |
759 | lpfc_dequeue_node(vport, ndlp); | 782 | lpfc_enqueue_node(vport, ndlp); |
783 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
784 | /* re-setup ndlp without removing from node list */ | ||
785 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); | ||
786 | if (!ndlp) | ||
787 | return 0; | ||
760 | } | 788 | } |
789 | |||
761 | if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { | 790 | if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { |
762 | /* decrement node reference count to trigger the release of | 791 | /* decrement node reference count to trigger the release of |
763 | * the node. | 792 | * the node. |
@@ -816,7 +845,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | |||
816 | */ | 845 | */ |
817 | new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); | 846 | new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); |
818 | 847 | ||
819 | if (new_ndlp == ndlp) | 848 | if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) |
820 | return ndlp; | 849 | return ndlp; |
821 | 850 | ||
822 | if (!new_ndlp) { | 851 | if (!new_ndlp) { |
@@ -827,8 +856,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | |||
827 | new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); | 856 | new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); |
828 | if (!new_ndlp) | 857 | if (!new_ndlp) |
829 | return ndlp; | 858 | return ndlp; |
830 | |||
831 | lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); | 859 | lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); |
860 | } else if (!NLP_CHK_NODE_ACT(new_ndlp)) { | ||
861 | new_ndlp = lpfc_enable_node(vport, new_ndlp, | ||
862 | NLP_STE_UNUSED_NODE); | ||
863 | if (!new_ndlp) | ||
864 | return ndlp; | ||
832 | } | 865 | } |
833 | 866 | ||
834 | lpfc_unreg_rpi(vport, new_ndlp); | 867 | lpfc_unreg_rpi(vport, new_ndlp); |
@@ -839,6 +872,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | |||
839 | new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; | 872 | new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
840 | ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; | 873 | ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; |
841 | 874 | ||
875 | /* Set state will put new_ndlp on to node list if not already done */ | ||
842 | lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); | 876 | lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); |
843 | 877 | ||
844 | /* Move this back to NPR state */ | 878 | /* Move this back to NPR state */ |
@@ -912,7 +946,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
912 | irsp->un.elsreq64.remoteID); | 946 | irsp->un.elsreq64.remoteID); |
913 | 947 | ||
914 | ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); | 948 | ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); |
915 | if (!ndlp) { | 949 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
916 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 950 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
917 | "0136 PLOGI completes to NPort x%x " | 951 | "0136 PLOGI completes to NPort x%x " |
918 | "with no ndlp. Data: x%x x%x x%x\n", | 952 | "with no ndlp. Data: x%x x%x x%x\n", |
@@ -962,12 +996,11 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
962 | } | 996 | } |
963 | /* PLOGI failed */ | 997 | /* PLOGI failed */ |
964 | /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ | 998 | /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ |
965 | if (lpfc_error_lost_link(irsp)) { | 999 | if (lpfc_error_lost_link(irsp)) |
966 | rc = NLP_STE_FREED_NODE; | 1000 | rc = NLP_STE_FREED_NODE; |
967 | } else { | 1001 | else |
968 | rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, | 1002 | rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
969 | NLP_EVT_CMPL_PLOGI); | 1003 | NLP_EVT_CMPL_PLOGI); |
970 | } | ||
971 | } else { | 1004 | } else { |
972 | /* Good status, call state machine */ | 1005 | /* Good status, call state machine */ |
973 | prsp = list_entry(((struct lpfc_dmabuf *) | 1006 | prsp = list_entry(((struct lpfc_dmabuf *) |
@@ -1015,8 +1048,10 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) | |||
1015 | pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ | 1048 | pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ |
1016 | 1049 | ||
1017 | ndlp = lpfc_findnode_did(vport, did); | 1050 | ndlp = lpfc_findnode_did(vport, did); |
1018 | /* If ndlp if not NULL, we will bump the reference count on it */ | 1051 | if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) |
1052 | ndlp = NULL; | ||
1019 | 1053 | ||
1054 | /* If ndlp is not NULL, we will bump the reference count on it */ | ||
1020 | cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); | 1055 | cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); |
1021 | elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, | 1056 | elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, |
1022 | ELS_CMD_PLOGI); | 1057 | ELS_CMD_PLOGI); |
@@ -1097,18 +1132,15 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1097 | } | 1132 | } |
1098 | /* PRLI failed */ | 1133 | /* PRLI failed */ |
1099 | /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ | 1134 | /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ |
1100 | if (lpfc_error_lost_link(irsp)) { | 1135 | if (lpfc_error_lost_link(irsp)) |
1101 | goto out; | 1136 | goto out; |
1102 | } else { | 1137 | else |
1103 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, | 1138 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
1104 | NLP_EVT_CMPL_PRLI); | 1139 | NLP_EVT_CMPL_PRLI); |
1105 | } | 1140 | } else |
1106 | } else { | ||
1107 | /* Good status, call state machine */ | 1141 | /* Good status, call state machine */ |
1108 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, | 1142 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
1109 | NLP_EVT_CMPL_PRLI); | 1143 | NLP_EVT_CMPL_PRLI); |
1110 | } | ||
1111 | |||
1112 | out: | 1144 | out: |
1113 | lpfc_els_free_iocb(phba, cmdiocb); | 1145 | lpfc_els_free_iocb(phba, cmdiocb); |
1114 | return; | 1146 | return; |
@@ -1275,15 +1307,13 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1275 | } | 1307 | } |
1276 | /* ADISC failed */ | 1308 | /* ADISC failed */ |
1277 | /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ | 1309 | /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ |
1278 | if (!lpfc_error_lost_link(irsp)) { | 1310 | if (!lpfc_error_lost_link(irsp)) |
1279 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, | 1311 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
1280 | NLP_EVT_CMPL_ADISC); | 1312 | NLP_EVT_CMPL_ADISC); |
1281 | } | 1313 | } else |
1282 | } else { | ||
1283 | /* Good status, call state machine */ | 1314 | /* Good status, call state machine */ |
1284 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, | 1315 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
1285 | NLP_EVT_CMPL_ADISC); | 1316 | NLP_EVT_CMPL_ADISC); |
1286 | } | ||
1287 | 1317 | ||
1288 | if (disc && vport->num_disc_nodes) { | 1318 | if (disc && vport->num_disc_nodes) { |
1289 | /* Check to see if there are more ADISCs to be sent */ | 1319 | /* Check to see if there are more ADISCs to be sent */ |
@@ -1443,14 +1473,12 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1443 | else | 1473 | else |
1444 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, | 1474 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
1445 | NLP_EVT_CMPL_LOGO); | 1475 | NLP_EVT_CMPL_LOGO); |
1446 | } else { | 1476 | } else |
1447 | /* Good status, call state machine. | 1477 | /* Good status, call state machine. |
1448 | * This will unregister the rpi if needed. | 1478 | * This will unregister the rpi if needed. |
1449 | */ | 1479 | */ |
1450 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, | 1480 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
1451 | NLP_EVT_CMPL_LOGO); | 1481 | NLP_EVT_CMPL_LOGO); |
1452 | } | ||
1453 | |||
1454 | out: | 1482 | out: |
1455 | lpfc_els_free_iocb(phba, cmdiocb); | 1483 | lpfc_els_free_iocb(phba, cmdiocb); |
1456 | return; | 1484 | return; |
@@ -1556,11 +1584,19 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) | |||
1556 | psli = &phba->sli; | 1584 | psli = &phba->sli; |
1557 | pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ | 1585 | pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ |
1558 | cmdsize = (sizeof(uint32_t) + sizeof(SCR)); | 1586 | cmdsize = (sizeof(uint32_t) + sizeof(SCR)); |
1559 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | ||
1560 | if (!ndlp) | ||
1561 | return 1; | ||
1562 | 1587 | ||
1563 | lpfc_nlp_init(vport, ndlp, nportid); | 1588 | ndlp = lpfc_findnode_did(vport, nportid); |
1589 | if (!ndlp) { | ||
1590 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | ||
1591 | if (!ndlp) | ||
1592 | return 1; | ||
1593 | lpfc_nlp_init(vport, ndlp, nportid); | ||
1594 | lpfc_enqueue_node(vport, ndlp); | ||
1595 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
1596 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); | ||
1597 | if (!ndlp) | ||
1598 | return 1; | ||
1599 | } | ||
1564 | 1600 | ||
1565 | elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, | 1601 | elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, |
1566 | ndlp->nlp_DID, ELS_CMD_SCR); | 1602 | ndlp->nlp_DID, ELS_CMD_SCR); |
@@ -1623,11 +1659,19 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) | |||
1623 | psli = &phba->sli; | 1659 | psli = &phba->sli; |
1624 | pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ | 1660 | pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ |
1625 | cmdsize = (sizeof(uint32_t) + sizeof(FARP)); | 1661 | cmdsize = (sizeof(uint32_t) + sizeof(FARP)); |
1626 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | ||
1627 | if (!ndlp) | ||
1628 | return 1; | ||
1629 | 1662 | ||
1630 | lpfc_nlp_init(vport, ndlp, nportid); | 1663 | ndlp = lpfc_findnode_did(vport, nportid); |
1664 | if (!ndlp) { | ||
1665 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | ||
1666 | if (!ndlp) | ||
1667 | return 1; | ||
1668 | lpfc_nlp_init(vport, ndlp, nportid); | ||
1669 | lpfc_enqueue_node(vport, ndlp); | ||
1670 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
1671 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); | ||
1672 | if (!ndlp) | ||
1673 | return 1; | ||
1674 | } | ||
1631 | 1675 | ||
1632 | elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, | 1676 | elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, |
1633 | ndlp->nlp_DID, ELS_CMD_RNID); | 1677 | ndlp->nlp_DID, ELS_CMD_RNID); |
@@ -1657,7 +1701,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) | |||
1657 | memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); | 1701 | memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); |
1658 | memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); | 1702 | memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); |
1659 | ondlp = lpfc_findnode_did(vport, nportid); | 1703 | ondlp = lpfc_findnode_did(vport, nportid); |
1660 | if (ondlp) { | 1704 | if (ondlp && NLP_CHK_NODE_ACT(ondlp)) { |
1661 | memcpy(&fp->OportName, &ondlp->nlp_portname, | 1705 | memcpy(&fp->OportName, &ondlp->nlp_portname, |
1662 | sizeof(struct lpfc_name)); | 1706 | sizeof(struct lpfc_name)); |
1663 | memcpy(&fp->OnodeName, &ondlp->nlp_nodename, | 1707 | memcpy(&fp->OnodeName, &ondlp->nlp_nodename, |
@@ -1690,6 +1734,7 @@ void | |||
1690 | lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) | 1734 | lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) |
1691 | { | 1735 | { |
1692 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 1736 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
1737 | struct lpfc_work_evt *evtp; | ||
1693 | 1738 | ||
1694 | spin_lock_irq(shost->host_lock); | 1739 | spin_lock_irq(shost->host_lock); |
1695 | nlp->nlp_flag &= ~NLP_DELAY_TMO; | 1740 | nlp->nlp_flag &= ~NLP_DELAY_TMO; |
@@ -1697,8 +1742,12 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) | |||
1697 | del_timer_sync(&nlp->nlp_delayfunc); | 1742 | del_timer_sync(&nlp->nlp_delayfunc); |
1698 | nlp->nlp_last_elscmd = 0; | 1743 | nlp->nlp_last_elscmd = 0; |
1699 | 1744 | ||
1700 | if (!list_empty(&nlp->els_retry_evt.evt_listp)) | 1745 | if (!list_empty(&nlp->els_retry_evt.evt_listp)) { |
1701 | list_del_init(&nlp->els_retry_evt.evt_listp); | 1746 | list_del_init(&nlp->els_retry_evt.evt_listp); |
1747 | /* Decrement nlp reference count held for the delayed retry */ | ||
1748 | evtp = &nlp->els_retry_evt; | ||
1749 | lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); | ||
1750 | } | ||
1702 | 1751 | ||
1703 | if (nlp->nlp_flag & NLP_NPR_2B_DISC) { | 1752 | if (nlp->nlp_flag & NLP_NPR_2B_DISC) { |
1704 | spin_lock_irq(shost->host_lock); | 1753 | spin_lock_irq(shost->host_lock); |
@@ -1842,13 +1891,14 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1842 | cmd = *elscmd++; | 1891 | cmd = *elscmd++; |
1843 | } | 1892 | } |
1844 | 1893 | ||
1845 | if (ndlp) | 1894 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) |
1846 | did = ndlp->nlp_DID; | 1895 | did = ndlp->nlp_DID; |
1847 | else { | 1896 | else { |
1848 | /* We should only hit this case for retrying PLOGI */ | 1897 | /* We should only hit this case for retrying PLOGI */ |
1849 | did = irsp->un.elsreq64.remoteID; | 1898 | did = irsp->un.elsreq64.remoteID; |
1850 | ndlp = lpfc_findnode_did(vport, did); | 1899 | ndlp = lpfc_findnode_did(vport, did); |
1851 | if (!ndlp && (cmd != ELS_CMD_PLOGI)) | 1900 | if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp)) |
1901 | && (cmd != ELS_CMD_PLOGI)) | ||
1852 | return 1; | 1902 | return 1; |
1853 | } | 1903 | } |
1854 | 1904 | ||
@@ -1870,18 +1920,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1870 | break; | 1920 | break; |
1871 | 1921 | ||
1872 | case IOERR_ILLEGAL_COMMAND: | 1922 | case IOERR_ILLEGAL_COMMAND: |
1873 | if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) && | 1923 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
1874 | (cmd == ELS_CMD_FDISC)) { | 1924 | "0124 Retry illegal cmd x%x " |
1875 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 1925 | "retry:x%x delay:x%x\n", |
1876 | "0124 FDISC failed (3/6) " | 1926 | cmd, cmdiocb->retry, delay); |
1877 | "retrying...\n"); | 1927 | retry = 1; |
1878 | lpfc_mbx_unreg_vpi(vport); | 1928 | /* All command's retry policy */ |
1879 | retry = 1; | 1929 | maxretry = 8; |
1880 | /* FDISC retry policy */ | 1930 | if (cmdiocb->retry > 2) |
1881 | maxretry = 48; | 1931 | delay = 1000; |
1882 | if (cmdiocb->retry >= 32) | ||
1883 | delay = 1000; | ||
1884 | } | ||
1885 | break; | 1932 | break; |
1886 | 1933 | ||
1887 | case IOERR_NO_RESOURCES: | 1934 | case IOERR_NO_RESOURCES: |
@@ -1967,6 +2014,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1967 | break; | 2014 | break; |
1968 | 2015 | ||
1969 | case LSRJT_LOGICAL_ERR: | 2016 | case LSRJT_LOGICAL_ERR: |
2017 | /* There are some cases where switches return this | ||
2018 | * error when they are not ready and should be returning | ||
2019 | * Logical Busy. We should delay every time. | ||
2020 | */ | ||
2021 | if (cmd == ELS_CMD_FDISC && | ||
2022 | stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { | ||
2023 | maxretry = 3; | ||
2024 | delay = 1000; | ||
2025 | retry = 1; | ||
2026 | break; | ||
2027 | } | ||
1970 | case LSRJT_PROTOCOL_ERR: | 2028 | case LSRJT_PROTOCOL_ERR: |
1971 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && | 2029 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && |
1972 | (cmd == ELS_CMD_FDISC) && | 2030 | (cmd == ELS_CMD_FDISC) && |
@@ -1996,7 +2054,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1996 | retry = 1; | 2054 | retry = 1; |
1997 | 2055 | ||
1998 | if ((cmd == ELS_CMD_FLOGI) && | 2056 | if ((cmd == ELS_CMD_FLOGI) && |
1999 | (phba->fc_topology != TOPOLOGY_LOOP)) { | 2057 | (phba->fc_topology != TOPOLOGY_LOOP) && |
2058 | !lpfc_error_lost_link(irsp)) { | ||
2000 | /* FLOGI retry policy */ | 2059 | /* FLOGI retry policy */ |
2001 | retry = 1; | 2060 | retry = 1; |
2002 | maxretry = 48; | 2061 | maxretry = 48; |
@@ -2322,6 +2381,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2322 | if ((rspiocb->iocb.ulpStatus == 0) | 2381 | if ((rspiocb->iocb.ulpStatus == 0) |
2323 | && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { | 2382 | && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { |
2324 | lpfc_unreg_rpi(vport, ndlp); | 2383 | lpfc_unreg_rpi(vport, ndlp); |
2384 | /* Increment reference count to ndlp to hold the | ||
2385 | * reference to ndlp for the callback function. | ||
2386 | */ | ||
2325 | mbox->context2 = lpfc_nlp_get(ndlp); | 2387 | mbox->context2 = lpfc_nlp_get(ndlp); |
2326 | mbox->vport = vport; | 2388 | mbox->vport = vport; |
2327 | if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { | 2389 | if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { |
@@ -2335,9 +2397,13 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2335 | NLP_STE_REG_LOGIN_ISSUE); | 2397 | NLP_STE_REG_LOGIN_ISSUE); |
2336 | } | 2398 | } |
2337 | if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) | 2399 | if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) |
2338 | != MBX_NOT_FINISHED) { | 2400 | != MBX_NOT_FINISHED) |
2339 | goto out; | 2401 | goto out; |
2340 | } | 2402 | else |
2403 | /* Decrement the ndlp reference count we | ||
2404 | * set for this failed mailbox command. | ||
2405 | */ | ||
2406 | lpfc_nlp_put(ndlp); | ||
2341 | 2407 | ||
2342 | /* ELS rsp: Cannot issue reg_login for <NPortid> */ | 2408 | /* ELS rsp: Cannot issue reg_login for <NPortid> */ |
2343 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 2409 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
@@ -2796,6 +2862,8 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport) | |||
2796 | 2862 | ||
2797 | /* go thru NPR nodes and issue any remaining ELS ADISCs */ | 2863 | /* go thru NPR nodes and issue any remaining ELS ADISCs */ |
2798 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { | 2864 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { |
2865 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
2866 | continue; | ||
2799 | if (ndlp->nlp_state == NLP_STE_NPR_NODE && | 2867 | if (ndlp->nlp_state == NLP_STE_NPR_NODE && |
2800 | (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && | 2868 | (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && |
2801 | (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { | 2869 | (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { |
@@ -2833,6 +2901,8 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) | |||
2833 | 2901 | ||
2834 | /* go thru NPR nodes and issue any remaining ELS PLOGIs */ | 2902 | /* go thru NPR nodes and issue any remaining ELS PLOGIs */ |
2835 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { | 2903 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { |
2904 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
2905 | continue; | ||
2836 | if (ndlp->nlp_state == NLP_STE_NPR_NODE && | 2906 | if (ndlp->nlp_state == NLP_STE_NPR_NODE && |
2837 | (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && | 2907 | (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && |
2838 | (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && | 2908 | (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && |
@@ -2869,6 +2939,16 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport) | |||
2869 | struct lpfc_hba *phba = vport->phba; | 2939 | struct lpfc_hba *phba = vport->phba; |
2870 | int i; | 2940 | int i; |
2871 | 2941 | ||
2942 | spin_lock_irq(shost->host_lock); | ||
2943 | if (vport->fc_rscn_flush) { | ||
2944 | /* Another thread is walking fc_rscn_id_list on this vport */ | ||
2945 | spin_unlock_irq(shost->host_lock); | ||
2946 | return; | ||
2947 | } | ||
2948 | /* Indicate we are walking lpfc_els_flush_rscn on this vport */ | ||
2949 | vport->fc_rscn_flush = 1; | ||
2950 | spin_unlock_irq(shost->host_lock); | ||
2951 | |||
2872 | for (i = 0; i < vport->fc_rscn_id_cnt; i++) { | 2952 | for (i = 0; i < vport->fc_rscn_id_cnt; i++) { |
2873 | lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); | 2953 | lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); |
2874 | vport->fc_rscn_id_list[i] = NULL; | 2954 | vport->fc_rscn_id_list[i] = NULL; |
@@ -2878,6 +2958,8 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport) | |||
2878 | vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); | 2958 | vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); |
2879 | spin_unlock_irq(shost->host_lock); | 2959 | spin_unlock_irq(shost->host_lock); |
2880 | lpfc_can_disctmo(vport); | 2960 | lpfc_can_disctmo(vport); |
2961 | /* Indicate we are done walking this fc_rscn_id_list */ | ||
2962 | vport->fc_rscn_flush = 0; | ||
2881 | } | 2963 | } |
2882 | 2964 | ||
2883 | int | 2965 | int |
@@ -2887,6 +2969,7 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) | |||
2887 | D_ID rscn_did; | 2969 | D_ID rscn_did; |
2888 | uint32_t *lp; | 2970 | uint32_t *lp; |
2889 | uint32_t payload_len, i; | 2971 | uint32_t payload_len, i; |
2972 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
2890 | 2973 | ||
2891 | ns_did.un.word = did; | 2974 | ns_did.un.word = did; |
2892 | 2975 | ||
@@ -2898,6 +2981,15 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) | |||
2898 | if (vport->fc_flag & FC_RSCN_DISCOVERY) | 2981 | if (vport->fc_flag & FC_RSCN_DISCOVERY) |
2899 | return did; | 2982 | return did; |
2900 | 2983 | ||
2984 | spin_lock_irq(shost->host_lock); | ||
2985 | if (vport->fc_rscn_flush) { | ||
2986 | /* Another thread is walking fc_rscn_id_list on this vport */ | ||
2987 | spin_unlock_irq(shost->host_lock); | ||
2988 | return 0; | ||
2989 | } | ||
2990 | /* Indicate we are walking fc_rscn_id_list on this vport */ | ||
2991 | vport->fc_rscn_flush = 1; | ||
2992 | spin_unlock_irq(shost->host_lock); | ||
2901 | for (i = 0; i < vport->fc_rscn_id_cnt; i++) { | 2993 | for (i = 0; i < vport->fc_rscn_id_cnt; i++) { |
2902 | lp = vport->fc_rscn_id_list[i]->virt; | 2994 | lp = vport->fc_rscn_id_list[i]->virt; |
2903 | payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); | 2995 | payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); |
@@ -2908,16 +3000,16 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) | |||
2908 | switch (rscn_did.un.b.resv) { | 3000 | switch (rscn_did.un.b.resv) { |
2909 | case 0: /* Single N_Port ID affected */ | 3001 | case 0: /* Single N_Port ID affected */ |
2910 | if (ns_did.un.word == rscn_did.un.word) | 3002 | if (ns_did.un.word == rscn_did.un.word) |
2911 | return did; | 3003 | goto return_did_out; |
2912 | break; | 3004 | break; |
2913 | case 1: /* Whole N_Port Area affected */ | 3005 | case 1: /* Whole N_Port Area affected */ |
2914 | if ((ns_did.un.b.domain == rscn_did.un.b.domain) | 3006 | if ((ns_did.un.b.domain == rscn_did.un.b.domain) |
2915 | && (ns_did.un.b.area == rscn_did.un.b.area)) | 3007 | && (ns_did.un.b.area == rscn_did.un.b.area)) |
2916 | return did; | 3008 | goto return_did_out; |
2917 | break; | 3009 | break; |
2918 | case 2: /* Whole N_Port Domain affected */ | 3010 | case 2: /* Whole N_Port Domain affected */ |
2919 | if (ns_did.un.b.domain == rscn_did.un.b.domain) | 3011 | if (ns_did.un.b.domain == rscn_did.un.b.domain) |
2920 | return did; | 3012 | goto return_did_out; |
2921 | break; | 3013 | break; |
2922 | default: | 3014 | default: |
2923 | /* Unknown Identifier in RSCN node */ | 3015 | /* Unknown Identifier in RSCN node */ |
@@ -2926,11 +3018,17 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) | |||
2926 | "RSCN payload Data: x%x\n", | 3018 | "RSCN payload Data: x%x\n", |
2927 | rscn_did.un.word); | 3019 | rscn_did.un.word); |
2928 | case 3: /* Whole Fabric affected */ | 3020 | case 3: /* Whole Fabric affected */ |
2929 | return did; | 3021 | goto return_did_out; |
2930 | } | 3022 | } |
2931 | } | 3023 | } |
2932 | } | 3024 | } |
3025 | /* Indicate we are done with walking fc_rscn_id_list on this vport */ | ||
3026 | vport->fc_rscn_flush = 0; | ||
2933 | return 0; | 3027 | return 0; |
3028 | return_did_out: | ||
3029 | /* Indicate we are done with walking fc_rscn_id_list on this vport */ | ||
3030 | vport->fc_rscn_flush = 0; | ||
3031 | return did; | ||
2934 | } | 3032 | } |
2935 | 3033 | ||
2936 | static int | 3034 | static int |
@@ -2943,7 +3041,8 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport) | |||
2943 | */ | 3041 | */ |
2944 | 3042 | ||
2945 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | 3043 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
2946 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE || | 3044 | if (!NLP_CHK_NODE_ACT(ndlp) || |
3045 | ndlp->nlp_state == NLP_STE_UNUSED_NODE || | ||
2947 | lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0) | 3046 | lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0) |
2948 | continue; | 3047 | continue; |
2949 | 3048 | ||
@@ -2971,7 +3070,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
2971 | uint32_t *lp, *datap; | 3070 | uint32_t *lp, *datap; |
2972 | IOCB_t *icmd; | 3071 | IOCB_t *icmd; |
2973 | uint32_t payload_len, length, nportid, *cmd; | 3072 | uint32_t payload_len, length, nportid, *cmd; |
2974 | int rscn_cnt = vport->fc_rscn_id_cnt; | 3073 | int rscn_cnt; |
2975 | int rscn_id = 0, hba_id = 0; | 3074 | int rscn_id = 0, hba_id = 0; |
2976 | int i; | 3075 | int i; |
2977 | 3076 | ||
@@ -2984,7 +3083,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
2984 | /* RSCN received */ | 3083 | /* RSCN received */ |
2985 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 3084 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
2986 | "0214 RSCN received Data: x%x x%x x%x x%x\n", | 3085 | "0214 RSCN received Data: x%x x%x x%x x%x\n", |
2987 | vport->fc_flag, payload_len, *lp, rscn_cnt); | 3086 | vport->fc_flag, payload_len, *lp, |
3087 | vport->fc_rscn_id_cnt); | ||
2988 | for (i = 0; i < payload_len/sizeof(uint32_t); i++) | 3088 | for (i = 0; i < payload_len/sizeof(uint32_t); i++) |
2989 | fc_host_post_event(shost, fc_get_event_number(), | 3089 | fc_host_post_event(shost, fc_get_event_number(), |
2990 | FCH_EVT_RSCN, lp[i]); | 3090 | FCH_EVT_RSCN, lp[i]); |
@@ -3022,7 +3122,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3022 | "0214 Ignore RSCN " | 3122 | "0214 Ignore RSCN " |
3023 | "Data: x%x x%x x%x x%x\n", | 3123 | "Data: x%x x%x x%x x%x\n", |
3024 | vport->fc_flag, payload_len, | 3124 | vport->fc_flag, payload_len, |
3025 | *lp, rscn_cnt); | 3125 | *lp, vport->fc_rscn_id_cnt); |
3026 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, | 3126 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, |
3027 | "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", | 3127 | "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", |
3028 | ndlp->nlp_DID, vport->port_state, | 3128 | ndlp->nlp_DID, vport->port_state, |
@@ -3034,6 +3134,18 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3034 | } | 3134 | } |
3035 | } | 3135 | } |
3036 | 3136 | ||
3137 | spin_lock_irq(shost->host_lock); | ||
3138 | if (vport->fc_rscn_flush) { | ||
3139 | /* Another thread is walking fc_rscn_id_list on this vport */ | ||
3140 | spin_unlock_irq(shost->host_lock); | ||
3141 | vport->fc_flag |= FC_RSCN_DISCOVERY; | ||
3142 | return 0; | ||
3143 | } | ||
3144 | /* Indicate we are walking fc_rscn_id_list on this vport */ | ||
3145 | vport->fc_rscn_flush = 1; | ||
3146 | spin_unlock_irq(shost->host_lock); | ||
3147 | /* Get the array count after successfully acquiring the token */ | ||
3148 | rscn_cnt = vport->fc_rscn_id_cnt; | ||
3037 | /* If we are already processing an RSCN, save the received | 3149 | /* If we are already processing an RSCN, save the received |
3038 | * RSCN payload buffer, cmdiocb->context2 to process later. | 3150 | * RSCN payload buffer, cmdiocb->context2 to process later. |
3039 | */ | 3151 | */ |
@@ -3055,7 +3167,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3055 | if ((rscn_cnt) && | 3167 | if ((rscn_cnt) && |
3056 | (payload_len + length <= LPFC_BPL_SIZE)) { | 3168 | (payload_len + length <= LPFC_BPL_SIZE)) { |
3057 | *cmd &= ELS_CMD_MASK; | 3169 | *cmd &= ELS_CMD_MASK; |
3058 | *cmd |= be32_to_cpu(payload_len + length); | 3170 | *cmd |= cpu_to_be32(payload_len + length); |
3059 | memcpy(((uint8_t *)cmd) + length, lp, | 3171 | memcpy(((uint8_t *)cmd) + length, lp, |
3060 | payload_len); | 3172 | payload_len); |
3061 | } else { | 3173 | } else { |
@@ -3066,7 +3178,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3066 | */ | 3178 | */ |
3067 | cmdiocb->context2 = NULL; | 3179 | cmdiocb->context2 = NULL; |
3068 | } | 3180 | } |
3069 | |||
3070 | /* Deferred RSCN */ | 3181 | /* Deferred RSCN */ |
3071 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 3182 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
3072 | "0235 Deferred RSCN " | 3183 | "0235 Deferred RSCN " |
@@ -3083,9 +3194,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3083 | vport->fc_rscn_id_cnt, vport->fc_flag, | 3194 | vport->fc_rscn_id_cnt, vport->fc_flag, |
3084 | vport->port_state); | 3195 | vport->port_state); |
3085 | } | 3196 | } |
3197 | /* Indicate we are done walking fc_rscn_id_list on this vport */ | ||
3198 | vport->fc_rscn_flush = 0; | ||
3086 | /* Send back ACC */ | 3199 | /* Send back ACC */ |
3087 | lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); | 3200 | lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); |
3088 | |||
3089 | /* send RECOVERY event for ALL nodes that match RSCN payload */ | 3201 | /* send RECOVERY event for ALL nodes that match RSCN payload */ |
3090 | lpfc_rscn_recovery_check(vport); | 3202 | lpfc_rscn_recovery_check(vport); |
3091 | spin_lock_irq(shost->host_lock); | 3203 | spin_lock_irq(shost->host_lock); |
@@ -3093,7 +3205,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3093 | spin_unlock_irq(shost->host_lock); | 3205 | spin_unlock_irq(shost->host_lock); |
3094 | return 0; | 3206 | return 0; |
3095 | } | 3207 | } |
3096 | |||
3097 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, | 3208 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, |
3098 | "RCV RSCN: did:x%x/ste:x%x flg:x%x", | 3209 | "RCV RSCN: did:x%x/ste:x%x flg:x%x", |
3099 | ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); | 3210 | ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); |
@@ -3102,20 +3213,18 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3102 | vport->fc_flag |= FC_RSCN_MODE; | 3213 | vport->fc_flag |= FC_RSCN_MODE; |
3103 | spin_unlock_irq(shost->host_lock); | 3214 | spin_unlock_irq(shost->host_lock); |
3104 | vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; | 3215 | vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; |
3216 | /* Indicate we are done walking fc_rscn_id_list on this vport */ | ||
3217 | vport->fc_rscn_flush = 0; | ||
3105 | /* | 3218 | /* |
3106 | * If we zero, cmdiocb->context2, the calling routine will | 3219 | * If we zero, cmdiocb->context2, the calling routine will |
3107 | * not try to free it. | 3220 | * not try to free it. |
3108 | */ | 3221 | */ |
3109 | cmdiocb->context2 = NULL; | 3222 | cmdiocb->context2 = NULL; |
3110 | |||
3111 | lpfc_set_disctmo(vport); | 3223 | lpfc_set_disctmo(vport); |
3112 | |||
3113 | /* Send back ACC */ | 3224 | /* Send back ACC */ |
3114 | lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); | 3225 | lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); |
3115 | |||
3116 | /* send RECOVERY event for ALL nodes that match RSCN payload */ | 3226 | /* send RECOVERY event for ALL nodes that match RSCN payload */ |
3117 | lpfc_rscn_recovery_check(vport); | 3227 | lpfc_rscn_recovery_check(vport); |
3118 | |||
3119 | return lpfc_els_handle_rscn(vport); | 3228 | return lpfc_els_handle_rscn(vport); |
3120 | } | 3229 | } |
3121 | 3230 | ||
@@ -3145,7 +3254,8 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport) | |||
3145 | vport->num_disc_nodes = 0; | 3254 | vport->num_disc_nodes = 0; |
3146 | 3255 | ||
3147 | ndlp = lpfc_findnode_did(vport, NameServer_DID); | 3256 | ndlp = lpfc_findnode_did(vport, NameServer_DID); |
3148 | if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { | 3257 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) |
3258 | && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { | ||
3149 | /* Good ndlp, issue CT Request to NameServer */ | 3259 | /* Good ndlp, issue CT Request to NameServer */ |
3150 | if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) | 3260 | if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) |
3151 | /* Wait for NameServer query cmpl before we can | 3261 | /* Wait for NameServer query cmpl before we can |
@@ -3155,25 +3265,35 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport) | |||
3155 | /* If login to NameServer does not exist, issue one */ | 3265 | /* If login to NameServer does not exist, issue one */ |
3156 | /* Good status, issue PLOGI to NameServer */ | 3266 | /* Good status, issue PLOGI to NameServer */ |
3157 | ndlp = lpfc_findnode_did(vport, NameServer_DID); | 3267 | ndlp = lpfc_findnode_did(vport, NameServer_DID); |
3158 | if (ndlp) | 3268 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) |
3159 | /* Wait for NameServer login cmpl before we can | 3269 | /* Wait for NameServer login cmpl before we can |
3160 | continue */ | 3270 | continue */ |
3161 | return 1; | 3271 | return 1; |
3162 | 3272 | ||
3163 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 3273 | if (ndlp) { |
3164 | if (!ndlp) { | 3274 | ndlp = lpfc_enable_node(vport, ndlp, |
3165 | lpfc_els_flush_rscn(vport); | 3275 | NLP_STE_PLOGI_ISSUE); |
3166 | return 0; | 3276 | if (!ndlp) { |
3277 | lpfc_els_flush_rscn(vport); | ||
3278 | return 0; | ||
3279 | } | ||
3280 | ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; | ||
3167 | } else { | 3281 | } else { |
3282 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | ||
3283 | if (!ndlp) { | ||
3284 | lpfc_els_flush_rscn(vport); | ||
3285 | return 0; | ||
3286 | } | ||
3168 | lpfc_nlp_init(vport, ndlp, NameServer_DID); | 3287 | lpfc_nlp_init(vport, ndlp, NameServer_DID); |
3169 | ndlp->nlp_type |= NLP_FABRIC; | ||
3170 | ndlp->nlp_prev_state = ndlp->nlp_state; | 3288 | ndlp->nlp_prev_state = ndlp->nlp_state; |
3171 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); | 3289 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); |
3172 | lpfc_issue_els_plogi(vport, NameServer_DID, 0); | ||
3173 | /* Wait for NameServer login cmpl before we can | ||
3174 | continue */ | ||
3175 | return 1; | ||
3176 | } | 3290 | } |
3291 | ndlp->nlp_type |= NLP_FABRIC; | ||
3292 | lpfc_issue_els_plogi(vport, NameServer_DID, 0); | ||
3293 | /* Wait for NameServer login cmpl before we can | ||
3294 | * continue | ||
3295 | */ | ||
3296 | return 1; | ||
3177 | } | 3297 | } |
3178 | 3298 | ||
3179 | lpfc_els_flush_rscn(vport); | 3299 | lpfc_els_flush_rscn(vport); |
@@ -3672,6 +3792,8 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3672 | 3792 | ||
3673 | list_for_each_entry_safe(ndlp, next_ndlp, | 3793 | list_for_each_entry_safe(ndlp, next_ndlp, |
3674 | &vport->fc_nodes, nlp_listp) { | 3794 | &vport->fc_nodes, nlp_listp) { |
3795 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
3796 | continue; | ||
3675 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) | 3797 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) |
3676 | continue; | 3798 | continue; |
3677 | if (ndlp->nlp_type & NLP_FABRIC) { | 3799 | if (ndlp->nlp_type & NLP_FABRIC) { |
@@ -3697,6 +3819,8 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3697 | */ | 3819 | */ |
3698 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, | 3820 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, |
3699 | nlp_listp) { | 3821 | nlp_listp) { |
3822 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
3823 | continue; | ||
3700 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) | 3824 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) |
3701 | continue; | 3825 | continue; |
3702 | 3826 | ||
@@ -3936,7 +4060,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3936 | uint32_t cmd, did, newnode, rjt_err = 0; | 4060 | uint32_t cmd, did, newnode, rjt_err = 0; |
3937 | IOCB_t *icmd = &elsiocb->iocb; | 4061 | IOCB_t *icmd = &elsiocb->iocb; |
3938 | 4062 | ||
3939 | if (vport == NULL || elsiocb->context2 == NULL) | 4063 | if (!vport || !(elsiocb->context2)) |
3940 | goto dropit; | 4064 | goto dropit; |
3941 | 4065 | ||
3942 | newnode = 0; | 4066 | newnode = 0; |
@@ -3971,14 +4095,20 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3971 | lpfc_nlp_init(vport, ndlp, did); | 4095 | lpfc_nlp_init(vport, ndlp, did); |
3972 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | 4096 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
3973 | newnode = 1; | 4097 | newnode = 1; |
3974 | if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { | 4098 | if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) |
3975 | ndlp->nlp_type |= NLP_FABRIC; | 4099 | ndlp->nlp_type |= NLP_FABRIC; |
4100 | } else { | ||
4101 | if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
4102 | ndlp = lpfc_enable_node(vport, ndlp, | ||
4103 | NLP_STE_UNUSED_NODE); | ||
4104 | if (!ndlp) | ||
4105 | goto dropit; | ||
3976 | } | 4106 | } |
3977 | } | ||
3978 | else { | ||
3979 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { | 4107 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { |
3980 | /* This is similar to the new node path */ | 4108 | /* This is similar to the new node path */ |
3981 | lpfc_nlp_get(ndlp); | 4109 | ndlp = lpfc_nlp_get(ndlp); |
4110 | if (!ndlp) | ||
4111 | goto dropit; | ||
3982 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | 4112 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
3983 | newnode = 1; | 4113 | newnode = 1; |
3984 | } | 4114 | } |
@@ -3987,6 +4117,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3987 | phba->fc_stat.elsRcvFrame++; | 4117 | phba->fc_stat.elsRcvFrame++; |
3988 | if (elsiocb->context1) | 4118 | if (elsiocb->context1) |
3989 | lpfc_nlp_put(elsiocb->context1); | 4119 | lpfc_nlp_put(elsiocb->context1); |
4120 | |||
3990 | elsiocb->context1 = lpfc_nlp_get(ndlp); | 4121 | elsiocb->context1 = lpfc_nlp_get(ndlp); |
3991 | elsiocb->vport = vport; | 4122 | elsiocb->vport = vport; |
3992 | 4123 | ||
@@ -4007,8 +4138,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
4007 | ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); | 4138 | ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); |
4008 | 4139 | ||
4009 | if (vport->port_state < LPFC_DISC_AUTH) { | 4140 | if (vport->port_state < LPFC_DISC_AUTH) { |
4010 | rjt_err = LSRJT_UNABLE_TPC; | 4141 | if (!(phba->pport->fc_flag & FC_PT2PT) || |
4011 | break; | 4142 | (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { |
4143 | rjt_err = LSRJT_UNABLE_TPC; | ||
4144 | break; | ||
4145 | } | ||
4146 | /* We get here, and drop thru, if we are PT2PT with | ||
4147 | * another NPort and the other side has initiated | ||
4148 | * the PLOGI before responding to our FLOGI. | ||
4149 | */ | ||
4012 | } | 4150 | } |
4013 | 4151 | ||
4014 | shost = lpfc_shost_from_vport(vport); | 4152 | shost = lpfc_shost_from_vport(vport); |
@@ -4251,15 +4389,15 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
4251 | vport = lpfc_find_vport_by_vpid(phba, vpi); | 4389 | vport = lpfc_find_vport_by_vpid(phba, vpi); |
4252 | } | 4390 | } |
4253 | } | 4391 | } |
4254 | /* If there are no BDEs associated | 4392 | /* If there are no BDEs associated |
4255 | * with this IOCB, there is nothing to do. | 4393 | * with this IOCB, there is nothing to do. |
4256 | */ | 4394 | */ |
4257 | if (icmd->ulpBdeCount == 0) | 4395 | if (icmd->ulpBdeCount == 0) |
4258 | return; | 4396 | return; |
4259 | 4397 | ||
4260 | /* type of ELS cmd is first 32bit word | 4398 | /* type of ELS cmd is first 32bit word |
4261 | * in packet | 4399 | * in packet |
4262 | */ | 4400 | */ |
4263 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { | 4401 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { |
4264 | elsiocb->context2 = bdeBuf1; | 4402 | elsiocb->context2 = bdeBuf1; |
4265 | } else { | 4403 | } else { |
@@ -4314,6 +4452,18 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) | |||
4314 | } | 4452 | } |
4315 | lpfc_nlp_init(vport, ndlp, NameServer_DID); | 4453 | lpfc_nlp_init(vport, ndlp, NameServer_DID); |
4316 | ndlp->nlp_type |= NLP_FABRIC; | 4454 | ndlp->nlp_type |= NLP_FABRIC; |
4455 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
4456 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); | ||
4457 | if (!ndlp) { | ||
4458 | if (phba->fc_topology == TOPOLOGY_LOOP) { | ||
4459 | lpfc_disc_start(vport); | ||
4460 | return; | ||
4461 | } | ||
4462 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | ||
4463 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | ||
4464 | "0348 NameServer login: node freed\n"); | ||
4465 | return; | ||
4466 | } | ||
4317 | } | 4467 | } |
4318 | 4468 | ||
4319 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); | 4469 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); |
@@ -4360,6 +4510,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
4360 | switch (mb->mbxStatus) { | 4510 | switch (mb->mbxStatus) { |
4361 | case 0x11: /* unsupported feature */ | 4511 | case 0x11: /* unsupported feature */ |
4362 | case 0x9603: /* max_vpi exceeded */ | 4512 | case 0x9603: /* max_vpi exceeded */ |
4513 | case 0x9602: /* Link event since CLEAR_LA */ | ||
4363 | /* giving up on vport registration */ | 4514 | /* giving up on vport registration */ |
4364 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | 4515 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); |
4365 | spin_lock_irq(shost->host_lock); | 4516 | spin_lock_irq(shost->host_lock); |
@@ -4373,7 +4524,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
4373 | spin_lock_irq(shost->host_lock); | 4524 | spin_lock_irq(shost->host_lock); |
4374 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | 4525 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; |
4375 | spin_unlock_irq(shost->host_lock); | 4526 | spin_unlock_irq(shost->host_lock); |
4376 | lpfc_initial_fdisc(vport); | 4527 | if (vport->port_type == LPFC_PHYSICAL_PORT) |
4528 | lpfc_initial_flogi(vport); | ||
4529 | else | ||
4530 | lpfc_initial_fdisc(vport); | ||
4377 | break; | 4531 | break; |
4378 | } | 4532 | } |
4379 | 4533 | ||
@@ -4471,7 +4625,6 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
4471 | irsp->ulpStatus, irsp->un.ulpWord[4]); | 4625 | irsp->ulpStatus, irsp->un.ulpWord[4]); |
4472 | if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) | 4626 | if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) |
4473 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | 4627 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); |
4474 | |||
4475 | lpfc_nlp_put(ndlp); | 4628 | lpfc_nlp_put(ndlp); |
4476 | /* giving up on FDISC. Cancel discovery timer */ | 4629 | /* giving up on FDISC. Cancel discovery timer */ |
4477 | lpfc_can_disctmo(vport); | 4630 | lpfc_can_disctmo(vport); |
@@ -4492,8 +4645,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
4492 | */ | 4645 | */ |
4493 | list_for_each_entry_safe(np, next_np, | 4646 | list_for_each_entry_safe(np, next_np, |
4494 | &vport->fc_nodes, nlp_listp) { | 4647 | &vport->fc_nodes, nlp_listp) { |
4495 | if (np->nlp_state != NLP_STE_NPR_NODE | 4648 | if (!NLP_CHK_NODE_ACT(ndlp) || |
4496 | || !(np->nlp_flag & NLP_NPR_ADISC)) | 4649 | (np->nlp_state != NLP_STE_NPR_NODE) || |
4650 | !(np->nlp_flag & NLP_NPR_ADISC)) | ||
4497 | continue; | 4651 | continue; |
4498 | spin_lock_irq(shost->host_lock); | 4652 | spin_lock_irq(shost->host_lock); |
4499 | np->nlp_flag &= ~NLP_NPR_ADISC; | 4653 | np->nlp_flag &= ~NLP_NPR_ADISC; |
@@ -4599,6 +4753,8 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
4599 | { | 4753 | { |
4600 | struct lpfc_vport *vport = cmdiocb->vport; | 4754 | struct lpfc_vport *vport = cmdiocb->vport; |
4601 | IOCB_t *irsp; | 4755 | IOCB_t *irsp; |
4756 | struct lpfc_nodelist *ndlp; | ||
4757 | ndlp = (struct lpfc_nodelist *)cmdiocb->context1; | ||
4602 | 4758 | ||
4603 | irsp = &rspiocb->iocb; | 4759 | irsp = &rspiocb->iocb; |
4604 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, | 4760 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
@@ -4607,6 +4763,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
4607 | 4763 | ||
4608 | lpfc_els_free_iocb(phba, cmdiocb); | 4764 | lpfc_els_free_iocb(phba, cmdiocb); |
4609 | vport->unreg_vpi_cmpl = VPORT_ERROR; | 4765 | vport->unreg_vpi_cmpl = VPORT_ERROR; |
4766 | |||
4767 | /* Trigger the release of the ndlp after logo */ | ||
4768 | lpfc_nlp_put(ndlp); | ||
4610 | } | 4769 | } |
4611 | 4770 | ||
4612 | int | 4771 | int |
@@ -4686,11 +4845,12 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) | |||
4686 | repeat: | 4845 | repeat: |
4687 | iocb = NULL; | 4846 | iocb = NULL; |
4688 | spin_lock_irqsave(&phba->hbalock, iflags); | 4847 | spin_lock_irqsave(&phba->hbalock, iflags); |
4689 | /* Post any pending iocb to the SLI layer */ | 4848 | /* Post any pending iocb to the SLI layer */ |
4690 | if (atomic_read(&phba->fabric_iocb_count) == 0) { | 4849 | if (atomic_read(&phba->fabric_iocb_count) == 0) { |
4691 | list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), | 4850 | list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), |
4692 | list); | 4851 | list); |
4693 | if (iocb) | 4852 | if (iocb) |
4853 | /* Increment fabric iocb count to hold the position */ | ||
4694 | atomic_inc(&phba->fabric_iocb_count); | 4854 | atomic_inc(&phba->fabric_iocb_count); |
4695 | } | 4855 | } |
4696 | spin_unlock_irqrestore(&phba->hbalock, iflags); | 4856 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
@@ -4737,9 +4897,7 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba) | |||
4737 | int blocked; | 4897 | int blocked; |
4738 | 4898 | ||
4739 | blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); | 4899 | blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); |
4740 | /* Start a timer to unblock fabric | 4900 | /* Start a timer to unblock fabric iocbs after 100ms */ |
4741 | * iocbs after 100ms | ||
4742 | */ | ||
4743 | if (!blocked) | 4901 | if (!blocked) |
4744 | mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); | 4902 | mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); |
4745 | 4903 | ||
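The shortened comment above still carries the key point: test_and_set_bit() returns the previous bit value, so only the first caller to block fabric iocbs arms the 100ms unblock timer; later callers find the bit already set and leave the timer alone. A small C11 sketch of that one-shot arming pattern, where arm_unblock_timer() and the blocked flag are hypothetical stand-ins for mod_timer() and the FABRIC_COMANDS_BLOCKED bit:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdatomic.h>

    static atomic_flag blocked = ATOMIC_FLAG_INIT;

    static void arm_unblock_timer(void)
    {
        /* stand-in for mod_timer(..., jiffies + HZ/10) */
        printf("timer armed: unblock in 100ms\n");
    }

    static void block_fabric(void)
    {
        /* test-and-set returns the previous value, like test_and_set_bit() */
        bool was_blocked = atomic_flag_test_and_set(&blocked);
        if (!was_blocked)
            arm_unblock_timer();   /* only the first blocker arms the timer */
    }

    int main(void)
    {
        block_fabric();   /* arms the timer */
        block_fabric();   /* bit already set, timer left alone */
        return 0;
    }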
@@ -4787,8 +4945,8 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
4787 | 4945 | ||
4788 | atomic_dec(&phba->fabric_iocb_count); | 4946 | atomic_dec(&phba->fabric_iocb_count); |
4789 | if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { | 4947 | if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { |
4790 | /* Post any pending iocbs to HBA */ | 4948 | /* Post any pending iocbs to HBA */ |
4791 | lpfc_resume_fabric_iocbs(phba); | 4949 | lpfc_resume_fabric_iocbs(phba); |
4792 | } | 4950 | } |
4793 | } | 4951 | } |
4794 | 4952 | ||
@@ -4807,6 +4965,9 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) | |||
4807 | ready = atomic_read(&phba->fabric_iocb_count) == 0 && | 4965 | ready = atomic_read(&phba->fabric_iocb_count) == 0 && |
4808 | !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); | 4966 | !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); |
4809 | 4967 | ||
4968 | if (ready) | ||
4969 | /* Increment fabric iocb count to hold the position */ | ||
4970 | atomic_inc(&phba->fabric_iocb_count); | ||
4810 | spin_unlock_irqrestore(&phba->hbalock, iflags); | 4971 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
4811 | if (ready) { | 4972 | if (ready) { |
4812 | iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; | 4973 | iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; |
@@ -4817,7 +4978,6 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) | |||
4817 | "Fabric sched2: ste:x%x", | 4978 | "Fabric sched2: ste:x%x", |
4818 | iocb->vport->port_state, 0, 0); | 4979 | iocb->vport->port_state, 0, 0); |
4819 | 4980 | ||
4820 | atomic_inc(&phba->fabric_iocb_count); | ||
4821 | ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); | 4981 | ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); |
4822 | 4982 | ||
4823 | if (ret == IOCB_ERROR) { | 4983 | if (ret == IOCB_ERROR) { |
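The last two hunks move the atomic_inc of fabric_iocb_count inside the locked region where "ready" is computed, so the single outstanding-fabric-IOCB slot is reserved before the lock is dropped; incrementing only later (the removed line) left a window in which two submitters could both observe a count of zero. A hedged user-space analog of that reserve-under-the-lock pattern, using a pthread mutex; reserve_slot, release_slot and inflight are invented names:

    #include <stdio.h>
    #include <stdbool.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int inflight;               /* analogous to fabric_iocb_count */

    /* Try to reserve the single outstanding slot; true if the caller may issue. */
    static bool reserve_slot(void)
    {
        bool ready;

        pthread_mutex_lock(&lock);
        ready = (inflight == 0);
        if (ready)
            inflight++;                /* hold the position before unlocking */
        pthread_mutex_unlock(&lock);
        return ready;
    }

    static void release_slot(void)
    {
        pthread_mutex_lock(&lock);
        inflight--;
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        if (reserve_slot()) {
            printf("issuing command\n");
            release_slot();
        }
        if (reserve_slot())
            printf("slot free again, issuing next command\n");
        return 0;
    }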
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index dc042bd97baa..bd572d6b60af 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -272,9 +272,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) | |||
272 | if (!(vport->load_flag & FC_UNLOADING) && | 272 | if (!(vport->load_flag & FC_UNLOADING) && |
273 | !(ndlp->nlp_flag & NLP_DELAY_TMO) && | 273 | !(ndlp->nlp_flag & NLP_DELAY_TMO) && |
274 | !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && | 274 | !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && |
275 | (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) { | 275 | (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) |
276 | lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); | 276 | lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); |
277 | } | ||
278 | } | 277 | } |
279 | 278 | ||
280 | 279 | ||
@@ -566,9 +565,10 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) | |||
566 | int rc; | 565 | int rc; |
567 | 566 | ||
568 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { | 567 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { |
568 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
569 | continue; | ||
569 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) | 570 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) |
570 | continue; | 571 | continue; |
571 | |||
572 | if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || | 572 | if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || |
573 | ((vport->port_type == LPFC_NPIV_PORT) && | 573 | ((vport->port_type == LPFC_NPIV_PORT) && |
574 | (ndlp->nlp_DID == NameServer_DID))) | 574 | (ndlp->nlp_DID == NameServer_DID))) |
@@ -629,9 +629,8 @@ lpfc_linkdown(struct lpfc_hba *phba) | |||
629 | LPFC_MBOXQ_t *mb; | 629 | LPFC_MBOXQ_t *mb; |
630 | int i; | 630 | int i; |
631 | 631 | ||
632 | if (phba->link_state == LPFC_LINK_DOWN) { | 632 | if (phba->link_state == LPFC_LINK_DOWN) |
633 | return 0; | 633 | return 0; |
634 | } | ||
635 | spin_lock_irq(&phba->hbalock); | 634 | spin_lock_irq(&phba->hbalock); |
636 | if (phba->link_state > LPFC_LINK_DOWN) { | 635 | if (phba->link_state > LPFC_LINK_DOWN) { |
637 | phba->link_state = LPFC_LINK_DOWN; | 636 | phba->link_state = LPFC_LINK_DOWN; |
@@ -684,20 +683,21 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport) | |||
684 | struct lpfc_nodelist *ndlp; | 683 | struct lpfc_nodelist *ndlp; |
685 | 684 | ||
686 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | 685 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
686 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
687 | continue; | ||
687 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) | 688 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) |
688 | continue; | 689 | continue; |
689 | |||
690 | if (ndlp->nlp_type & NLP_FABRIC) { | 690 | if (ndlp->nlp_type & NLP_FABRIC) { |
691 | /* On Linkup it's safe to clean up the ndlp | 691 | /* On Linkup it's safe to clean up the ndlp |
692 | * from Fabric connections. | 692 | * from Fabric connections. |
693 | */ | 693 | */ |
694 | if (ndlp->nlp_DID != Fabric_DID) | 694 | if (ndlp->nlp_DID != Fabric_DID) |
695 | lpfc_unreg_rpi(vport, ndlp); | 695 | lpfc_unreg_rpi(vport, ndlp); |
696 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | 696 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
697 | } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { | 697 | } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { |
698 | /* Fail outstanding IO now since device is | 698 | /* Fail outstanding IO now since device is |
699 | * marked for PLOGI. | 699 | * marked for PLOGI. |
700 | */ | 700 | */ |
701 | lpfc_unreg_rpi(vport, ndlp); | 701 | lpfc_unreg_rpi(vport, ndlp); |
702 | } | 702 | } |
703 | } | 703 | } |
@@ -799,21 +799,9 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
799 | writel(control, phba->HCregaddr); | 799 | writel(control, phba->HCregaddr); |
800 | readl(phba->HCregaddr); /* flush */ | 800 | readl(phba->HCregaddr); /* flush */ |
801 | spin_unlock_irq(&phba->hbalock); | 801 | spin_unlock_irq(&phba->hbalock); |
802 | mempool_free(pmb, phba->mbox_mem_pool); | ||
802 | return; | 803 | return; |
803 | 804 | ||
804 | vport->num_disc_nodes = 0; | ||
805 | /* go thru NPR nodes and issue ELS PLOGIs */ | ||
806 | if (vport->fc_npr_cnt) | ||
807 | lpfc_els_disc_plogi(vport); | ||
808 | |||
809 | if (!vport->num_disc_nodes) { | ||
810 | spin_lock_irq(shost->host_lock); | ||
811 | vport->fc_flag &= ~FC_NDISC_ACTIVE; | ||
812 | spin_unlock_irq(shost->host_lock); | ||
813 | } | ||
814 | |||
815 | vport->port_state = LPFC_VPORT_READY; | ||
816 | |||
817 | out: | 805 | out: |
818 | /* Device Discovery completes */ | 806 | /* Device Discovery completes */ |
819 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 807 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
@@ -1133,7 +1121,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1133 | if (la->attType == AT_LINK_UP) { | 1121 | if (la->attType == AT_LINK_UP) { |
1134 | phba->fc_stat.LinkUp++; | 1122 | phba->fc_stat.LinkUp++; |
1135 | if (phba->link_flag & LS_LOOPBACK_MODE) { | 1123 | if (phba->link_flag & LS_LOOPBACK_MODE) { |
1136 | lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, | 1124 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, |
1137 | "1306 Link Up Event in loop back mode " | 1125 | "1306 Link Up Event in loop back mode " |
1138 | "x%x received Data: x%x x%x x%x x%x\n", | 1126 | "x%x received Data: x%x x%x x%x x%x\n", |
1139 | la->eventTag, phba->fc_eventTag, | 1127 | la->eventTag, phba->fc_eventTag, |
@@ -1150,11 +1138,21 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1150 | lpfc_mbx_process_link_up(phba, la); | 1138 | lpfc_mbx_process_link_up(phba, la); |
1151 | } else { | 1139 | } else { |
1152 | phba->fc_stat.LinkDown++; | 1140 | phba->fc_stat.LinkDown++; |
1153 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | 1141 | if (phba->link_flag & LS_LOOPBACK_MODE) { |
1142 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | ||
1143 | "1308 Link Down Event in loop back mode " | ||
1144 | "x%x received " | ||
1145 | "Data: x%x x%x x%x\n", | ||
1146 | la->eventTag, phba->fc_eventTag, | ||
1147 | phba->pport->port_state, vport->fc_flag); | ||
1148 | } | ||
1149 | else { | ||
1150 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | ||
1154 | "1305 Link Down Event x%x received " | 1151 | "1305 Link Down Event x%x received " |
1155 | "Data: x%x x%x x%x\n", | 1152 | "Data: x%x x%x x%x\n", |
1156 | la->eventTag, phba->fc_eventTag, | 1153 | la->eventTag, phba->fc_eventTag, |
1157 | phba->pport->port_state, vport->fc_flag); | 1154 | phba->pport->port_state, vport->fc_flag); |
1155 | } | ||
1158 | lpfc_mbx_issue_link_down(phba); | 1156 | lpfc_mbx_issue_link_down(phba); |
1159 | } | 1157 | } |
1160 | 1158 | ||
@@ -1305,7 +1303,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1305 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 1303 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
1306 | kfree(mp); | 1304 | kfree(mp); |
1307 | mempool_free(pmb, phba->mbox_mem_pool); | 1305 | mempool_free(pmb, phba->mbox_mem_pool); |
1308 | lpfc_nlp_put(ndlp); | ||
1309 | 1306 | ||
1310 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 1307 | if (phba->fc_topology == TOPOLOGY_LOOP) { |
1311 | /* FLOGI failed, use loop map to make discovery list */ | 1308 | /* FLOGI failed, use loop map to make discovery list */ |
@@ -1313,6 +1310,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1313 | 1310 | ||
1314 | /* Start discovery */ | 1311 | /* Start discovery */ |
1315 | lpfc_disc_start(vport); | 1312 | lpfc_disc_start(vport); |
1313 | /* Decrement the reference count on the ndlp after the | ||
1314 | * references to the ndlp are done. | ||
1315 | */ | ||
1316 | lpfc_nlp_put(ndlp); | ||
1316 | return; | 1317 | return; |
1317 | } | 1318 | } |
1318 | 1319 | ||
@@ -1320,6 +1321,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1320 | lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, | 1321 | lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, |
1321 | "0258 Register Fabric login error: 0x%x\n", | 1322 | "0258 Register Fabric login error: 0x%x\n", |
1322 | mb->mbxStatus); | 1323 | mb->mbxStatus); |
1324 | /* Decrement the reference count on the ndlp after the references | ||
1325 | * to the ndlp are done. | ||
1326 | */ | ||
1327 | lpfc_nlp_put(ndlp); | ||
1323 | return; | 1328 | return; |
1324 | } | 1329 | } |
1325 | 1330 | ||
@@ -1327,8 +1332,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1327 | ndlp->nlp_type |= NLP_FABRIC; | 1332 | ndlp->nlp_type |= NLP_FABRIC; |
1328 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); | 1333 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
1329 | 1334 | ||
1330 | lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */ | ||
1331 | |||
1332 | if (vport->port_state == LPFC_FABRIC_CFG_LINK) { | 1335 | if (vport->port_state == LPFC_FABRIC_CFG_LINK) { |
1333 | vports = lpfc_create_vport_work_array(phba); | 1336 | vports = lpfc_create_vport_work_array(phba); |
1334 | if (vports != NULL) | 1337 | if (vports != NULL) |
@@ -1356,6 +1359,11 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1356 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 1359 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
1357 | kfree(mp); | 1360 | kfree(mp); |
1358 | mempool_free(pmb, phba->mbox_mem_pool); | 1361 | mempool_free(pmb, phba->mbox_mem_pool); |
1362 | |||
1363 | /* Drop the reference count from the mbox at the end after | ||
1364 | * all the current references to the ndlp are done. | ||
1365 | */ | ||
1366 | lpfc_nlp_put(ndlp); | ||
1359 | return; | 1367 | return; |
1360 | } | 1368 | } |
1361 | 1369 | ||
@@ -1463,9 +1471,8 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
1463 | * registered the port. | 1471 | * registered the port. |
1464 | */ | 1472 | */ |
1465 | if (ndlp->rport && ndlp->rport->dd_data && | 1473 | if (ndlp->rport && ndlp->rport->dd_data && |
1466 | ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) { | 1474 | ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) |
1467 | lpfc_nlp_put(ndlp); | 1475 | lpfc_nlp_put(ndlp); |
1468 | } | ||
1469 | 1476 | ||
1470 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, | 1477 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, |
1471 | "rport add: did:x%x flg:x%x type x%x", | 1478 | "rport add: did:x%x flg:x%x type x%x", |
@@ -1660,6 +1667,18 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
1660 | } | 1667 | } |
1661 | 1668 | ||
1662 | void | 1669 | void |
1670 | lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | ||
1671 | { | ||
1672 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
1673 | |||
1674 | if (list_empty(&ndlp->nlp_listp)) { | ||
1675 | spin_lock_irq(shost->host_lock); | ||
1676 | list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); | ||
1677 | spin_unlock_irq(shost->host_lock); | ||
1678 | } | ||
1679 | } | ||
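lpfc_enqueue_node(), added above, links a node onto vport->fc_nodes only when list_empty() says it is not already on a list, and takes the host lock around the insertion. The sketch below shows why the list_empty() test on the entry itself works: an unlinked entry initialized to point at itself reads as empty, so a second add becomes a no-op. The tiny list implementation here is only a stand-in for the kernel's list.h, and locking is deliberately omitted:

    #include <stdio.h>
    #include <stdbool.h>

    struct link { struct link *prev, *next; };

    static void list_init(struct link *l)        { l->prev = l->next = l; }
    static bool list_empty(const struct link *l) { return l->next == l; }

    static void list_add_tail(struct link *entry, struct link *head)
    {
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
    }

    int main(void)
    {
        struct link head, node;
        list_init(&head);
        list_init(&node);             /* an unlinked entry points at itself */

        if (list_empty(&node))        /* enqueue only if not already linked */
            list_add_tail(&node, &head);
        if (list_empty(&node))        /* second attempt is a no-op */
            list_add_tail(&node, &head);

        printf("head has %s entry\n", list_empty(&head) ? "no" : "one");
        return 0;
    }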
1680 | |||
1681 | void | ||
1663 | lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | 1682 | lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
1664 | { | 1683 | { |
1665 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 1684 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
@@ -1672,7 +1691,80 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
1672 | list_del_init(&ndlp->nlp_listp); | 1691 | list_del_init(&ndlp->nlp_listp); |
1673 | spin_unlock_irq(shost->host_lock); | 1692 | spin_unlock_irq(shost->host_lock); |
1674 | lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, | 1693 | lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, |
1675 | NLP_STE_UNUSED_NODE); | 1694 | NLP_STE_UNUSED_NODE); |
1695 | } | ||
1696 | |||
1697 | void | ||
1698 | lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | ||
1699 | { | ||
1700 | if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) | ||
1701 | lpfc_cancel_retry_delay_tmo(vport, ndlp); | ||
1702 | if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) | ||
1703 | lpfc_nlp_counters(vport, ndlp->nlp_state, -1); | ||
1704 | lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, | ||
1705 | NLP_STE_UNUSED_NODE); | ||
1706 | } | ||
1707 | |||
1708 | struct lpfc_nodelist * | ||
1709 | lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | ||
1710 | int state) | ||
1711 | { | ||
1712 | struct lpfc_hba *phba = vport->phba; | ||
1713 | uint32_t did; | ||
1714 | unsigned long flags; | ||
1715 | |||
1716 | if (!ndlp) | ||
1717 | return NULL; | ||
1718 | |||
1719 | spin_lock_irqsave(&phba->ndlp_lock, flags); | ||
1720 | /* The ndlp should not be in memory free mode */ | ||
1721 | if (NLP_CHK_FREE_REQ(ndlp)) { | ||
1722 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
1723 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, | ||
1724 | "0277 lpfc_enable_node: ndlp:x%p " | ||
1725 | "usgmap:x%x refcnt:%d\n", | ||
1726 | (void *)ndlp, ndlp->nlp_usg_map, | ||
1727 | atomic_read(&ndlp->kref.refcount)); | ||
1728 | return NULL; | ||
1729 | } | ||
1730 | /* The ndlp should not already be in active mode */ | ||
1731 | if (NLP_CHK_NODE_ACT(ndlp)) { | ||
1732 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
1733 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, | ||
1734 | "0278 lpfc_enable_node: ndlp:x%p " | ||
1735 | "usgmap:x%x refcnt:%d\n", | ||
1736 | (void *)ndlp, ndlp->nlp_usg_map, | ||
1737 | atomic_read(&ndlp->kref.refcount)); | ||
1738 | return NULL; | ||
1739 | } | ||
1740 | |||
1741 | /* Keep the original DID */ | ||
1742 | did = ndlp->nlp_DID; | ||
1743 | |||
1744 | /* re-initialize ndlp except for the ndlp linked list pointer */ | ||

1745 | memset((((char *)ndlp) + sizeof (struct list_head)), 0, | ||
1746 | sizeof (struct lpfc_nodelist) - sizeof (struct list_head)); | ||
1747 | INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); | ||
1748 | INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); | ||
1749 | init_timer(&ndlp->nlp_delayfunc); | ||
1750 | ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; | ||
1751 | ndlp->nlp_delayfunc.data = (unsigned long)ndlp; | ||
1752 | ndlp->nlp_DID = did; | ||
1753 | ndlp->vport = vport; | ||
1754 | ndlp->nlp_sid = NLP_NO_SID; | ||
1755 | /* ndlp management re-initialize */ | ||
1756 | kref_init(&ndlp->kref); | ||
1757 | NLP_INT_NODE_ACT(ndlp); | ||
1758 | |||
1759 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
1760 | |||
1761 | if (state != NLP_STE_UNUSED_NODE) | ||
1762 | lpfc_nlp_set_state(vport, ndlp, state); | ||
1763 | |||
1764 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, | ||
1765 | "node enable: did:x%x", | ||
1766 | ndlp->nlp_DID, 0, 0); | ||
1767 | return ndlp; | ||
1676 | } | 1768 | } |
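lpfc_enable_node() above re-initializes the node in place while preserving the embedded nlp_listp list head, by starting the memset just past sizeof(struct list_head); this only works because the list head is the first member of struct lpfc_nodelist. A self-contained sketch of that layout trick, where struct node, reinit_node and the fields are hypothetical names, not the driver's types:

    #include <stdio.h>
    #include <string.h>

    struct link { struct link *prev, *next; };

    struct node {
        struct link list;   /* must stay the first member for the offset trick */
        int did;
        int state;
    };

    /* Wipe everything after the embedded list head, keeping the node linked. */
    static void reinit_node(struct node *nd, int keep_did)
    {
        memset((char *)nd + sizeof(struct link), 0,
               sizeof(struct node) - sizeof(struct link));
        nd->did = keep_did;   /* restore the identity we want to preserve */
    }

    int main(void)
    {
        struct node nd = { .did = 0x42, .state = 7 };

        nd.list.prev = nd.list.next = &nd.list;   /* self-linked, i.e. "empty" */
        reinit_node(&nd, nd.did);
        printf("did=0x%x state=%d list intact=%d\n",
               nd.did, nd.state, nd.list.next == &nd.list);
        return 0;
    }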
1677 | 1769 | ||
1678 | void | 1770 | void |
@@ -1972,7 +2064,21 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
1972 | "Data: x%x x%x x%x\n", | 2064 | "Data: x%x x%x x%x\n", |
1973 | ndlp->nlp_DID, ndlp->nlp_flag, | 2065 | ndlp->nlp_DID, ndlp->nlp_flag, |
1974 | ndlp->nlp_state, ndlp->nlp_rpi); | 2066 | ndlp->nlp_state, ndlp->nlp_rpi); |
1975 | lpfc_dequeue_node(vport, ndlp); | 2067 | if (NLP_CHK_FREE_REQ(ndlp)) { |
2068 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, | ||
2069 | "0280 lpfc_cleanup_node: ndlp:x%p " | ||
2070 | "usgmap:x%x refcnt:%d\n", | ||
2071 | (void *)ndlp, ndlp->nlp_usg_map, | ||
2072 | atomic_read(&ndlp->kref.refcount)); | ||
2073 | lpfc_dequeue_node(vport, ndlp); | ||
2074 | } else { | ||
2075 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, | ||
2076 | "0281 lpfc_cleanup_node: ndlp:x%p " | ||
2077 | "usgmap:x%x refcnt:%d\n", | ||
2078 | (void *)ndlp, ndlp->nlp_usg_map, | ||
2079 | atomic_read(&ndlp->kref.refcount)); | ||
2080 | lpfc_disable_node(vport, ndlp); | ||
2081 | } | ||
1976 | 2082 | ||
1977 | /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ | 2083 | /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ |
1978 | if ((mb = phba->sli.mbox_active)) { | 2084 | if ((mb = phba->sli.mbox_active)) { |
@@ -1994,12 +2100,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
1994 | } | 2100 | } |
1995 | list_del(&mb->list); | 2101 | list_del(&mb->list); |
1996 | mempool_free(mb, phba->mbox_mem_pool); | 2102 | mempool_free(mb, phba->mbox_mem_pool); |
1997 | lpfc_nlp_put(ndlp); | 2103 | /* We shall not invoke the lpfc_nlp_put to decrement |
2104 | * the ndlp reference count as we are in the process | ||
2105 | * of lpfc_nlp_release. | ||
2106 | */ | ||
1998 | } | 2107 | } |
1999 | } | 2108 | } |
2000 | spin_unlock_irq(&phba->hbalock); | 2109 | spin_unlock_irq(&phba->hbalock); |
2001 | 2110 | ||
2002 | lpfc_els_abort(phba,ndlp); | 2111 | lpfc_els_abort(phba, ndlp); |
2112 | |||
2003 | spin_lock_irq(shost->host_lock); | 2113 | spin_lock_irq(shost->host_lock); |
2004 | ndlp->nlp_flag &= ~NLP_DELAY_TMO; | 2114 | ndlp->nlp_flag &= ~NLP_DELAY_TMO; |
2005 | spin_unlock_irq(shost->host_lock); | 2115 | spin_unlock_irq(shost->host_lock); |
@@ -2057,7 +2167,6 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
2057 | } | 2167 | } |
2058 | } | 2168 | } |
2059 | } | 2169 | } |
2060 | |||
2061 | lpfc_cleanup_node(vport, ndlp); | 2170 | lpfc_cleanup_node(vport, ndlp); |
2062 | 2171 | ||
2063 | /* | 2172 | /* |
@@ -2182,7 +2291,16 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) | |||
2182 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; | 2291 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
2183 | spin_unlock_irq(shost->host_lock); | 2292 | spin_unlock_irq(shost->host_lock); |
2184 | return ndlp; | 2293 | return ndlp; |
2294 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
2295 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE); | ||
2296 | if (!ndlp) | ||
2297 | return NULL; | ||
2298 | spin_lock_irq(shost->host_lock); | ||
2299 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; | ||
2300 | spin_unlock_irq(shost->host_lock); | ||
2301 | return ndlp; | ||
2185 | } | 2302 | } |
2303 | |||
2186 | if (vport->fc_flag & FC_RSCN_MODE) { | 2304 | if (vport->fc_flag & FC_RSCN_MODE) { |
2187 | if (lpfc_rscn_payload_check(vport, did)) { | 2305 | if (lpfc_rscn_payload_check(vport, did)) { |
2188 | /* If we've already received a PLOGI from this NPort | 2306 | /* If we've already received a PLOGI from this NPort |
@@ -2363,6 +2481,7 @@ lpfc_disc_start(struct lpfc_vport *vport) | |||
2363 | * continue discovery. | 2481 | * continue discovery. |
2364 | */ | 2482 | */ |
2365 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && | 2483 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && |
2484 | !(vport->fc_flag & FC_PT2PT) && | ||
2366 | !(vport->fc_flag & FC_RSCN_MODE)) { | 2485 | !(vport->fc_flag & FC_RSCN_MODE)) { |
2367 | lpfc_issue_reg_vpi(phba, vport); | 2486 | lpfc_issue_reg_vpi(phba, vport); |
2368 | return; | 2487 | return; |
@@ -2485,6 +2604,8 @@ lpfc_disc_flush_list(struct lpfc_vport *vport) | |||
2485 | if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { | 2604 | if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { |
2486 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, | 2605 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, |
2487 | nlp_listp) { | 2606 | nlp_listp) { |
2607 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
2608 | continue; | ||
2488 | if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || | 2609 | if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || |
2489 | ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { | 2610 | ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { |
2490 | lpfc_free_tx(phba, ndlp); | 2611 | lpfc_free_tx(phba, ndlp); |
@@ -2572,6 +2693,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) | |||
2572 | /* Start discovery by sending FLOGI, clean up old rpis */ | 2693 | /* Start discovery by sending FLOGI, clean up old rpis */ |
2573 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, | 2694 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, |
2574 | nlp_listp) { | 2695 | nlp_listp) { |
2696 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
2697 | continue; | ||
2575 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) | 2698 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) |
2576 | continue; | 2699 | continue; |
2577 | if (ndlp->nlp_type & NLP_FABRIC) { | 2700 | if (ndlp->nlp_type & NLP_FABRIC) { |
@@ -2618,7 +2741,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) | |||
2618 | "NameServer login\n"); | 2741 | "NameServer login\n"); |
2619 | /* Next look for NameServer ndlp */ | 2742 | /* Next look for NameServer ndlp */ |
2620 | ndlp = lpfc_findnode_did(vport, NameServer_DID); | 2743 | ndlp = lpfc_findnode_did(vport, NameServer_DID); |
2621 | if (ndlp) | 2744 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) |
2622 | lpfc_els_abort(phba, ndlp); | 2745 | lpfc_els_abort(phba, ndlp); |
2623 | 2746 | ||
2624 | /* ReStart discovery */ | 2747 | /* ReStart discovery */ |
@@ -2897,6 +3020,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
2897 | ndlp->nlp_sid = NLP_NO_SID; | 3020 | ndlp->nlp_sid = NLP_NO_SID; |
2898 | INIT_LIST_HEAD(&ndlp->nlp_listp); | 3021 | INIT_LIST_HEAD(&ndlp->nlp_listp); |
2899 | kref_init(&ndlp->kref); | 3022 | kref_init(&ndlp->kref); |
3023 | NLP_INT_NODE_ACT(ndlp); | ||
2900 | 3024 | ||
2901 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, | 3025 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, |
2902 | "node init: did:x%x", | 3026 | "node init: did:x%x", |
@@ -2911,6 +3035,8 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
2911 | static void | 3035 | static void |
2912 | lpfc_nlp_release(struct kref *kref) | 3036 | lpfc_nlp_release(struct kref *kref) |
2913 | { | 3037 | { |
3038 | struct lpfc_hba *phba; | ||
3039 | unsigned long flags; | ||
2914 | struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, | 3040 | struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, |
2915 | kref); | 3041 | kref); |
2916 | 3042 | ||
@@ -2918,8 +3044,24 @@ lpfc_nlp_release(struct kref *kref) | |||
2918 | "node release: did:x%x flg:x%x type:x%x", | 3044 | "node release: did:x%x flg:x%x type:x%x", |
2919 | ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); | 3045 | ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); |
2920 | 3046 | ||
3047 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, | ||
3048 | "0279 lpfc_nlp_release: ndlp:x%p " | ||
3049 | "usgmap:x%x refcnt:%d\n", | ||
3050 | (void *)ndlp, ndlp->nlp_usg_map, | ||
3051 | atomic_read(&ndlp->kref.refcount)); | ||
3052 | |||
3053 | /* remove ndlp from action. */ | ||
2921 | lpfc_nlp_remove(ndlp->vport, ndlp); | 3054 | lpfc_nlp_remove(ndlp->vport, ndlp); |
2922 | mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); | 3055 | |
3056 | /* clear the ndlp active flag for all release cases */ | ||
3057 | phba = ndlp->vport->phba; | ||
3058 | spin_lock_irqsave(&phba->ndlp_lock, flags); | ||
3059 | NLP_CLR_NODE_ACT(ndlp); | ||
3060 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
3061 | |||
3062 | /* free ndlp memory for final ndlp release */ | ||
3063 | if (NLP_CHK_FREE_REQ(ndlp)) | ||
3064 | mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); | ||
2923 | } | 3065 | } |
2924 | 3066 | ||
2925 | /* This routine bumps the reference count for a ndlp structure to ensure | 3067 | /* This routine bumps the reference count for a ndlp structure to ensure |
@@ -2929,37 +3071,108 @@ lpfc_nlp_release(struct kref *kref) | |||
2929 | struct lpfc_nodelist * | 3071 | struct lpfc_nodelist * |
2930 | lpfc_nlp_get(struct lpfc_nodelist *ndlp) | 3072 | lpfc_nlp_get(struct lpfc_nodelist *ndlp) |
2931 | { | 3073 | { |
3074 | struct lpfc_hba *phba; | ||
3075 | unsigned long flags; | ||
3076 | |||
2932 | if (ndlp) { | 3077 | if (ndlp) { |
2933 | lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, | 3078 | lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, |
2934 | "node get: did:x%x flg:x%x refcnt:x%x", | 3079 | "node get: did:x%x flg:x%x refcnt:x%x", |
2935 | ndlp->nlp_DID, ndlp->nlp_flag, | 3080 | ndlp->nlp_DID, ndlp->nlp_flag, |
2936 | atomic_read(&ndlp->kref.refcount)); | 3081 | atomic_read(&ndlp->kref.refcount)); |
2937 | kref_get(&ndlp->kref); | 3082 | /* Check ndlp usage to avoid incrementing the reference |
3083 | * count of an ndlp that is in the process of being | ||
3084 | * released. | ||
3085 | */ | ||
3086 | phba = ndlp->vport->phba; | ||
3087 | spin_lock_irqsave(&phba->ndlp_lock, flags); | ||
3088 | if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) { | ||
3089 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
3090 | lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, | ||
3091 | "0276 lpfc_nlp_get: ndlp:x%p " | ||
3092 | "usgmap:x%x refcnt:%d\n", | ||
3093 | (void *)ndlp, ndlp->nlp_usg_map, | ||
3094 | atomic_read(&ndlp->kref.refcount)); | ||
3095 | return NULL; | ||
3096 | } else | ||
3097 | kref_get(&ndlp->kref); | ||
3098 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
2938 | } | 3099 | } |
2939 | return ndlp; | 3100 | return ndlp; |
2940 | } | 3101 | } |
2941 | 3102 | ||
2942 | |||
2943 | /* This routine decrements the reference count for a ndlp structure. If the | 3103 | /* This routine decrements the reference count for a ndlp structure. If the |
2944 | * count goes to 0, this indicates that the associated nodelist should be freed. | 3104 | * count goes to 0, this indicates that the associated nodelist should be |
3105 | * freed. Returning 1 indicates the ndlp resource has been released; on the | ||
3106 | * other hand, returning 0 indicates the ndlp resource has not been released | ||
3107 | * yet. | ||
2945 | */ | 3108 | */ |
2946 | int | 3109 | int |
2947 | lpfc_nlp_put(struct lpfc_nodelist *ndlp) | 3110 | lpfc_nlp_put(struct lpfc_nodelist *ndlp) |
2948 | { | 3111 | { |
2949 | if (ndlp) { | 3112 | struct lpfc_hba *phba; |
2950 | lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, | 3113 | unsigned long flags; |
2951 | "node put: did:x%x flg:x%x refcnt:x%x", | 3114 | |
2952 | ndlp->nlp_DID, ndlp->nlp_flag, | 3115 | if (!ndlp) |
2953 | atomic_read(&ndlp->kref.refcount)); | 3116 | return 1; |
3117 | |||
3118 | lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, | ||
3119 | "node put: did:x%x flg:x%x refcnt:x%x", | ||
3120 | ndlp->nlp_DID, ndlp->nlp_flag, | ||
3121 | atomic_read(&ndlp->kref.refcount)); | ||
3122 | phba = ndlp->vport->phba; | ||
3123 | spin_lock_irqsave(&phba->ndlp_lock, flags); | ||
3124 | /* Check the ndlp memory free acknowledge flag to avoid the | ||
3125 | * possible race where kref_put is invoked again after a | ||
3126 | * previous put has already freed the ndlp memory. | ||
3127 | */ | ||
3128 | if (NLP_CHK_FREE_ACK(ndlp)) { | ||
3129 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
3130 | lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, | ||
3131 | "0274 lpfc_nlp_put: ndlp:x%p " | ||
3132 | "usgmap:x%x refcnt:%d\n", | ||
3133 | (void *)ndlp, ndlp->nlp_usg_map, | ||
3134 | atomic_read(&ndlp->kref.refcount)); | ||
3135 | return 1; | ||
2954 | } | 3136 | } |
2955 | return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0; | 3137 | /* Check the ndlp inactivate log flag to avoid the possible |
3138 | * race where kref_put is invoked again after the ndlp | ||
3139 | * is already being inactivated. | ||
3140 | */ | ||
3141 | if (NLP_CHK_IACT_REQ(ndlp)) { | ||
3142 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
3143 | lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, | ||
3144 | "0275 lpfc_nlp_put: ndlp:x%p " | ||
3145 | "usgmap:x%x refcnt:%d\n", | ||
3146 | (void *)ndlp, ndlp->nlp_usg_map, | ||
3147 | atomic_read(&ndlp->kref.refcount)); | ||
3148 | return 1; | ||
3149 | } | ||
3150 | /* For last put, mark the ndlp usage flags to make sure no | ||
3151 | * other kref_get or kref_put on the same ndlp can slip in | ||
3152 | * once the final kref_put has been invoked on this | ||
3153 | * ndlp. | ||
3154 | */ | ||
3155 | if (atomic_read(&ndlp->kref.refcount) == 1) { | ||
3156 | /* Indicate ndlp is put to inactive state. */ | ||
3157 | NLP_SET_IACT_REQ(ndlp); | ||
3158 | /* Acknowledge ndlp memory free has been seen. */ | ||
3159 | if (NLP_CHK_FREE_REQ(ndlp)) | ||
3160 | NLP_SET_FREE_ACK(ndlp); | ||
3161 | } | ||
3162 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
3163 | /* Note: kref_put returns 1 when it decrements a reference | ||
3164 | * count that was 1; it invokes the release callback function | ||
3165 | * but leaves the reference count at 1 (it does not actually | ||
3166 | * perform the last decrement). Otherwise, it actually | ||
3167 | * decrements the reference count and returns 0. | ||
3168 | */ | ||
3169 | return kref_put(&ndlp->kref, lpfc_nlp_release); | ||
2956 | } | 3170 | } |
2957 | 3171 | ||
2958 | /* This routine frees the specified nodelist if it is not in use | 3172 | /* This routine frees the specified nodelist if it is not in use |
2959 | * by any other discovery thread. This routine returns 1 if the ndlp | 3173 | * by any other discovery thread. This routine returns 1 if the |
2960 | * is not being used by anyone and has been freed. A return value of | 3174 | * ndlp has been freed. A return value of 0 indicates the ndlp has |
2961 | * 0 indicates it is being used by another discovery thread and the | 3175 | * not yet been released. |
2962 | * refcount is left unchanged. | ||
2963 | */ | 3176 | */ |
2964 | int | 3177 | int |
2965 | lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) | 3178 | lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) |
@@ -2968,11 +3181,8 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) | |||
2968 | "node not used: did:x%x flg:x%x refcnt:x%x", | 3181 | "node not used: did:x%x flg:x%x refcnt:x%x", |
2969 | ndlp->nlp_DID, ndlp->nlp_flag, | 3182 | ndlp->nlp_DID, ndlp->nlp_flag, |
2970 | atomic_read(&ndlp->kref.refcount)); | 3183 | atomic_read(&ndlp->kref.refcount)); |
2971 | 3184 | if (atomic_read(&ndlp->kref.refcount) == 1) | |
2972 | if (atomic_read(&ndlp->kref.refcount) == 1) { | 3185 | if (lpfc_nlp_put(ndlp)) |
2973 | lpfc_nlp_put(ndlp); | 3186 | return 1; |
2974 | return 1; | ||
2975 | } | ||
2976 | return 0; | 3187 | return 0; |
2977 | } | 3188 | } |
2978 | |||
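Taken together, the reworked lpfc_nlp_get()/lpfc_nlp_put() above guard every kref operation with phba->ndlp_lock and the usage-map bits, so a get cannot race with the final put, and the final put marks the node inactive before the release callback runs. The user-space analog below compresses that idea into a mutex-protected refcount with a single "freeing" flag; it is only a sketch with invented names (obj_get/obj_put), and unlike the driver it frees unconditionally on the last put:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdlib.h>
    #include <pthread.h>

    struct obj {
        pthread_mutex_t lock;
        int  refcnt;
        bool freeing;                  /* analogous to the NLP usage-map bits */
    };

    static struct obj *obj_get(struct obj *o)
    {
        pthread_mutex_lock(&o->lock);
        if (o->freeing) {              /* refuse to resurrect a dying object */
            pthread_mutex_unlock(&o->lock);
            return NULL;
        }
        o->refcnt++;
        pthread_mutex_unlock(&o->lock);
        return o;
    }

    static bool obj_put(struct obj *o) /* true when this call released the object */
    {
        bool last;

        pthread_mutex_lock(&o->lock);
        if (o->refcnt == 1)
            o->freeing = true;         /* mark before the final drop */
        last = (--o->refcnt == 0);
        pthread_mutex_unlock(&o->lock);
        if (last) {
            pthread_mutex_destroy(&o->lock);
            free(o);
        }
        return last;
    }

    int main(void)
    {
        struct obj *o = calloc(1, sizeof(*o));
        if (!o)
            return 1;
        pthread_mutex_init(&o->lock, NULL);
        o->refcnt = 1;

        obj_get(o);                    /* refcnt: 1 -> 2 */
        obj_put(o);                    /* refcnt: 2 -> 1, object kept */
        printf("released on final put: %d\n", obj_put(o));
        return 0;
    }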
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 041f83e7634a..7773b949aa7c 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -581,6 +581,7 @@ struct ls_rjt { /* Structure is in Big Endian format */ | |||
581 | #define LSEXP_INVALID_O_SID 0x15 | 581 | #define LSEXP_INVALID_O_SID 0x15 |
582 | #define LSEXP_INVALID_OX_RX 0x17 | 582 | #define LSEXP_INVALID_OX_RX 0x17 |
583 | #define LSEXP_CMD_IN_PROGRESS 0x19 | 583 | #define LSEXP_CMD_IN_PROGRESS 0x19 |
584 | #define LSEXP_PORT_LOGIN_REQ 0x1E | ||
584 | #define LSEXP_INVALID_NPORT_ID 0x1F | 585 | #define LSEXP_INVALID_NPORT_ID 0x1F |
585 | #define LSEXP_INVALID_SEQ_ID 0x21 | 586 | #define LSEXP_INVALID_SEQ_ID 0x21 |
586 | #define LSEXP_INVALID_XCHG 0x23 | 587 | #define LSEXP_INVALID_XCHG 0x23 |
@@ -1376,11 +1377,26 @@ typedef struct { /* FireFly BIU registers */ | |||
1376 | #define CMD_QUE_XRI64_CX 0xB3 | 1377 | #define CMD_QUE_XRI64_CX 0xB3 |
1377 | #define CMD_IOCB_RCV_SEQ64_CX 0xB5 | 1378 | #define CMD_IOCB_RCV_SEQ64_CX 0xB5 |
1378 | #define CMD_IOCB_RCV_ELS64_CX 0xB7 | 1379 | #define CMD_IOCB_RCV_ELS64_CX 0xB7 |
1380 | #define CMD_IOCB_RET_XRI64_CX 0xB9 | ||
1379 | #define CMD_IOCB_RCV_CONT64_CX 0xBB | 1381 | #define CMD_IOCB_RCV_CONT64_CX 0xBB |
1380 | 1382 | ||
1381 | #define CMD_GEN_REQUEST64_CR 0xC2 | 1383 | #define CMD_GEN_REQUEST64_CR 0xC2 |
1382 | #define CMD_GEN_REQUEST64_CX 0xC3 | 1384 | #define CMD_GEN_REQUEST64_CX 0xC3 |
1383 | 1385 | ||
1386 | /* Unhandled SLI-3 Commands */ | ||
1387 | #define CMD_IOCB_XMIT_MSEQ64_CR 0xB0 | ||
1388 | #define CMD_IOCB_XMIT_MSEQ64_CX 0xB1 | ||
1389 | #define CMD_IOCB_RCV_SEQ_LIST64_CX 0xC1 | ||
1390 | #define CMD_IOCB_RCV_ELS_LIST64_CX 0xCD | ||
1391 | #define CMD_IOCB_CLOSE_EXTENDED_CN 0xB6 | ||
1392 | #define CMD_IOCB_ABORT_EXTENDED_CN 0xBA | ||
1393 | #define CMD_IOCB_RET_HBQE64_CN 0xCA | ||
1394 | #define CMD_IOCB_FCP_IBIDIR64_CR 0xAC | ||
1395 | #define CMD_IOCB_FCP_IBIDIR64_CX 0xAD | ||
1396 | #define CMD_IOCB_FCP_ITASKMGT64_CX 0xAF | ||
1397 | #define CMD_IOCB_LOGENTRY_CN 0x94 | ||
1398 | #define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 | ||
1399 | |||
1384 | #define CMD_MAX_IOCB_CMD 0xE6 | 1400 | #define CMD_MAX_IOCB_CMD 0xE6 |
1385 | #define CMD_IOCB_MASK 0xff | 1401 | #define CMD_IOCB_MASK 0xff |
1386 | 1402 | ||
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 6cfeba7454d4..22843751c2ca 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -461,11 +461,21 @@ lpfc_config_port_post(struct lpfc_hba *phba) | |||
461 | int | 461 | int |
462 | lpfc_hba_down_prep(struct lpfc_hba *phba) | 462 | lpfc_hba_down_prep(struct lpfc_hba *phba) |
463 | { | 463 | { |
464 | struct lpfc_vport **vports; | ||
465 | int i; | ||
464 | /* Disable interrupts */ | 466 | /* Disable interrupts */ |
465 | writel(0, phba->HCregaddr); | 467 | writel(0, phba->HCregaddr); |
466 | readl(phba->HCregaddr); /* flush */ | 468 | readl(phba->HCregaddr); /* flush */ |
467 | 469 | ||
468 | lpfc_cleanup_discovery_resources(phba->pport); | 470 | if (phba->pport->load_flag & FC_UNLOADING) |
471 | lpfc_cleanup_discovery_resources(phba->pport); | ||
472 | else { | ||
473 | vports = lpfc_create_vport_work_array(phba); | ||
474 | if (vports != NULL) | ||
475 | for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) | ||
476 | lpfc_cleanup_discovery_resources(vports[i]); | ||
477 | lpfc_destroy_vport_work_array(phba, vports); | ||
478 | } | ||
469 | return 0; | 479 | return 0; |
470 | } | 480 | } |
471 | 481 | ||
@@ -1422,9 +1432,32 @@ lpfc_cleanup(struct lpfc_vport *vport) | |||
1422 | lpfc_port_link_failure(vport); | 1432 | lpfc_port_link_failure(vport); |
1423 | 1433 | ||
1424 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { | 1434 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { |
1435 | if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
1436 | ndlp = lpfc_enable_node(vport, ndlp, | ||
1437 | NLP_STE_UNUSED_NODE); | ||
1438 | if (!ndlp) | ||
1439 | continue; | ||
1440 | spin_lock_irq(&phba->ndlp_lock); | ||
1441 | NLP_SET_FREE_REQ(ndlp); | ||
1442 | spin_unlock_irq(&phba->ndlp_lock); | ||
1443 | /* Trigger the release of the ndlp memory */ | ||
1444 | lpfc_nlp_put(ndlp); | ||
1445 | continue; | ||
1446 | } | ||
1447 | spin_lock_irq(&phba->ndlp_lock); | ||
1448 | if (NLP_CHK_FREE_REQ(ndlp)) { | ||
1449 | /* The ndlp should not be in memory free mode already */ | ||
1450 | spin_unlock_irq(&phba->ndlp_lock); | ||
1451 | continue; | ||
1452 | } else | ||
1453 | /* Indicate request for freeing ndlp memory */ | ||
1454 | NLP_SET_FREE_REQ(ndlp); | ||
1455 | spin_unlock_irq(&phba->ndlp_lock); | ||
1456 | |||
1425 | if (ndlp->nlp_type & NLP_FABRIC) | 1457 | if (ndlp->nlp_type & NLP_FABRIC) |
1426 | lpfc_disc_state_machine(vport, ndlp, NULL, | 1458 | lpfc_disc_state_machine(vport, ndlp, NULL, |
1427 | NLP_EVT_DEVICE_RECOVERY); | 1459 | NLP_EVT_DEVICE_RECOVERY); |
1460 | |||
1428 | lpfc_disc_state_machine(vport, ndlp, NULL, | 1461 | lpfc_disc_state_machine(vport, ndlp, NULL, |
1429 | NLP_EVT_DEVICE_RM); | 1462 | NLP_EVT_DEVICE_RM); |
1430 | } | 1463 | } |
@@ -1438,6 +1471,17 @@ lpfc_cleanup(struct lpfc_vport *vport) | |||
1438 | if (i++ > 3000) { | 1471 | if (i++ > 3000) { |
1439 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, | 1472 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, |
1440 | "0233 Nodelist not empty\n"); | 1473 | "0233 Nodelist not empty\n"); |
1474 | list_for_each_entry_safe(ndlp, next_ndlp, | ||
1475 | &vport->fc_nodes, nlp_listp) { | ||
1476 | lpfc_printf_vlog(ndlp->vport, KERN_ERR, | ||
1477 | LOG_NODE, | ||
1478 | "0282: did:x%x ndlp:x%p " | ||
1479 | "usgmap:x%x refcnt:%d\n", | ||
1480 | ndlp->nlp_DID, (void *)ndlp, | ||
1481 | ndlp->nlp_usg_map, | ||
1482 | atomic_read( | ||
1483 | &ndlp->kref.refcount)); | ||
1484 | } | ||
1441 | break; | 1485 | break; |
1442 | } | 1486 | } |
1443 | 1487 | ||
@@ -1586,6 +1630,8 @@ lpfc_offline_prep(struct lpfc_hba * phba) | |||
1586 | list_for_each_entry_safe(ndlp, next_ndlp, | 1630 | list_for_each_entry_safe(ndlp, next_ndlp, |
1587 | &vports[i]->fc_nodes, | 1631 | &vports[i]->fc_nodes, |
1588 | nlp_listp) { | 1632 | nlp_listp) { |
1633 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
1634 | continue; | ||
1589 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) | 1635 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) |
1590 | continue; | 1636 | continue; |
1591 | if (ndlp->nlp_type & NLP_FABRIC) { | 1637 | if (ndlp->nlp_type & NLP_FABRIC) { |
@@ -1695,9 +1741,9 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) | |||
1695 | 1741 | ||
1696 | vport = (struct lpfc_vport *) shost->hostdata; | 1742 | vport = (struct lpfc_vport *) shost->hostdata; |
1697 | vport->phba = phba; | 1743 | vport->phba = phba; |
1698 | |||
1699 | vport->load_flag |= FC_LOADING; | 1744 | vport->load_flag |= FC_LOADING; |
1700 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | 1745 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; |
1746 | vport->fc_rscn_flush = 0; | ||
1701 | 1747 | ||
1702 | lpfc_get_vport_cfgparam(vport); | 1748 | lpfc_get_vport_cfgparam(vport); |
1703 | shost->unique_id = instance; | 1749 | shost->unique_id = instance; |
@@ -1879,6 +1925,42 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost) | |||
1879 | spin_unlock_irq(shost->host_lock); | 1925 | spin_unlock_irq(shost->host_lock); |
1880 | } | 1926 | } |
1881 | 1927 | ||
1928 | static int | ||
1929 | lpfc_enable_msix(struct lpfc_hba *phba) | ||
1930 | { | ||
1931 | int error; | ||
1932 | |||
1933 | phba->msix_entries[0].entry = 0; | ||
1934 | phba->msix_entries[0].vector = 0; | ||
1935 | |||
1936 | error = pci_enable_msix(phba->pcidev, phba->msix_entries, | ||
1937 | ARRAY_SIZE(phba->msix_entries)); | ||
1938 | if (error) { | ||
1939 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
1940 | "0420 Enable MSI-X failed (%d), continuing " | ||
1941 | "with MSI\n", error); | ||
1942 | pci_disable_msix(phba->pcidev); | ||
1943 | return error; | ||
1944 | } | ||
1945 | |||
1946 | error = request_irq(phba->msix_entries[0].vector, lpfc_intr_handler, 0, | ||
1947 | LPFC_DRIVER_NAME, phba); | ||
1948 | if (error) { | ||
1949 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
1950 | "0421 MSI-X request_irq failed (%d), " | ||
1951 | "continuing with MSI\n", error); | ||
1952 | pci_disable_msix(phba->pcidev); | ||
1953 | } | ||
1954 | return error; | ||
1955 | } | ||
1956 | |||
1957 | static void | ||
1958 | lpfc_disable_msix(struct lpfc_hba *phba) | ||
1959 | { | ||
1960 | free_irq(phba->msix_entries[0].vector, phba); | ||
1961 | pci_disable_msix(phba->pcidev); | ||
1962 | } | ||
1963 | |||
1882 | static int __devinit | 1964 | static int __devinit |
1883 | lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | 1965 | lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) |
1884 | { | 1966 | { |
@@ -1905,6 +1987,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
1905 | 1987 | ||
1906 | spin_lock_init(&phba->hbalock); | 1988 | spin_lock_init(&phba->hbalock); |
1907 | 1989 | ||
1990 | /* Initialize ndlp management spinlock */ | ||
1991 | spin_lock_init(&phba->ndlp_lock); | ||
1992 | |||
1908 | phba->pcidev = pdev; | 1993 | phba->pcidev = pdev; |
1909 | 1994 | ||
1910 | /* Assign an unused board number */ | 1995 | /* Assign an unused board number */ |
@@ -2002,6 +2087,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2002 | 2087 | ||
2003 | memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); | 2088 | memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); |
2004 | 2089 | ||
2090 | INIT_LIST_HEAD(&phba->hbqbuf_in_list); | ||
2091 | |||
2005 | /* Initialize the SLI Layer to run with lpfc HBAs. */ | 2092 | /* Initialize the SLI Layer to run with lpfc HBAs. */ |
2006 | lpfc_sli_setup(phba); | 2093 | lpfc_sli_setup(phba); |
2007 | lpfc_sli_queue_setup(phba); | 2094 | lpfc_sli_queue_setup(phba); |
@@ -2077,24 +2164,36 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2077 | lpfc_debugfs_initialize(vport); | 2164 | lpfc_debugfs_initialize(vport); |
2078 | 2165 | ||
2079 | pci_set_drvdata(pdev, shost); | 2166 | pci_set_drvdata(pdev, shost); |
2167 | phba->intr_type = NONE; | ||
2080 | 2168 | ||
2081 | if (phba->cfg_use_msi) { | 2169 | if (phba->cfg_use_msi == 2) { |
2170 | error = lpfc_enable_msix(phba); | ||
2171 | if (!error) | ||
2172 | phba->intr_type = MSIX; | ||
2173 | } | ||
2174 | |||
2175 | /* Fallback to MSI if MSI-X initialization failed */ | ||
2176 | if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { | ||
2082 | retval = pci_enable_msi(phba->pcidev); | 2177 | retval = pci_enable_msi(phba->pcidev); |
2083 | if (!retval) | 2178 | if (!retval) |
2084 | phba->using_msi = 1; | 2179 | phba->intr_type = MSI; |
2085 | else | 2180 | else |
2086 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 2181 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
2087 | "0452 Enable MSI failed, continuing " | 2182 | "0452 Enable MSI failed, continuing " |
2088 | "with IRQ\n"); | 2183 | "with IRQ\n"); |
2089 | } | 2184 | } |
2090 | 2185 | ||
2091 | retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, | 2186 | /* MSI-X is the only case that doesn't need to call request_irq */ |
2092 | LPFC_DRIVER_NAME, phba); | 2187 | if (phba->intr_type != MSIX) { |
2093 | if (retval) { | 2188 | retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, |
2094 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 2189 | IRQF_SHARED, LPFC_DRIVER_NAME, phba); |
2095 | "0451 Enable interrupt handler failed\n"); | 2190 | if (retval) { |
2096 | error = retval; | 2191 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable " |
2097 | goto out_disable_msi; | 2192 | "interrupt handler failed\n"); |
2193 | error = retval; | ||
2194 | goto out_disable_msi; | ||
2195 | } else if (phba->intr_type != MSI) | ||
2196 | phba->intr_type = INTx; | ||
2098 | } | 2197 | } |
2099 | 2198 | ||
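The probe path above now records the chosen interrupt mode in phba->intr_type: MSI-X is tried first when cfg_use_msi == 2, MSI when cfg_use_msi >= 1, and a shared INTx line otherwise, with request_irq() called only for the non-MSI-X cases. A compact sketch of that fallback bookkeeping, with stubbed enable functions standing in for the PCI helpers; all names here are hypothetical:

    #include <stdio.h>
    #include <stdbool.h>

    enum intr_type { NONE, INTX, MSI, MSIX };

    /* Stubs standing in for pci_enable_msix()/pci_enable_msi(); flip the return
     * values to exercise the different fallback paths. */
    static bool try_enable_msix(void) { return false; }
    static bool try_enable_msi(void)  { return true; }

    static enum intr_type pick_intr(int cfg_use_msi)
    {
        enum intr_type t = NONE;

        if (cfg_use_msi == 2 && try_enable_msix())
            t = MSIX;
        if (t == NONE && cfg_use_msi >= 1 && try_enable_msi())
            t = MSI;
        if (t != MSIX) {
            /* request_irq() on pdev->irq would happen here for MSI and INTx */
            if (t == NONE)
                t = INTX;
        }
        return t;
    }

    int main(void)
    {
        static const char *names[] = { "none", "INTx", "MSI", "MSI-X" };
        printf("using %s\n", names[pick_intr(2)]);
        return 0;
    }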
2100 | phba->MBslimaddr = phba->slim_memmap_p; | 2199 | phba->MBslimaddr = phba->slim_memmap_p; |
@@ -2139,9 +2238,14 @@ out_remove_device: | |||
2139 | out_free_irq: | 2238 | out_free_irq: |
2140 | lpfc_stop_phba_timers(phba); | 2239 | lpfc_stop_phba_timers(phba); |
2141 | phba->pport->work_port_events = 0; | 2240 | phba->pport->work_port_events = 0; |
2142 | free_irq(phba->pcidev->irq, phba); | 2241 | |
2242 | if (phba->intr_type == MSIX) | ||
2243 | lpfc_disable_msix(phba); | ||
2244 | else | ||
2245 | free_irq(phba->pcidev->irq, phba); | ||
2246 | |||
2143 | out_disable_msi: | 2247 | out_disable_msi: |
2144 | if (phba->using_msi) | 2248 | if (phba->intr_type == MSI) |
2145 | pci_disable_msi(phba->pcidev); | 2249 | pci_disable_msi(phba->pcidev); |
2146 | destroy_port(vport); | 2250 | destroy_port(vport); |
2147 | out_kthread_stop: | 2251 | out_kthread_stop: |
@@ -2214,10 +2318,13 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |||
2214 | 2318 | ||
2215 | lpfc_debugfs_terminate(vport); | 2319 | lpfc_debugfs_terminate(vport); |
2216 | 2320 | ||
2217 | /* Release the irq reservation */ | 2321 | if (phba->intr_type == MSIX) |
2218 | free_irq(phba->pcidev->irq, phba); | 2322 | lpfc_disable_msix(phba); |
2219 | if (phba->using_msi) | 2323 | else { |
2220 | pci_disable_msi(phba->pcidev); | 2324 | free_irq(phba->pcidev->irq, phba); |
2325 | if (phba->intr_type == MSI) | ||
2326 | pci_disable_msi(phba->pcidev); | ||
2327 | } | ||
2221 | 2328 | ||
2222 | pci_set_drvdata(pdev, NULL); | 2329 | pci_set_drvdata(pdev, NULL); |
2223 | scsi_host_put(shost); | 2330 | scsi_host_put(shost); |
@@ -2276,10 +2383,13 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, | |||
2276 | pring = &psli->ring[psli->fcp_ring]; | 2383 | pring = &psli->ring[psli->fcp_ring]; |
2277 | lpfc_sli_abort_iocb_ring(phba, pring); | 2384 | lpfc_sli_abort_iocb_ring(phba, pring); |
2278 | 2385 | ||
2279 | /* Release the irq reservation */ | 2386 | if (phba->intr_type == MSIX) |
2280 | free_irq(phba->pcidev->irq, phba); | 2387 | lpfc_disable_msix(phba); |
2281 | if (phba->using_msi) | 2388 | else { |
2282 | pci_disable_msi(phba->pcidev); | 2389 | free_irq(phba->pcidev->irq, phba); |
2390 | if (phba->intr_type == MSI) | ||
2391 | pci_disable_msi(phba->pcidev); | ||
2392 | } | ||
2283 | 2393 | ||
2284 | /* Request a slot reset. */ | 2394 | /* Request a slot reset. */ |
2285 | return PCI_ERS_RESULT_NEED_RESET; | 2395 | return PCI_ERS_RESULT_NEED_RESET; |
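The lpfc_init.c hunks above replace the old using_msi flag with an intr_type of INTx, MSI, or MSIX so that setup and teardown stay symmetric: request_irq() is skipped only for the MSI-X case, and the error and remove paths release exactly the resources that were acquired. Below is a minimal, hedged sketch of the same enable-MSI-with-INTx-fallback pattern for a generic PCI driver; the names (my_handler, my_setup_irq, MY_MSI) are illustrative and not taken from the lpfc source.

#include <linux/pci.h>
#include <linux/interrupt.h>

enum my_intr_type { MY_INTX, MY_MSI };

static irqreturn_t my_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;             /* placeholder interrupt handler */
}

/* Try MSI first, fall back to legacy INTx, and remember which mode was
 * used so the teardown path mirrors the setup path exactly. */
static int my_setup_irq(struct pci_dev *pdev, void *drvdata,
                        enum my_intr_type *type)
{
        int rc;

        *type = MY_INTX;
        if (!pci_enable_msi(pdev))              /* returns 0 on success */
                *type = MY_MSI;

        rc = request_irq(pdev->irq, my_handler, IRQF_SHARED,
                         "my_driver", drvdata);
        if (rc && *type == MY_MSI)
                pci_disable_msi(pdev);          /* undo only what we did */
        return rc;
}

static void my_teardown_irq(struct pci_dev *pdev, void *drvdata,
                            enum my_intr_type type)
{
        free_irq(pdev->irq, drvdata);
        if (type == MY_MSI)
                pci_disable_msi(pdev);
}

As the comment in the hunk notes, the driver's MSI-X path bypasses this request_irq() call entirely and is torn down through lpfc_disable_msix() instead.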
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index c5841d7565f7..39fd2b843bec 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2005 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -35,11 +35,15 @@ | |||
35 | #define LOG_ALL_MSG 0xffff /* LOG all messages */ | 35 | #define LOG_ALL_MSG 0xffff /* LOG all messages */ |
36 | 36 | ||
37 | #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ | 37 | #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ |
38 | do { \ | ||
38 | { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ | 39 | { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ |
39 | dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ | 40 | dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ |
40 | fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } | 41 | fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ |
42 | } while (0) | ||
41 | 43 | ||
42 | #define lpfc_printf_log(phba, level, mask, fmt, arg...) \ | 44 | #define lpfc_printf_log(phba, level, mask, fmt, arg...) \ |
45 | do { \ | ||
43 | { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ | 46 | { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ |
44 | dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ | 47 | dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ |
45 | fmt, phba->brd_no, ##arg); } | 48 | fmt, phba->brd_no, ##arg); } \ |
49 | } while (0) | ||
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 6dc5ab8d6716..3c0cebc71800 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c | |||
@@ -264,19 +264,30 @@ void | |||
264 | lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) | 264 | lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) |
265 | { | 265 | { |
266 | struct hbq_dmabuf *hbq_entry; | 266 | struct hbq_dmabuf *hbq_entry; |
267 | unsigned long flags; | ||
268 | |||
269 | if (!mp) | ||
270 | return; | ||
267 | 271 | ||
268 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { | 272 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { |
273 | /* Check whether HBQ is still in use */ | ||
274 | spin_lock_irqsave(&phba->hbalock, flags); | ||
275 | if (!phba->hbq_in_use) { | ||
276 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
277 | return; | ||
278 | } | ||
269 | hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); | 279 | hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); |
280 | list_del(&hbq_entry->dbuf.list); | ||
270 | if (hbq_entry->tag == -1) { | 281 | if (hbq_entry->tag == -1) { |
271 | (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) | 282 | (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) |
272 | (phba, hbq_entry); | 283 | (phba, hbq_entry); |
273 | } else { | 284 | } else { |
274 | lpfc_sli_free_hbq(phba, hbq_entry); | 285 | lpfc_sli_free_hbq(phba, hbq_entry); |
275 | } | 286 | } |
287 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
276 | } else { | 288 | } else { |
277 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 289 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
278 | kfree(mp); | 290 | kfree(mp); |
279 | } | 291 | } |
280 | return; | 292 | return; |
281 | } | 293 | } |
282 | |||
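lpfc_in_buf_free() now tolerates a NULL buffer and re-checks hbq_in_use under phba->hbalock before touching the HBQ lists, so a free racing with HBQ teardown either sees the flag cleared and bails out or finishes while the lists are still valid. A hedged, generic sketch of this check-the-flag-under-the-lock pattern; struct my_pool and its fields are illustrative, not lpfc structures.

#include <linux/spinlock.h>
#include <linux/list.h>

struct my_pool {
        spinlock_t lock;
        int in_use;                     /* cleared when the pool is torn down */
        struct list_head buffers;
};

/* Return a buffer to the pool only while the pool is still live; the flag
 * test and the list manipulation happen under the same lock. */
static void my_buf_free(struct my_pool *pool, struct list_head *buf)
{
        unsigned long flags;

        if (!buf)
                return;

        spin_lock_irqsave(&pool->lock, flags);
        if (!pool->in_use) {
                spin_unlock_irqrestore(&pool->lock, flags);
                return;                 /* teardown already ran */
        }
        list_add_tail(buf, &pool->buffers);
        spin_unlock_irqrestore(&pool->lock, flags);
}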
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 4a0e3406e37a..d513813f6697 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -249,6 +249,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
249 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 249 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
250 | struct lpfc_hba *phba = vport->phba; | 250 | struct lpfc_hba *phba = vport->phba; |
251 | struct lpfc_dmabuf *pcmd; | 251 | struct lpfc_dmabuf *pcmd; |
252 | struct lpfc_work_evt *evtp; | ||
252 | uint32_t *lp; | 253 | uint32_t *lp; |
253 | IOCB_t *icmd; | 254 | IOCB_t *icmd; |
254 | struct serv_parm *sp; | 255 | struct serv_parm *sp; |
@@ -435,8 +436,14 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
435 | del_timer_sync(&ndlp->nlp_delayfunc); | 436 | del_timer_sync(&ndlp->nlp_delayfunc); |
436 | ndlp->nlp_last_elscmd = 0; | 437 | ndlp->nlp_last_elscmd = 0; |
437 | 438 | ||
438 | if (!list_empty(&ndlp->els_retry_evt.evt_listp)) | 439 | if (!list_empty(&ndlp->els_retry_evt.evt_listp)) { |
439 | list_del_init(&ndlp->els_retry_evt.evt_listp); | 440 | list_del_init(&ndlp->els_retry_evt.evt_listp); |
441 | /* Decrement ndlp reference count held for the | ||
442 | * delayed retry | ||
443 | */ | ||
444 | evtp = &ndlp->els_retry_evt; | ||
445 | lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); | ||
446 | } | ||
440 | 447 | ||
441 | if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { | 448 | if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { |
442 | spin_lock_irq(shost->host_lock); | 449 | spin_lock_irq(shost->host_lock); |
@@ -638,13 +645,15 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
638 | return 0; | 645 | return 0; |
639 | } | 646 | } |
640 | 647 | ||
641 | /* Check config parameter use-adisc or FCP-2 */ | 648 | if (!(vport->fc_flag & FC_PT2PT)) { |
642 | if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || | 649 | /* Check config parameter use-adisc or FCP-2 */ |
643 | ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { | 650 | if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || |
644 | spin_lock_irq(shost->host_lock); | 651 | ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { |
645 | ndlp->nlp_flag |= NLP_NPR_ADISC; | 652 | spin_lock_irq(shost->host_lock); |
646 | spin_unlock_irq(shost->host_lock); | 653 | ndlp->nlp_flag |= NLP_NPR_ADISC; |
647 | return 1; | 654 | spin_unlock_irq(shost->host_lock); |
655 | return 1; | ||
656 | } | ||
648 | } | 657 | } |
649 | ndlp->nlp_flag &= ~NLP_NPR_ADISC; | 658 | ndlp->nlp_flag &= ~NLP_NPR_ADISC; |
650 | lpfc_unreg_rpi(vport, ndlp); | 659 | lpfc_unreg_rpi(vport, ndlp); |
@@ -656,7 +665,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
656 | void *arg, uint32_t evt) | 665 | void *arg, uint32_t evt) |
657 | { | 666 | { |
658 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, | 667 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, |
659 | "0253 Illegal State Transition: node x%x " | 668 | "0271 Illegal State Transition: node x%x " |
660 | "event x%x, state x%x Data: x%x x%x\n", | 669 | "event x%x, state x%x Data: x%x x%x\n", |
661 | ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, | 670 | ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, |
662 | ndlp->nlp_flag); | 671 | ndlp->nlp_flag); |
@@ -674,7 +683,7 @@ lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
674 | */ | 683 | */ |
675 | if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { | 684 | if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { |
676 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, | 685 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, |
677 | "0253 Illegal State Transition: node x%x " | 686 | "0272 Illegal State Transition: node x%x " |
678 | "event x%x, state x%x Data: x%x x%x\n", | 687 | "event x%x, state x%x Data: x%x x%x\n", |
679 | ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, | 688 | ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, |
680 | ndlp->nlp_flag); | 689 | ndlp->nlp_flag); |
@@ -2144,8 +2153,11 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
2144 | uint32_t cur_state, rc; | 2153 | uint32_t cur_state, rc; |
2145 | uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *, | 2154 | uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *, |
2146 | uint32_t); | 2155 | uint32_t); |
2156 | uint32_t got_ndlp = 0; | ||
2157 | |||
2158 | if (lpfc_nlp_get(ndlp)) | ||
2159 | got_ndlp = 1; | ||
2147 | 2160 | ||
2148 | lpfc_nlp_get(ndlp); | ||
2149 | cur_state = ndlp->nlp_state; | 2161 | cur_state = ndlp->nlp_state; |
2150 | 2162 | ||
2151 | /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ | 2163 | /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ |
@@ -2162,15 +2174,24 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
2162 | rc = (func) (vport, ndlp, arg, evt); | 2174 | rc = (func) (vport, ndlp, arg, evt); |
2163 | 2175 | ||
2164 | /* DSM out state <rc> on NPort <nlp_DID> */ | 2176 | /* DSM out state <rc> on NPort <nlp_DID> */ |
2165 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 2177 | if (got_ndlp) { |
2178 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | ||
2166 | "0212 DSM out state %d on NPort x%x Data: x%x\n", | 2179 | "0212 DSM out state %d on NPort x%x Data: x%x\n", |
2167 | rc, ndlp->nlp_DID, ndlp->nlp_flag); | 2180 | rc, ndlp->nlp_DID, ndlp->nlp_flag); |
2168 | 2181 | ||
2169 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, | 2182 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, |
2170 | "DSM out: ste:%d did:x%x flg:x%x", | 2183 | "DSM out: ste:%d did:x%x flg:x%x", |
2171 | rc, ndlp->nlp_DID, ndlp->nlp_flag); | 2184 | rc, ndlp->nlp_DID, ndlp->nlp_flag); |
2185 | /* Decrement the ndlp reference count held for this function */ | ||
2186 | lpfc_nlp_put(ndlp); | ||
2187 | } else { | ||
2188 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | ||
2189 | "0212 DSM out state %d on NPort free\n", rc); | ||
2172 | 2190 | ||
2173 | lpfc_nlp_put(ndlp); | 2191 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, |
2192 | "DSM out: ste:%d did:x%x flg:x%x", | ||
2193 | rc, 0, 0); | ||
2194 | } | ||
2174 | 2195 | ||
2175 | return rc; | 2196 | return rc; |
2176 | } | 2197 | } |
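lpfc_disc_state_machine() now records whether lpfc_nlp_get() actually took a reference; judging from the new else branch, the get can fail for a node that is already being released, in which case the function must neither dereference ndlp afterwards nor drop a reference it never took. A user-space sketch of the same guarded get/put idea, under the assumption of a get that refuses objects marked as dying; all names are hypothetical and no locking is shown.

#include <stdio.h>
#include <stdlib.h>

struct node {
        int refs;
        int dying;
};

/* Take a reference only if the node is not already being torn down. */
static struct node *node_get(struct node *n)
{
        if (!n || n->dying)
                return NULL;
        n->refs++;
        return n;
}

static void node_put(struct node *n)
{
        if (n && --n->refs == 0)
                free(n);
}

static void run_state_machine(struct node *n)
{
        int got = (node_get(n) != NULL);

        /* ... drive the state machine; callees may drop other references ... */

        if (got) {
                printf("node still held, refs=%d\n", n->refs);
                node_put(n);            /* drop the reference taken above */
        } else {
                printf("node already being freed; skip the dereference\n");
        }
}

int main(void)
{
        struct node *n = calloc(1, sizeof(*n));

        if (!n)
                return 1;
        n->refs = 1;
        run_state_machine(n);
        node_put(n);
        return 0;
}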
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index fc5c3a42b05a..70255c11d3ad 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -1283,6 +1283,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) | |||
1283 | match = 0; | 1283 | match = 0; |
1284 | spin_lock_irq(shost->host_lock); | 1284 | spin_lock_irq(shost->host_lock); |
1285 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | 1285 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
1286 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
1287 | continue; | ||
1286 | if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && | 1288 | if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && |
1287 | i == ndlp->nlp_sid && | 1289 | i == ndlp->nlp_sid && |
1288 | ndlp->rport) { | 1290 | ndlp->rport) { |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index fdd01e384e36..f53206411cd8 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -203,8 +203,25 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) | |||
203 | case CMD_IOCB_RCV_SEQ64_CX: | 203 | case CMD_IOCB_RCV_SEQ64_CX: |
204 | case CMD_IOCB_RCV_ELS64_CX: | 204 | case CMD_IOCB_RCV_ELS64_CX: |
205 | case CMD_IOCB_RCV_CONT64_CX: | 205 | case CMD_IOCB_RCV_CONT64_CX: |
206 | case CMD_IOCB_RET_XRI64_CX: | ||
206 | type = LPFC_UNSOL_IOCB; | 207 | type = LPFC_UNSOL_IOCB; |
207 | break; | 208 | break; |
209 | case CMD_IOCB_XMIT_MSEQ64_CR: | ||
210 | case CMD_IOCB_XMIT_MSEQ64_CX: | ||
211 | case CMD_IOCB_RCV_SEQ_LIST64_CX: | ||
212 | case CMD_IOCB_RCV_ELS_LIST64_CX: | ||
213 | case CMD_IOCB_CLOSE_EXTENDED_CN: | ||
214 | case CMD_IOCB_ABORT_EXTENDED_CN: | ||
215 | case CMD_IOCB_RET_HBQE64_CN: | ||
216 | case CMD_IOCB_FCP_IBIDIR64_CR: | ||
217 | case CMD_IOCB_FCP_IBIDIR64_CX: | ||
218 | case CMD_IOCB_FCP_ITASKMGT64_CX: | ||
219 | case CMD_IOCB_LOGENTRY_CN: | ||
220 | case CMD_IOCB_LOGENTRY_ASYNC_CN: | ||
221 | printk("%s - Unhandled SLI-3 Command x%x\n", | ||
222 | __FUNCTION__, iocb_cmnd); | ||
223 | type = LPFC_UNKNOWN_IOCB; | ||
224 | break; | ||
208 | default: | 225 | default: |
209 | type = LPFC_UNKNOWN_IOCB; | 226 | type = LPFC_UNKNOWN_IOCB; |
210 | break; | 227 | break; |
@@ -529,10 +546,13 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) | |||
529 | { | 546 | { |
530 | struct lpfc_dmabuf *dmabuf, *next_dmabuf; | 547 | struct lpfc_dmabuf *dmabuf, *next_dmabuf; |
531 | struct hbq_dmabuf *hbq_buf; | 548 | struct hbq_dmabuf *hbq_buf; |
549 | unsigned long flags; | ||
532 | int i, hbq_count; | 550 | int i, hbq_count; |
551 | uint32_t hbqno; | ||
533 | 552 | ||
534 | hbq_count = lpfc_sli_hbq_count(); | 553 | hbq_count = lpfc_sli_hbq_count(); |
535 | /* Return all memory used by all HBQs */ | 554 | /* Return all memory used by all HBQs */ |
555 | spin_lock_irqsave(&phba->hbalock, flags); | ||
536 | for (i = 0; i < hbq_count; ++i) { | 556 | for (i = 0; i < hbq_count; ++i) { |
537 | list_for_each_entry_safe(dmabuf, next_dmabuf, | 557 | list_for_each_entry_safe(dmabuf, next_dmabuf, |
538 | &phba->hbqs[i].hbq_buffer_list, list) { | 558 | &phba->hbqs[i].hbq_buffer_list, list) { |
@@ -542,6 +562,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) | |||
542 | } | 562 | } |
543 | phba->hbqs[i].buffer_count = 0; | 563 | phba->hbqs[i].buffer_count = 0; |
544 | } | 564 | } |
565 | /* Return all HBQ buffers that are in flight */ | ||
566 | list_for_each_entry_safe(dmabuf, next_dmabuf, | ||
567 | &phba->hbqbuf_in_list, list) { | ||
568 | hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); | ||
569 | list_del(&hbq_buf->dbuf.list); | ||
570 | if (hbq_buf->tag == -1) { | ||
571 | (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) | ||
572 | (phba, hbq_buf); | ||
573 | } else { | ||
574 | hbqno = hbq_buf->tag >> 16; | ||
575 | if (hbqno >= LPFC_MAX_HBQS) | ||
576 | (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) | ||
577 | (phba, hbq_buf); | ||
578 | else | ||
579 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, | ||
580 | hbq_buf); | ||
581 | } | ||
582 | } | ||
583 | |||
584 | /* Mark the HBQs not in use */ | ||
585 | phba->hbq_in_use = 0; | ||
586 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
545 | } | 587 | } |
546 | 588 | ||
547 | static struct lpfc_hbq_entry * | 589 | static struct lpfc_hbq_entry * |
@@ -603,6 +645,7 @@ static int | |||
603 | lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) | 645 | lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) |
604 | { | 646 | { |
605 | uint32_t i, start, end; | 647 | uint32_t i, start, end; |
648 | unsigned long flags; | ||
606 | struct hbq_dmabuf *hbq_buffer; | 649 | struct hbq_dmabuf *hbq_buffer; |
607 | 650 | ||
608 | if (!phba->hbqs[hbqno].hbq_alloc_buffer) { | 651 | if (!phba->hbqs[hbqno].hbq_alloc_buffer) { |
@@ -615,6 +658,13 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) | |||
615 | end = lpfc_hbq_defs[hbqno]->entry_count; | 658 | end = lpfc_hbq_defs[hbqno]->entry_count; |
616 | } | 659 | } |
617 | 660 | ||
661 | /* Check whether HBQ is still in use */ | ||
662 | spin_lock_irqsave(&phba->hbalock, flags); | ||
663 | if (!phba->hbq_in_use) { | ||
664 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
665 | return 0; | ||
666 | } | ||
667 | |||
618 | /* Populate HBQ entries */ | 668 | /* Populate HBQ entries */ |
619 | for (i = start; i < end; i++) { | 669 | for (i = start; i < end; i++) { |
620 | hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); | 670 | hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); |
@@ -626,6 +676,8 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) | |||
626 | else | 676 | else |
627 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); | 677 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); |
628 | } | 678 | } |
679 | |||
680 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
629 | return 0; | 681 | return 0; |
630 | } | 682 | } |
631 | 683 | ||
@@ -910,16 +962,29 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) | |||
910 | uint32_t hbqno; | 962 | uint32_t hbqno; |
911 | void *virt; /* virtual address ptr */ | 963 | void *virt; /* virtual address ptr */ |
912 | dma_addr_t phys; /* mapped address */ | 964 | dma_addr_t phys; /* mapped address */ |
965 | unsigned long flags; | ||
966 | |||
967 | /* Check whether HBQ is still in use */ | ||
968 | spin_lock_irqsave(&phba->hbalock, flags); | ||
969 | if (!phba->hbq_in_use) { | ||
970 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
971 | return NULL; | ||
972 | } | ||
913 | 973 | ||
914 | hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); | 974 | hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); |
915 | if (hbq_entry == NULL) | 975 | if (hbq_entry == NULL) { |
976 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
916 | return NULL; | 977 | return NULL; |
978 | } | ||
917 | list_del(&hbq_entry->dbuf.list); | 979 | list_del(&hbq_entry->dbuf.list); |
918 | 980 | ||
919 | hbqno = tag >> 16; | 981 | hbqno = tag >> 16; |
920 | new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); | 982 | new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); |
921 | if (new_hbq_entry == NULL) | 983 | if (new_hbq_entry == NULL) { |
984 | list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list); | ||
985 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
922 | return &hbq_entry->dbuf; | 986 | return &hbq_entry->dbuf; |
987 | } | ||
923 | new_hbq_entry->tag = -1; | 988 | new_hbq_entry->tag = -1; |
924 | phys = new_hbq_entry->dbuf.phys; | 989 | phys = new_hbq_entry->dbuf.phys; |
925 | virt = new_hbq_entry->dbuf.virt; | 990 | virt = new_hbq_entry->dbuf.virt; |
@@ -928,6 +993,9 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) | |||
928 | hbq_entry->dbuf.phys = phys; | 993 | hbq_entry->dbuf.phys = phys; |
929 | hbq_entry->dbuf.virt = virt; | 994 | hbq_entry->dbuf.virt = virt; |
930 | lpfc_sli_free_hbq(phba, hbq_entry); | 995 | lpfc_sli_free_hbq(phba, hbq_entry); |
996 | list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list); | ||
997 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
998 | |||
931 | return &new_hbq_entry->dbuf; | 999 | return &new_hbq_entry->dbuf; |
932 | } | 1000 | } |
933 | 1001 | ||
@@ -951,6 +1019,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
951 | uint32_t Rctl, Type; | 1019 | uint32_t Rctl, Type; |
952 | uint32_t match, i; | 1020 | uint32_t match, i; |
953 | struct lpfc_iocbq *iocbq; | 1021 | struct lpfc_iocbq *iocbq; |
1022 | struct lpfc_dmabuf *dmzbuf; | ||
954 | 1023 | ||
955 | match = 0; | 1024 | match = 0; |
956 | irsp = &(saveq->iocb); | 1025 | irsp = &(saveq->iocb); |
@@ -972,6 +1041,29 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
972 | return 1; | 1041 | return 1; |
973 | } | 1042 | } |
974 | 1043 | ||
1044 | if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && | ||
1045 | (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { | ||
1046 | if (irsp->ulpBdeCount > 0) { | ||
1047 | dmzbuf = lpfc_sli_get_buff(phba, pring, | ||
1048 | irsp->un.ulpWord[3]); | ||
1049 | lpfc_in_buf_free(phba, dmzbuf); | ||
1050 | } | ||
1051 | |||
1052 | if (irsp->ulpBdeCount > 1) { | ||
1053 | dmzbuf = lpfc_sli_get_buff(phba, pring, | ||
1054 | irsp->unsli3.sli3Words[3]); | ||
1055 | lpfc_in_buf_free(phba, dmzbuf); | ||
1056 | } | ||
1057 | |||
1058 | if (irsp->ulpBdeCount > 2) { | ||
1059 | dmzbuf = lpfc_sli_get_buff(phba, pring, | ||
1060 | irsp->unsli3.sli3Words[7]); | ||
1061 | lpfc_in_buf_free(phba, dmzbuf); | ||
1062 | } | ||
1063 | |||
1064 | return 1; | ||
1065 | } | ||
1066 | |||
975 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { | 1067 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { |
976 | if (irsp->ulpBdeCount != 0) { | 1068 | if (irsp->ulpBdeCount != 0) { |
977 | saveq->context2 = lpfc_sli_get_buff(phba, pring, | 1069 | saveq->context2 = lpfc_sli_get_buff(phba, pring, |
@@ -2293,6 +2385,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba) | |||
2293 | 2385 | ||
2294 | /* Initialize the struct lpfc_sli_hbq structure for each hbq */ | 2386 | /* Initialize the struct lpfc_sli_hbq structure for each hbq */ |
2295 | phba->link_state = LPFC_INIT_MBX_CMDS; | 2387 | phba->link_state = LPFC_INIT_MBX_CMDS; |
2388 | phba->hbq_in_use = 1; | ||
2296 | 2389 | ||
2297 | hbq_entry_index = 0; | 2390 | hbq_entry_index = 0; |
2298 | for (hbqno = 0; hbqno < hbq_count; ++hbqno) { | 2391 | for (hbqno = 0; hbqno < hbq_count; ++hbqno) { |
@@ -2404,9 +2497,7 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode) | |||
2404 | if ((pmb->mb.un.varCfgPort.sli_mode == 3) && | 2497 | if ((pmb->mb.un.varCfgPort.sli_mode == 3) && |
2405 | (!pmb->mb.un.varCfgPort.cMA)) { | 2498 | (!pmb->mb.un.varCfgPort.cMA)) { |
2406 | rc = -ENXIO; | 2499 | rc = -ENXIO; |
2407 | goto do_prep_failed; | ||
2408 | } | 2500 | } |
2409 | return rc; | ||
2410 | 2501 | ||
2411 | do_prep_failed: | 2502 | do_prep_failed: |
2412 | mempool_free(pmb, phba->mbox_mem_pool); | 2503 | mempool_free(pmb, phba->mbox_mem_pool); |
@@ -2625,14 +2716,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
2625 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 2716 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
2626 | 2717 | ||
2627 | /* Mbox command <mbxCommand> cannot issue */ | 2718 | /* Mbox command <mbxCommand> cannot issue */ |
2628 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) | 2719 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); |
2629 | return MBX_NOT_FINISHED; | 2720 | return MBX_NOT_FINISHED; |
2630 | } | 2721 | } |
2631 | 2722 | ||
2632 | if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && | 2723 | if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && |
2633 | !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { | 2724 | !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { |
2634 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 2725 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
2635 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) | 2726 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); |
2636 | return MBX_NOT_FINISHED; | 2727 | return MBX_NOT_FINISHED; |
2637 | } | 2728 | } |
2638 | 2729 | ||
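Two small fixes in lpfc_sli.c are easy to miss. In lpfc_do_config_port(), the early return after the SLI-3/cMA check is dropped so every path falls through to do_prep_failed: and frees the mailbox buffer, which the old success path leaked. A hedged sketch of that record-the-error-and-fall-through-to-one-exit shape, with generic names:

#include <stdlib.h>

static int do_config(int hw_ready, int mode_ok)
{
        int rc = 0;
        void *cmd = malloc(256);        /* stand-in for the mailbox buffer */

        if (!cmd)
                return -1;

        if (!hw_ready) {
                rc = -2;
                goto out_free;          /* early failure still hits the cleanup */
        }

        /* ... issue the command, inspect the reply ... */

        if (!mode_ok)
                rc = -3;                /* record the error, keep going */

out_free:
        free(cmd);                      /* released on every path, success included */
        return rc;
}

The other fix adds the semicolons after LOG_MBOX_CANNOT_ISSUE_DATA(), presumably needed once the underlying lpfc_printf_log() becomes a do/while(0) block (see the lpfc_logmsg.h hunk above).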
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 4b633d39a82a..ca540d1d041e 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.2.4" | 21 | #define LPFC_DRIVER_VERSION "8.2.5" |
22 | 22 | ||
23 | #define LPFC_DRIVER_NAME "lpfc" | 23 | #define LPFC_DRIVER_NAME "lpfc" |
24 | 24 | ||
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 9fad7663c117..86d05beb00b8 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2006 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -327,7 +327,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) | |||
327 | * up and ready to FDISC. | 327 | * up and ready to FDISC. |
328 | */ | 328 | */ |
329 | ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); | 329 | ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); |
330 | if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { | 330 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) && |
331 | ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { | ||
331 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { | 332 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { |
332 | lpfc_set_disctmo(vport); | 333 | lpfc_set_disctmo(vport); |
333 | lpfc_initial_fdisc(vport); | 334 | lpfc_initial_fdisc(vport); |
@@ -358,7 +359,8 @@ disable_vport(struct fc_vport *fc_vport) | |||
358 | long timeout; | 359 | long timeout; |
359 | 360 | ||
360 | ndlp = lpfc_findnode_did(vport, Fabric_DID); | 361 | ndlp = lpfc_findnode_did(vport, Fabric_DID); |
361 | if (ndlp && phba->link_state >= LPFC_LINK_UP) { | 362 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) |
363 | && phba->link_state >= LPFC_LINK_UP) { | ||
362 | vport->unreg_vpi_cmpl = VPORT_INVAL; | 364 | vport->unreg_vpi_cmpl = VPORT_INVAL; |
363 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); | 365 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); |
364 | if (!lpfc_issue_els_npiv_logo(vport, ndlp)) | 366 | if (!lpfc_issue_els_npiv_logo(vport, ndlp)) |
@@ -372,6 +374,8 @@ disable_vport(struct fc_vport *fc_vport) | |||
372 | * calling lpfc_cleanup_rpis(vport, 1) | 374 | * calling lpfc_cleanup_rpis(vport, 1) |
373 | */ | 375 | */ |
374 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { | 376 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { |
377 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
378 | continue; | ||
375 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) | 379 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) |
376 | continue; | 380 | continue; |
377 | lpfc_disc_state_machine(vport, ndlp, NULL, | 381 | lpfc_disc_state_machine(vport, ndlp, NULL, |
@@ -414,7 +418,8 @@ enable_vport(struct fc_vport *fc_vport) | |||
414 | * up and ready to FDISC. | 418 | * up and ready to FDISC. |
415 | */ | 419 | */ |
416 | ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); | 420 | ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); |
417 | if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { | 421 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) |
422 | && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { | ||
418 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { | 423 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { |
419 | lpfc_set_disctmo(vport); | 424 | lpfc_set_disctmo(vport); |
420 | lpfc_initial_fdisc(vport); | 425 | lpfc_initial_fdisc(vport); |
@@ -498,7 +503,41 @@ lpfc_vport_delete(struct fc_vport *fc_vport) | |||
498 | scsi_remove_host(lpfc_shost_from_vport(vport)); | 503 | scsi_remove_host(lpfc_shost_from_vport(vport)); |
499 | 504 | ||
500 | ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); | 505 | ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); |
501 | if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && | 506 | |
507 | /* In case of driver unload, we shall not perform fabric logo as the | ||
508 | * worker thread already stopped at this stage and, in this case, we | ||
509 | * can safely skip the fabric logo. | ||
510 | */ | ||
511 | if (phba->pport->load_flag & FC_UNLOADING) { | ||
512 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) && | ||
513 | ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && | ||
514 | phba->link_state >= LPFC_LINK_UP) { | ||
515 | /* First look for the Fabric ndlp */ | ||
516 | ndlp = lpfc_findnode_did(vport, Fabric_DID); | ||
517 | if (!ndlp) | ||
518 | goto skip_logo; | ||
519 | else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
520 | ndlp = lpfc_enable_node(vport, ndlp, | ||
521 | NLP_STE_UNUSED_NODE); | ||
522 | if (!ndlp) | ||
523 | goto skip_logo; | ||
524 | } | ||
525 | /* Remove ndlp from vport node list */ | ||
526 | lpfc_dequeue_node(vport, ndlp); | ||
527 | |||
528 | /* Indicate free memory when release */ | ||
529 | spin_lock_irq(&phba->ndlp_lock); | ||
530 | NLP_SET_FREE_REQ(ndlp); | ||
531 | spin_unlock_irq(&phba->ndlp_lock); | ||
532 | /* Kick off release ndlp when it can be safely done */ | ||
533 | lpfc_nlp_put(ndlp); | ||
534 | } | ||
535 | goto skip_logo; | ||
536 | } | ||
537 | |||
538 | /* Otherwise, we will perform fabric logo as needed */ | ||
539 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) && | ||
540 | ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && | ||
502 | phba->link_state >= LPFC_LINK_UP) { | 541 | phba->link_state >= LPFC_LINK_UP) { |
503 | if (vport->cfg_enable_da_id) { | 542 | if (vport->cfg_enable_da_id) { |
504 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); | 543 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); |
@@ -519,8 +558,27 @@ lpfc_vport_delete(struct fc_vport *fc_vport) | |||
519 | if (!ndlp) | 558 | if (!ndlp) |
520 | goto skip_logo; | 559 | goto skip_logo; |
521 | lpfc_nlp_init(vport, ndlp, Fabric_DID); | 560 | lpfc_nlp_init(vport, ndlp, Fabric_DID); |
561 | /* Indicate free memory when release */ | ||
562 | NLP_SET_FREE_REQ(ndlp); | ||
522 | } else { | 563 | } else { |
564 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
565 | ndlp = lpfc_enable_node(vport, ndlp, | ||
566 | NLP_STE_UNUSED_NODE); | ||
567 | if (!ndlp) | ||
568 | goto skip_logo; | ||
569 | |||
570 | /* Remove ndlp from vport node list */ | ||
523 | lpfc_dequeue_node(vport, ndlp); | 571 | lpfc_dequeue_node(vport, ndlp); |
572 | spin_lock_irq(&phba->ndlp_lock); | ||
573 | if (!NLP_CHK_FREE_REQ(ndlp)) | ||
574 | /* Indicate free memory when release */ | ||
575 | NLP_SET_FREE_REQ(ndlp); | ||
576 | else { | ||
577 | /* Skip this if ndlp is already in free mode */ | ||
578 | spin_unlock_irq(&phba->ndlp_lock); | ||
579 | goto skip_logo; | ||
580 | } | ||
581 | spin_unlock_irq(&phba->ndlp_lock); | ||
524 | } | 582 | } |
525 | vport->unreg_vpi_cmpl = VPORT_INVAL; | 583 | vport->unreg_vpi_cmpl = VPORT_INVAL; |
526 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); | 584 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); |
@@ -534,9 +592,9 @@ skip_logo: | |||
534 | lpfc_sli_host_down(vport); | 592 | lpfc_sli_host_down(vport); |
535 | 593 | ||
536 | lpfc_stop_vport_timers(vport); | 594 | lpfc_stop_vport_timers(vport); |
537 | lpfc_unreg_all_rpis(vport); | ||
538 | 595 | ||
539 | if (!(phba->pport->load_flag & FC_UNLOADING)) { | 596 | if (!(phba->pport->load_flag & FC_UNLOADING)) { |
597 | lpfc_unreg_all_rpis(vport); | ||
540 | lpfc_unreg_default_rpis(vport); | 598 | lpfc_unreg_default_rpis(vport); |
541 | /* | 599 | /* |
542 | * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) | 600 | * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) |
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index b6587a6d8486..0ad215e27b83 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c | |||
@@ -59,7 +59,6 @@ EXPORT_SYMBOL(mraid_mm_register_adp); | |||
59 | EXPORT_SYMBOL(mraid_mm_unregister_adp); | 59 | EXPORT_SYMBOL(mraid_mm_unregister_adp); |
60 | EXPORT_SYMBOL(mraid_mm_adapter_app_handle); | 60 | EXPORT_SYMBOL(mraid_mm_adapter_app_handle); |
61 | 61 | ||
62 | static int majorno; | ||
63 | static uint32_t drvr_ver = 0x02200207; | 62 | static uint32_t drvr_ver = 0x02200207; |
64 | 63 | ||
65 | static int adapters_count_g; | 64 | static int adapters_count_g; |
@@ -76,6 +75,12 @@ static const struct file_operations lsi_fops = { | |||
76 | .owner = THIS_MODULE, | 75 | .owner = THIS_MODULE, |
77 | }; | 76 | }; |
78 | 77 | ||
78 | static struct miscdevice megaraid_mm_dev = { | ||
79 | .minor = MISC_DYNAMIC_MINOR, | ||
80 | .name = "megadev0", | ||
81 | .fops = &lsi_fops, | ||
82 | }; | ||
83 | |||
79 | /** | 84 | /** |
80 | * mraid_mm_open - open routine for char node interface | 85 | * mraid_mm_open - open routine for char node interface |
81 | * @inode : unused | 86 | * @inode : unused |
@@ -1184,15 +1189,16 @@ mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp) | |||
1184 | static int __init | 1189 | static int __init |
1185 | mraid_mm_init(void) | 1190 | mraid_mm_init(void) |
1186 | { | 1191 | { |
1192 | int err; | ||
1193 | |||
1187 | // Announce the driver version | 1194 | // Announce the driver version |
1188 | con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n", | 1195 | con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n", |
1189 | LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION)); | 1196 | LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION)); |
1190 | 1197 | ||
1191 | majorno = register_chrdev(0, "megadev", &lsi_fops); | 1198 | err = misc_register(&megaraid_mm_dev); |
1192 | 1199 | if (err < 0) { | |
1193 | if (majorno < 0) { | 1200 | con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n")); |
1194 | con_log(CL_ANN, ("megaraid cmm: cannot get major\n")); | 1201 | return err; |
1195 | return majorno; | ||
1196 | } | 1202 | } |
1197 | 1203 | ||
1198 | init_waitqueue_head(&wait_q); | 1204 | init_waitqueue_head(&wait_q); |
@@ -1230,7 +1236,7 @@ mraid_mm_exit(void) | |||
1230 | { | 1236 | { |
1231 | con_log(CL_DLEVEL1 , ("exiting common mod\n")); | 1237 | con_log(CL_DLEVEL1 , ("exiting common mod\n")); |
1232 | 1238 | ||
1233 | unregister_chrdev(majorno, "megadev"); | 1239 | misc_deregister(&megaraid_mm_dev); |
1234 | } | 1240 | } |
1235 | 1241 | ||
1236 | module_init(mraid_mm_init); | 1242 | module_init(mraid_mm_init); |
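The management node moves from register_chrdev() (which reserves a whole major) to the misc subsystem: a dynamic minor under the shared misc major, with the node name coming from the miscdevice's name field, here megadev0. A minimal, hedged sketch of the same registration pattern as a standalone module; mydev0 and the my_* names are placeholders.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>

static const struct file_operations my_fops = {
        .owner = THIS_MODULE,
        /* .open / .release / .unlocked_ioctl as required */
};

static struct miscdevice my_misc_dev = {
        .minor = MISC_DYNAMIC_MINOR,    /* let the misc core pick a minor */
        .name  = "mydev0",              /* shows up as /dev/mydev0 */
        .fops  = &my_fops,
};

static int __init my_init(void)
{
        return misc_register(&my_misc_dev);
}

static void __exit my_exit(void)
{
        misc_deregister(&my_misc_dev);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");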
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h index c8762b2b8ed1..55b425c0a654 100644 --- a/drivers/scsi/megaraid/megaraid_mm.h +++ b/drivers/scsi/megaraid/megaraid_mm.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/moduleparam.h> | 22 | #include <linux/moduleparam.h> |
23 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
24 | #include <linux/list.h> | 24 | #include <linux/list.h> |
25 | #include <linux/miscdevice.h> | ||
25 | 26 | ||
26 | #include "mbox_defs.h" | 27 | #include "mbox_defs.h" |
27 | #include "megaraid_ioctl.h" | 28 | #include "megaraid_ioctl.h" |
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 2a6e4f472eaa..a57fed47b39d 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c | |||
@@ -416,11 +416,11 @@ static int ses_intf_add(struct class_device *cdev, | |||
416 | int i, j, types, len, components = 0; | 416 | int i, j, types, len, components = 0; |
417 | int err = -ENOMEM; | 417 | int err = -ENOMEM; |
418 | struct enclosure_device *edev; | 418 | struct enclosure_device *edev; |
419 | struct ses_component *scomp; | 419 | struct ses_component *scomp = NULL; |
420 | 420 | ||
421 | if (!scsi_device_enclosure(sdev)) { | 421 | if (!scsi_device_enclosure(sdev)) { |
422 | /* not an enclosure, but might be in one */ | 422 | /* not an enclosure, but might be in one */ |
423 | edev = enclosure_find(&sdev->host->shost_gendev); | 423 | edev = enclosure_find(&sdev->host->shost_gendev); |
424 | if (edev) { | 424 | if (edev) { |
425 | ses_match_to_enclosure(edev, sdev); | 425 | ses_match_to_enclosure(edev, sdev); |
426 | class_device_put(&edev->cdev); | 426 | class_device_put(&edev->cdev); |
@@ -456,9 +456,6 @@ static int ses_intf_add(struct class_device *cdev, | |||
456 | if (!buf) | 456 | if (!buf) |
457 | goto err_free; | 457 | goto err_free; |
458 | 458 | ||
459 | ses_dev->page1 = buf; | ||
460 | ses_dev->page1_len = len; | ||
461 | |||
462 | result = ses_recv_diag(sdev, 1, buf, len); | 459 | result = ses_recv_diag(sdev, 1, buf, len); |
463 | if (result) | 460 | if (result) |
464 | goto recv_failed; | 461 | goto recv_failed; |
@@ -473,6 +470,9 @@ static int ses_intf_add(struct class_device *cdev, | |||
473 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) | 470 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) |
474 | components += type_ptr[1]; | 471 | components += type_ptr[1]; |
475 | } | 472 | } |
473 | ses_dev->page1 = buf; | ||
474 | ses_dev->page1_len = len; | ||
475 | buf = NULL; | ||
476 | 476 | ||
477 | result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE); | 477 | result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE); |
478 | if (result) | 478 | if (result) |
@@ -489,6 +489,7 @@ static int ses_intf_add(struct class_device *cdev, | |||
489 | goto recv_failed; | 489 | goto recv_failed; |
490 | ses_dev->page2 = buf; | 490 | ses_dev->page2 = buf; |
491 | ses_dev->page2_len = len; | 491 | ses_dev->page2_len = len; |
492 | buf = NULL; | ||
492 | 493 | ||
493 | /* The additional information page --- allows us | 494 | /* The additional information page --- allows us |
494 | * to match up the devices */ | 495 | * to match up the devices */ |
@@ -506,11 +507,12 @@ static int ses_intf_add(struct class_device *cdev, | |||
506 | goto recv_failed; | 507 | goto recv_failed; |
507 | ses_dev->page10 = buf; | 508 | ses_dev->page10 = buf; |
508 | ses_dev->page10_len = len; | 509 | ses_dev->page10_len = len; |
510 | buf = NULL; | ||
509 | 511 | ||
510 | no_page10: | 512 | no_page10: |
511 | scomp = kmalloc(sizeof(struct ses_component) * components, GFP_KERNEL); | 513 | scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL); |
512 | if (!scomp) | 514 | if (!scomp) |
513 | goto err_free; | 515 | goto err_free; |
514 | 516 | ||
515 | edev = enclosure_register(cdev->dev, sdev->sdev_gendev.bus_id, | 517 | edev = enclosure_register(cdev->dev, sdev->sdev_gendev.bus_id, |
516 | components, &ses_enclosure_callbacks); | 518 | components, &ses_enclosure_callbacks); |
@@ -521,10 +523,9 @@ static int ses_intf_add(struct class_device *cdev, | |||
521 | 523 | ||
522 | edev->scratch = ses_dev; | 524 | edev->scratch = ses_dev; |
523 | for (i = 0; i < components; i++) | 525 | for (i = 0; i < components; i++) |
524 | edev->component[i].scratch = scomp++; | 526 | edev->component[i].scratch = scomp + i; |
525 | 527 | ||
526 | /* Page 7 for the descriptors is optional */ | 528 | /* Page 7 for the descriptors is optional */ |
527 | buf = NULL; | ||
528 | result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE); | 529 | result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE); |
529 | if (result) | 530 | if (result) |
530 | goto simple_populate; | 531 | goto simple_populate; |
@@ -532,6 +533,8 @@ static int ses_intf_add(struct class_device *cdev, | |||
532 | len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; | 533 | len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; |
533 | /* add 1 for trailing '\0' we'll use */ | 534 | /* add 1 for trailing '\0' we'll use */ |
534 | buf = kzalloc(len + 1, GFP_KERNEL); | 535 | buf = kzalloc(len + 1, GFP_KERNEL); |
536 | if (!buf) | ||
537 | goto simple_populate; | ||
535 | result = ses_recv_diag(sdev, 7, buf, len); | 538 | result = ses_recv_diag(sdev, 7, buf, len); |
536 | if (result) { | 539 | if (result) { |
537 | simple_populate: | 540 | simple_populate: |
@@ -598,6 +601,7 @@ static int ses_intf_add(struct class_device *cdev, | |||
598 | err = -ENODEV; | 601 | err = -ENODEV; |
599 | err_free: | 602 | err_free: |
600 | kfree(buf); | 603 | kfree(buf); |
604 | kfree(scomp); | ||
601 | kfree(ses_dev->page10); | 605 | kfree(ses_dev->page10); |
602 | kfree(ses_dev->page2); | 606 | kfree(ses_dev->page2); |
603 | kfree(ses_dev->page1); | 607 | kfree(ses_dev->page1); |
@@ -630,6 +634,7 @@ static void ses_intf_remove(struct class_device *cdev, | |||
630 | ses_dev = edev->scratch; | 634 | ses_dev = edev->scratch; |
631 | edev->scratch = NULL; | 635 | edev->scratch = NULL; |
632 | 636 | ||
637 | kfree(ses_dev->page10); | ||
633 | kfree(ses_dev->page1); | 638 | kfree(ses_dev->page1); |
634 | kfree(ses_dev->page2); | 639 | kfree(ses_dev->page2); |
635 | kfree(ses_dev); | 640 | kfree(ses_dev); |
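The ses leak fixes all hinge on pointer ownership: once a buffer has been stored in ses_dev->page1/page2/page10, the local buf is set to NULL so the shared err_free: path cannot free it a second time, scomp is allocated with kzalloc() and freed explicitly on error, and the remove path now frees page10 as well. A hedged user-space sketch of the NULL-on-handoff idea; names are illustrative.

#include <stdlib.h>
#include <string.h>

struct dev_state {
        char *page1;
        char *page2;
};

/* Allocate two pages into a zero-initialized 'st'; on failure, free
 * whatever is still owned by the local 'buf' plus anything already
 * handed off to 'st'. */
static int setup_pages(struct dev_state *st)
{
        char *buf = NULL;

        buf = malloc(64);
        if (!buf)
                goto err_free;
        memset(buf, 0, 64);
        st->page1 = buf;
        buf = NULL;     /* ownership transferred; error path must not free it */

        buf = malloc(128);
        if (!buf)
                goto err_free;
        memset(buf, 0, 128);
        st->page2 = buf;
        buf = NULL;

        return 0;

err_free:
        free(buf);      /* frees only what was never handed off */
        free(st->page1);
        st->page1 = NULL;
        return -1;
}

int main(void)
{
        struct dev_state st = { 0 };

        if (setup_pages(&st) == 0) {
                free(st.page2);
                free(st.page1);
        }
        return 0;
}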
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c index 6325901e5093..f7d279542fa5 100644 --- a/drivers/scsi/sym53c416.c +++ b/drivers/scsi/sym53c416.c | |||
@@ -187,10 +187,10 @@ | |||
187 | #define sym53c416_base_2 sym53c416_2 | 187 | #define sym53c416_base_2 sym53c416_2 |
188 | #define sym53c416_base_3 sym53c416_3 | 188 | #define sym53c416_base_3 sym53c416_3 |
189 | 189 | ||
190 | static unsigned int sym53c416_base[2] = {0,0}; | 190 | static unsigned int sym53c416_base[2]; |
191 | static unsigned int sym53c416_base_1[2] = {0,0}; | 191 | static unsigned int sym53c416_base_1[2]; |
192 | static unsigned int sym53c416_base_2[2] = {0,0}; | 192 | static unsigned int sym53c416_base_2[2]; |
193 | static unsigned int sym53c416_base_3[2] = {0,0}; | 193 | static unsigned int sym53c416_base_3[2]; |
194 | 194 | ||
195 | #endif | 195 | #endif |
196 | 196 | ||
@@ -621,25 +621,25 @@ int __init sym53c416_detect(struct scsi_host_template *tpnt) | |||
621 | int ints[3]; | 621 | int ints[3]; |
622 | 622 | ||
623 | ints[0] = 2; | 623 | ints[0] = 2; |
624 | if(sym53c416_base) | 624 | if(sym53c416_base[0]) |
625 | { | 625 | { |
626 | ints[1] = sym53c416_base[0]; | 626 | ints[1] = sym53c416_base[0]; |
627 | ints[2] = sym53c416_base[1]; | 627 | ints[2] = sym53c416_base[1]; |
628 | sym53c416_setup(NULL, ints); | 628 | sym53c416_setup(NULL, ints); |
629 | } | 629 | } |
630 | if(sym53c416_base_1) | 630 | if(sym53c416_base_1[0]) |
631 | { | 631 | { |
632 | ints[1] = sym53c416_base_1[0]; | 632 | ints[1] = sym53c416_base_1[0]; |
633 | ints[2] = sym53c416_base_1[1]; | 633 | ints[2] = sym53c416_base_1[1]; |
634 | sym53c416_setup(NULL, ints); | 634 | sym53c416_setup(NULL, ints); |
635 | } | 635 | } |
636 | if(sym53c416_base_2) | 636 | if(sym53c416_base_2[0]) |
637 | { | 637 | { |
638 | ints[1] = sym53c416_base_2[0]; | 638 | ints[1] = sym53c416_base_2[0]; |
639 | ints[2] = sym53c416_base_2[1]; | 639 | ints[2] = sym53c416_base_2[1]; |
640 | sym53c416_setup(NULL, ints); | 640 | sym53c416_setup(NULL, ints); |
641 | } | 641 | } |
642 | if(sym53c416_base_3) | 642 | if(sym53c416_base_3[0]) |
643 | { | 643 | { |
644 | ints[1] = sym53c416_base_3[0]; | 644 | ints[1] = sym53c416_base_3[0]; |
645 | ints[2] = sym53c416_base_3[1]; | 645 | ints[2] = sym53c416_base_3[1]; |
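The sym53c416 fix addresses tests like if (sym53c416_base): each symbol is a static array, so the expression decays to a non-NULL pointer and is always true, which apparently let the setup call run with zeroed io/irq values even when the module parameter was never given. Testing the first element instead asks whether a value was actually stored. A tiny user-space demonstration:

#include <stdio.h>

static unsigned int base[2];    /* zero-initialized, like the module parameters */

int main(void)
{
        if (base)               /* the array decays to &base[0]: always true */
                printf("'if (base)' is always taken\n");

        if (base[0])            /* true only once a value has been stored */
                printf("parameter set to 0x%x\n", base[0]);
        else
                printf("parameter left unset\n");
        return 0;
}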
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index d1299e999723..530ff4c553f8 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | #include <linux/workqueue.h> | 7 | #include <linux/workqueue.h> |
8 | #include <linux/mutex.h> | 8 | #include <linux/mutex.h> |
9 | #include <scsi/scsi.h> | ||
9 | 10 | ||
10 | struct request_queue; | 11 | struct request_queue; |
11 | struct block_device; | 12 | struct block_device; |
@@ -25,12 +26,15 @@ struct blk_queue_tags; | |||
25 | * NONE: Self evident. Host adapter is not capable of scatter-gather. | 26 | * NONE: Self evident. Host adapter is not capable of scatter-gather. |
26 | * ALL: Means that the host adapter module can do scatter-gather, | 27 | * ALL: Means that the host adapter module can do scatter-gather, |
27 | * and that there is no limit to the size of the table to which | 28 | * and that there is no limit to the size of the table to which |
28 | * we scatter/gather data. | 29 | * we scatter/gather data. The value we set here is the maximum |
30 | * single element sglist. To use chained sglists, the adapter | ||
31 | * has to set a value beyond ALL (and correctly use the chain | ||
32 | * handling API. | ||
29 | * Anything else: Indicates the maximum number of chains that can be | 33 | * Anything else: Indicates the maximum number of chains that can be |
30 | * used in one scatter-gather request. | 34 | * used in one scatter-gather request. |
31 | */ | 35 | */ |
32 | #define SG_NONE 0 | 36 | #define SG_NONE 0 |
33 | #define SG_ALL 0xff | 37 | #define SG_ALL SCSI_MAX_SG_SEGMENTS |
34 | 38 | ||
35 | #define MODE_UNKNOWN 0x00 | 39 | #define MODE_UNKNOWN 0x00 |
36 | #define MODE_INITIATOR 0x01 | 40 | #define MODE_INITIATOR 0x01 |
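With this hunk SG_ALL stops being a bare 0xff and tracks SCSI_MAX_SG_SEGMENTS from <scsi/scsi.h>, i.e. the largest scatterlist that fits in a single unchained segment table, so a driver that advertises SG_ALL gets the biggest table that does not require sglist chaining. A hedged sketch of how a host template typically advertises it; the my_* names are placeholders and the queuecommand stub does nothing useful.

#include <linux/module.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int my_queuecommand(struct scsi_cmnd *cmd,
                           void (*done)(struct scsi_cmnd *))
{
        done(cmd);                      /* placeholder: complete immediately */
        return 0;
}

static struct scsi_host_template my_host_template = {
        .module         = THIS_MODULE,
        .name           = "my_hba",
        .queuecommand   = my_queuecommand,
        .this_id        = -1,
        .sg_tablesize   = SG_ALL,       /* largest unchained scatterlist */
        .cmd_per_lun    = 1,
        .use_clustering = ENABLE_CLUSTERING,
};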