author     David S. Miller <davem@davemloft.net>    2008-02-23 23:38:20 -0500
committer  David S. Miller <davem@davemloft.net>    2008-02-23 23:38:20 -0500
commit     8d3c202be23c5a915f7053ebd4e96f44700c6a62 (patch)
tree       e0f017aff86d3ad0b858fe85f44e11096087ed00 /drivers/scsi
parent     1b04ab4597725f75f94942da9aa40daa7b9a4bd9 (diff)
parent     038eb0ea04b245351be34b0ae76b55eee4603989 (diff)
Merge branch 'master' of ../linux-2.6/
Diffstat (limited to 'drivers/scsi')
57 files changed, 4353 insertions, 544 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a5f0aaaf0dd4..c46666a24809 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -722,7 +722,7 @@ config SCSI_FD_MCS | |||
722 | 722 | ||
723 | config SCSI_GDTH | 723 | config SCSI_GDTH |
724 | tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support" | 724 | tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support" |
725 | depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API && PCI_LEGACY | 725 | depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API |
726 | ---help--- | 726 | ---help--- |
727 | Formerly called GDT SCSI Disk Array Controller Support. | 727 | Formerly called GDT SCSI Disk Array Controller Support. |
728 | 728 | ||
@@ -992,6 +992,16 @@ config SCSI_IZIP_SLOW_CTR | |||
992 | 992 | ||
993 | Generally, saying N is fine. | 993 | Generally, saying N is fine. |
994 | 994 | ||
995 | config SCSI_MVSAS | ||
996 | tristate "Marvell 88SE6440 SAS/SATA support" | ||
997 | depends on PCI && SCSI | ||
998 | select SCSI_SAS_LIBSAS | ||
999 | help | ||
1000 | This driver supports Marvell SAS/SATA PCI devices. | ||
1001 | |||
1002 | To compile this driver as a module, choose M here: the module | ||
1003 | will be called mvsas. | ||
1004 | |||
995 | config SCSI_NCR53C406A | 1005 | config SCSI_NCR53C406A |
996 | tristate "NCR53c406a SCSI support" | 1006 | tristate "NCR53c406a SCSI support" |
997 | depends on ISA && SCSI | 1007 | depends on ISA && SCSI |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 925c26b4fff9..23e6ecbd4778 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -119,6 +119,7 @@ obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/ | |||
119 | obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/ | 119 | obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/ |
120 | obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o | 120 | obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o |
121 | obj-$(CONFIG_SCSI_STEX) += stex.o | 121 | obj-$(CONFIG_SCSI_STEX) += stex.o |
122 | obj-$(CONFIG_SCSI_MVSAS) += mvsas.o | ||
122 | obj-$(CONFIG_PS3_ROM) += ps3rom.o | 123 | obj-$(CONFIG_PS3_ROM) += ps3rom.o |
123 | 124 | ||
124 | obj-$(CONFIG_ARM) += arm/ | 125 | obj-$(CONFIG_ARM) += arm/ |
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index bfd0e64964ac..c05092fd3a9d 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -144,51 +144,77 @@ static char *aac_get_status_string(u32 status); | |||
144 | */ | 144 | */ |
145 | 145 | ||
146 | static int nondasd = -1; | 146 | static int nondasd = -1; |
147 | static int aac_cache = 0; | 147 | static int aac_cache; |
148 | static int dacmode = -1; | 148 | static int dacmode = -1; |
149 | 149 | int aac_msi; | |
150 | int aac_commit = -1; | 150 | int aac_commit = -1; |
151 | int startup_timeout = 180; | 151 | int startup_timeout = 180; |
152 | int aif_timeout = 120; | 152 | int aif_timeout = 120; |
153 | 153 | ||
154 | module_param(nondasd, int, S_IRUGO|S_IWUSR); | 154 | module_param(nondasd, int, S_IRUGO|S_IWUSR); |
155 | MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on"); | 155 | MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices." |
156 | " 0=off, 1=on"); | ||
156 | module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR); | 157 | module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR); |
157 | MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n\tbit 0 - Disable FUA in WRITE SCSI commands\n\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n\tbit 2 - Disable only if Battery not protecting Cache"); | 158 | MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n" |
159 | "\tbit 0 - Disable FUA in WRITE SCSI commands\n" | ||
160 | "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n" | ||
161 | "\tbit 2 - Disable only if Battery not protecting Cache"); | ||
158 | module_param(dacmode, int, S_IRUGO|S_IWUSR); | 162 | module_param(dacmode, int, S_IRUGO|S_IWUSR); |
159 | MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on"); | 163 | MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC." |
164 | " 0=off, 1=on"); | ||
160 | module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR); | 165 | module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR); |
161 | MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on"); | 166 | MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the" |
167 | " adapter for foreign arrays.\n" | ||
168 | "This is typically needed in systems that do not have a BIOS." | ||
169 | " 0=off, 1=on"); | ||
170 | module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR); | ||
171 | MODULE_PARM_DESC(msi, "IRQ handling." | ||
172 | " 0=PIC(default), 1=MSI, 2=MSI-X(unsupported, uses MSI)"); | ||
162 | module_param(startup_timeout, int, S_IRUGO|S_IWUSR); | 173 | module_param(startup_timeout, int, S_IRUGO|S_IWUSR); |
163 | MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for adapter to have it's kernel up and\nrunning. This is typically adjusted for large systems that do not have a BIOS."); | 174 | MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for" |
175 | " adapter to have it's kernel up and\n" | ||
176 | "running. This is typically adjusted for large systems that do not" | ||
177 | " have a BIOS."); | ||
164 | module_param(aif_timeout, int, S_IRUGO|S_IWUSR); | 178 | module_param(aif_timeout, int, S_IRUGO|S_IWUSR); |
165 | MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for applications to pick up AIFs before\nderegistering them. This is typically adjusted for heavily burdened systems."); | 179 | MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for" |
180 | " applications to pick up AIFs before\n" | ||
181 | "deregistering them. This is typically adjusted for heavily burdened" | ||
182 | " systems."); | ||
166 | 183 | ||
167 | int numacb = -1; | 184 | int numacb = -1; |
168 | module_param(numacb, int, S_IRUGO|S_IWUSR); | 185 | module_param(numacb, int, S_IRUGO|S_IWUSR); |
169 | MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control blocks (FIB) allocated. Valid values are 512 and down. Default is to use suggestion from Firmware."); | 186 | MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control" |
187 | " blocks (FIB) allocated. Valid values are 512 and down. Default is" | ||
188 | " to use suggestion from Firmware."); | ||
170 | 189 | ||
171 | int acbsize = -1; | 190 | int acbsize = -1; |
172 | module_param(acbsize, int, S_IRUGO|S_IWUSR); | 191 | module_param(acbsize, int, S_IRUGO|S_IWUSR); |
173 | MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware."); | 192 | MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)" |
193 | " size. Valid values are 512, 2048, 4096 and 8192. Default is to use" | ||
194 | " suggestion from Firmware."); | ||
174 | 195 | ||
175 | int update_interval = 30 * 60; | 196 | int update_interval = 30 * 60; |
176 | module_param(update_interval, int, S_IRUGO|S_IWUSR); | 197 | module_param(update_interval, int, S_IRUGO|S_IWUSR); |
177 | MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync updates issued to adapter."); | 198 | MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync" |
199 | " updates issued to adapter."); | ||
178 | 200 | ||
179 | int check_interval = 24 * 60 * 60; | 201 | int check_interval = 24 * 60 * 60; |
180 | module_param(check_interval, int, S_IRUGO|S_IWUSR); | 202 | module_param(check_interval, int, S_IRUGO|S_IWUSR); |
181 | MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health checks."); | 203 | MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health" |
204 | " checks."); | ||
182 | 205 | ||
183 | int aac_check_reset = 1; | 206 | int aac_check_reset = 1; |
184 | module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR); | 207 | module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR); |
185 | MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the adapter. a value of -1 forces the reset to adapters programmed to ignore it."); | 208 | MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the" |
209 | " adapter. a value of -1 forces the reset to adapters programmed to" | ||
210 | " ignore it."); | ||
186 | 211 | ||
187 | int expose_physicals = -1; | 212 | int expose_physicals = -1; |
188 | module_param(expose_physicals, int, S_IRUGO|S_IWUSR); | 213 | module_param(expose_physicals, int, S_IRUGO|S_IWUSR); |
189 | MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on"); | 214 | MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays." |
215 | " -1=protect 0=off, 1=on"); | ||
190 | 216 | ||
191 | int aac_reset_devices = 0; | 217 | int aac_reset_devices; |
192 | module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR); | 218 | module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR); |
193 | MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization."); | 219 | MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization."); |
194 | 220 | ||
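Editor's note: the hunk above reflows the aacraid parameter descriptions and adds the new msi option. A minimal sketch of the pattern, using an illustrative variable name rather than the driver's own aac_msi wiring; the split MODULE_PARM_DESC strings still form one help text because adjacent C string literals are concatenated by the compiler.

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

/* Sketch only: a writable int parameter with a description split across
 * source lines to stay within the line-length limit. */
static int example_msi;		/* 0 = legacy INTx/PIC interrupts (default) */

module_param_named(msi, example_msi, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(msi, "IRQ handling."
	" 0=PIC(default), 1=MSI, 2=MSI-X(unsupported, uses MSI)");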
@@ -1315,7 +1341,7 @@ int aac_get_adapter_info(struct aac_dev* dev) | |||
1315 | (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid), | 1341 | (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid), |
1316 | dev->supplement_adapter_info.VpdInfo.Tsid); | 1342 | dev->supplement_adapter_info.VpdInfo.Tsid); |
1317 | } | 1343 | } |
1318 | if (!aac_check_reset || ((aac_check_reset != 1) && | 1344 | if (!aac_check_reset || ((aac_check_reset == 1) && |
1319 | (dev->supplement_adapter_info.SupportedOptions2 & | 1345 | (dev->supplement_adapter_info.SupportedOptions2 & |
1320 | AAC_OPTION_IGNORE_RESET))) { | 1346 | AAC_OPTION_IGNORE_RESET))) { |
1321 | printk(KERN_INFO "%s%d: Reset Adapter Ignored\n", | 1347 | printk(KERN_INFO "%s%d: Reset Adapter Ignored\n", |
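Editor's note: per the check_reset parameter description earlier in this file, 0 means never reset on a failed health check, 1 means reset unless the adapter asks to ignore it, and -1 forces the reset anyway. A hedged reading of the corrected condition, written as a hypothetical helper rather than the driver's code:

#include <linux/types.h>

/* Sketch: skip the adapter reset when checking is disabled, or when it is
 * left at the default of 1 and the firmware advertises
 * AAC_OPTION_IGNORE_RESET; -1 falls through and forces the reset. */
static bool example_skip_reset(int check_reset, u32 options2, u32 ignore_bit)
{
	return !check_reset ||
	       ((check_reset == 1) && (options2 & ignore_bit));
}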
@@ -1353,13 +1379,14 @@ int aac_get_adapter_info(struct aac_dev* dev) | |||
1353 | 1379 | ||
1354 | if (nondasd != -1) | 1380 | if (nondasd != -1) |
1355 | dev->nondasd_support = (nondasd!=0); | 1381 | dev->nondasd_support = (nondasd!=0); |
1356 | if(dev->nondasd_support != 0) { | 1382 | if (dev->nondasd_support && !dev->in_reset) |
1357 | printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id); | 1383 | printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id); |
1358 | } | ||
1359 | 1384 | ||
1360 | dev->dac_support = 0; | 1385 | dev->dac_support = 0; |
1361 | if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){ | 1386 | if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){ |
1362 | printk(KERN_INFO "%s%d: 64bit support enabled.\n", dev->name, dev->id); | 1387 | if (!dev->in_reset) |
1388 | printk(KERN_INFO "%s%d: 64bit support enabled.\n", | ||
1389 | dev->name, dev->id); | ||
1363 | dev->dac_support = 1; | 1390 | dev->dac_support = 1; |
1364 | } | 1391 | } |
1365 | 1392 | ||
@@ -1369,8 +1396,9 @@ int aac_get_adapter_info(struct aac_dev* dev) | |||
1369 | if(dev->dac_support != 0) { | 1396 | if(dev->dac_support != 0) { |
1370 | if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) && | 1397 | if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) && |
1371 | !pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) { | 1398 | !pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) { |
1372 | printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n", | 1399 | if (!dev->in_reset) |
1373 | dev->name, dev->id); | 1400 | printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n", |
1401 | dev->name, dev->id); | ||
1374 | } else if (!pci_set_dma_mask(dev->pdev, DMA_32BIT_MASK) && | 1402 | } else if (!pci_set_dma_mask(dev->pdev, DMA_32BIT_MASK) && |
1375 | !pci_set_consistent_dma_mask(dev->pdev, DMA_32BIT_MASK)) { | 1403 | !pci_set_consistent_dma_mask(dev->pdev, DMA_32BIT_MASK)) { |
1376 | printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n", | 1404 | printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n", |
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 3195d29f2177..ace0b751c131 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1026,6 +1026,7 @@ struct aac_dev | |||
1026 | u8 raw_io_64; | 1026 | u8 raw_io_64; |
1027 | u8 printf_enabled; | 1027 | u8 printf_enabled; |
1028 | u8 in_reset; | 1028 | u8 in_reset; |
1029 | u8 msi; | ||
1029 | }; | 1030 | }; |
1030 | 1031 | ||
1031 | #define aac_adapter_interrupt(dev) \ | 1032 | #define aac_adapter_interrupt(dev) \ |
@@ -1881,6 +1882,7 @@ extern int startup_timeout; | |||
1881 | extern int aif_timeout; | 1882 | extern int aif_timeout; |
1882 | extern int expose_physicals; | 1883 | extern int expose_physicals; |
1883 | extern int aac_reset_devices; | 1884 | extern int aac_reset_devices; |
1885 | extern int aac_msi; | ||
1884 | extern int aac_commit; | 1886 | extern int aac_commit; |
1885 | extern int update_interval; | 1887 | extern int update_interval; |
1886 | extern int check_interval; | 1888 | extern int check_interval; |
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 81b36923e0ef..47434499e82b 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1458,7 +1458,7 @@ int aac_check_health(struct aac_dev * aac) | |||
1458 | 1458 | ||
1459 | printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); | 1459 | printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); |
1460 | 1460 | ||
1461 | if (!aac_check_reset || ((aac_check_reset != 1) && | 1461 | if (!aac_check_reset || ((aac_check_reset == 1) && |
1462 | (aac->supplement_adapter_info.SupportedOptions2 & | 1462 | (aac->supplement_adapter_info.SupportedOptions2 & |
1463 | AAC_OPTION_IGNORE_RESET))) | 1463 | AAC_OPTION_IGNORE_RESET))) |
1464 | goto out; | 1464 | goto out; |
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index e80d2a0c46af..ae5f74fb62d5 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -275,9 +275,9 @@ static const char *aac_info(struct Scsi_Host *shost) | |||
275 | 275 | ||
276 | /** | 276 | /** |
277 | * aac_get_driver_ident | 277 | * aac_get_driver_ident |
278 | * @devtype: index into lookup table | 278 | * @devtype: index into lookup table |
279 | * | 279 | * |
280 | * Returns a pointer to the entry in the driver lookup table. | 280 | * Returns a pointer to the entry in the driver lookup table. |
281 | */ | 281 | */ |
282 | 282 | ||
283 | struct aac_driver_ident* aac_get_driver_ident(int devtype) | 283 | struct aac_driver_ident* aac_get_driver_ident(int devtype) |
@@ -494,13 +494,14 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth) | |||
494 | 494 | ||
495 | static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) | 495 | static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) |
496 | { | 496 | { |
497 | struct scsi_device * sdev = to_scsi_device(dev); | 497 | struct scsi_device *sdev = to_scsi_device(dev); |
498 | struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata); | ||
498 | if (sdev_channel(sdev) != CONTAINER_CHANNEL) | 499 | if (sdev_channel(sdev) != CONTAINER_CHANNEL) |
499 | return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach | 500 | return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach |
500 | ? "Hidden\n" : "JBOD"); | 501 | ? "Hidden\n" : |
502 | ((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : "")); | ||
501 | return snprintf(buf, PAGE_SIZE, "%s\n", | 503 | return snprintf(buf, PAGE_SIZE, "%s\n", |
502 | get_container_type(((struct aac_dev *)(sdev->host->hostdata)) | 504 | get_container_type(aac->fsa_dev[sdev_id(sdev)].type)); |
503 | ->fsa_dev[sdev_id(sdev)].type)); | ||
504 | } | 505 | } |
505 | 506 | ||
506 | static struct device_attribute aac_raid_level_attr = { | 507 | static struct device_attribute aac_raid_level_attr = { |
@@ -641,7 +642,7 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) | |||
641 | AAC_OPTION_MU_RESET) && | 642 | AAC_OPTION_MU_RESET) && |
642 | aac_check_reset && | 643 | aac_check_reset && |
643 | ((aac_check_reset != 1) || | 644 | ((aac_check_reset != 1) || |
644 | (aac->supplement_adapter_info.SupportedOptions2 & | 645 | !(aac->supplement_adapter_info.SupportedOptions2 & |
645 | AAC_OPTION_IGNORE_RESET))) | 646 | AAC_OPTION_IGNORE_RESET))) |
646 | aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */ | 647 | aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */ |
647 | return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */ | 648 | return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */ |
@@ -860,8 +861,8 @@ ssize_t aac_show_serial_number(struct class_device *class_dev, char *buf) | |||
860 | le32_to_cpu(dev->adapter_info.serial[0])); | 861 | le32_to_cpu(dev->adapter_info.serial[0])); |
861 | if (len && | 862 | if (len && |
862 | !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[ | 863 | !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[ |
863 | sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)+2-len], | 864 | sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len], |
864 | buf, len)) | 865 | buf, len-1)) |
865 | len = snprintf(buf, PAGE_SIZE, "%.*s\n", | 866 | len = snprintf(buf, PAGE_SIZE, "%.*s\n", |
866 | (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo), | 867 | (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo), |
867 | dev->supplement_adapter_info.MfgPcbaSerialNo); | 868 | dev->supplement_adapter_info.MfgPcbaSerialNo); |
@@ -1004,32 +1005,32 @@ static const struct file_operations aac_cfg_fops = { | |||
1004 | 1005 | ||
1005 | static struct scsi_host_template aac_driver_template = { | 1006 | static struct scsi_host_template aac_driver_template = { |
1006 | .module = THIS_MODULE, | 1007 | .module = THIS_MODULE, |
1007 | .name = "AAC", | 1008 | .name = "AAC", |
1008 | .proc_name = AAC_DRIVERNAME, | 1009 | .proc_name = AAC_DRIVERNAME, |
1009 | .info = aac_info, | 1010 | .info = aac_info, |
1010 | .ioctl = aac_ioctl, | 1011 | .ioctl = aac_ioctl, |
1011 | #ifdef CONFIG_COMPAT | 1012 | #ifdef CONFIG_COMPAT |
1012 | .compat_ioctl = aac_compat_ioctl, | 1013 | .compat_ioctl = aac_compat_ioctl, |
1013 | #endif | 1014 | #endif |
1014 | .queuecommand = aac_queuecommand, | 1015 | .queuecommand = aac_queuecommand, |
1015 | .bios_param = aac_biosparm, | 1016 | .bios_param = aac_biosparm, |
1016 | .shost_attrs = aac_attrs, | 1017 | .shost_attrs = aac_attrs, |
1017 | .slave_configure = aac_slave_configure, | 1018 | .slave_configure = aac_slave_configure, |
1018 | .change_queue_depth = aac_change_queue_depth, | 1019 | .change_queue_depth = aac_change_queue_depth, |
1019 | .sdev_attrs = aac_dev_attrs, | 1020 | .sdev_attrs = aac_dev_attrs, |
1020 | .eh_abort_handler = aac_eh_abort, | 1021 | .eh_abort_handler = aac_eh_abort, |
1021 | .eh_host_reset_handler = aac_eh_reset, | 1022 | .eh_host_reset_handler = aac_eh_reset, |
1022 | .can_queue = AAC_NUM_IO_FIB, | 1023 | .can_queue = AAC_NUM_IO_FIB, |
1023 | .this_id = MAXIMUM_NUM_CONTAINERS, | 1024 | .this_id = MAXIMUM_NUM_CONTAINERS, |
1024 | .sg_tablesize = 16, | 1025 | .sg_tablesize = 16, |
1025 | .max_sectors = 128, | 1026 | .max_sectors = 128, |
1026 | #if (AAC_NUM_IO_FIB > 256) | 1027 | #if (AAC_NUM_IO_FIB > 256) |
1027 | .cmd_per_lun = 256, | 1028 | .cmd_per_lun = 256, |
1028 | #else | 1029 | #else |
1029 | .cmd_per_lun = AAC_NUM_IO_FIB, | 1030 | .cmd_per_lun = AAC_NUM_IO_FIB, |
1030 | #endif | 1031 | #endif |
1031 | .use_clustering = ENABLE_CLUSTERING, | 1032 | .use_clustering = ENABLE_CLUSTERING, |
1032 | .emulated = 1, | 1033 | .emulated = 1, |
1033 | }; | 1034 | }; |
1034 | 1035 | ||
1035 | static void __aac_shutdown(struct aac_dev * aac) | 1036 | static void __aac_shutdown(struct aac_dev * aac) |
@@ -1039,6 +1040,8 @@ static void __aac_shutdown(struct aac_dev * aac) | |||
1039 | aac_send_shutdown(aac); | 1040 | aac_send_shutdown(aac); |
1040 | aac_adapter_disable_int(aac); | 1041 | aac_adapter_disable_int(aac); |
1041 | free_irq(aac->pdev->irq, aac); | 1042 | free_irq(aac->pdev->irq, aac); |
1043 | if (aac->msi) | ||
1044 | pci_disable_msi(aac->pdev); | ||
1042 | } | 1045 | } |
1043 | 1046 | ||
1044 | static int __devinit aac_probe_one(struct pci_dev *pdev, | 1047 | static int __devinit aac_probe_one(struct pci_dev *pdev, |
@@ -1254,7 +1257,7 @@ static struct pci_driver aac_pci_driver = { | |||
1254 | .id_table = aac_pci_tbl, | 1257 | .id_table = aac_pci_tbl, |
1255 | .probe = aac_probe_one, | 1258 | .probe = aac_probe_one, |
1256 | .remove = __devexit_p(aac_remove_one), | 1259 | .remove = __devexit_p(aac_remove_one), |
1257 | .shutdown = aac_shutdown, | 1260 | .shutdown = aac_shutdown, |
1258 | }; | 1261 | }; |
1259 | 1262 | ||
1260 | static int __init aac_init(void) | 1263 | static int __init aac_init(void) |
@@ -1271,7 +1274,7 @@ static int __init aac_init(void) | |||
1271 | aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops); | 1274 | aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops); |
1272 | if (aac_cfg_major < 0) { | 1275 | if (aac_cfg_major < 0) { |
1273 | printk(KERN_WARNING | 1276 | printk(KERN_WARNING |
1274 | "aacraid: unable to register \"aac\" device.\n"); | 1277 | "aacraid: unable to register \"aac\" device.\n"); |
1275 | } | 1278 | } |
1276 | 1279 | ||
1277 | return 0; | 1280 | return 0; |
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index a08bbf1fd76c..1f18b83e1e02 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -625,8 +625,11 @@ int _aac_rx_init(struct aac_dev *dev) | |||
625 | if (aac_init_adapter(dev) == NULL) | 625 | if (aac_init_adapter(dev) == NULL) |
626 | goto error_iounmap; | 626 | goto error_iounmap; |
627 | aac_adapter_comm(dev, dev->comm_interface); | 627 | aac_adapter_comm(dev, dev->comm_interface); |
628 | if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr, | 628 | dev->msi = aac_msi && !pci_enable_msi(dev->pdev); |
629 | if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, | ||
629 | IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { | 630 | IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { |
631 | if (dev->msi) | ||
632 | pci_disable_msi(dev->pdev); | ||
630 | printk(KERN_ERR "%s%d: Interrupt unavailable.\n", | 633 | printk(KERN_ERR "%s%d: Interrupt unavailable.\n", |
631 | name, instance); | 634 | name, instance); |
632 | goto error_iounmap; | 635 | goto error_iounmap; |
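Editor's note: the rx.c hunk above is where the new msi option takes effect — MSI is attempted only if requested, the driver falls back to INTx when pci_enable_msi() fails, and the IRQ is always taken from pdev->irq (which pci_enable_msi() rewrites to the MSI vector). A minimal sketch of the same enable/fallback/teardown pattern, with illustrative names rather than the aacraid functions:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

/* Sketch: returns 1 if MSI ended up enabled, 0 for INTx, -EBUSY on failure. */
static int example_setup_irq(struct pci_dev *pdev, int want_msi,
			     irq_handler_t handler, void *data)
{
	int msi = want_msi && !pci_enable_msi(pdev);

	if (request_irq(pdev->irq, handler, IRQF_SHARED, "example", data) < 0) {
		if (msi)
			pci_disable_msi(pdev);	/* undo MSI before failing */
		return -EBUSY;
	}
	return msi;	/* remembered so shutdown knows to disable MSI */
}

/* Sketch: teardown mirrors setup, as in __aac_shutdown() above. */
static void example_teardown_irq(struct pci_dev *pdev, int msi, void *data)
{
	free_irq(pdev->irq, data);
	if (msi)
		pci_disable_msi(pdev);
}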
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 85b91bc578c9..cfc3410ec073 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/types.h> | 33 | #include <linux/types.h> |
34 | #include <linux/pci.h> | ||
34 | #include <linux/spinlock.h> | 35 | #include <linux/spinlock.h> |
35 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
36 | #include <linux/blkdev.h> | 37 | #include <linux/blkdev.h> |
@@ -385,7 +386,7 @@ int aac_sa_init(struct aac_dev *dev) | |||
385 | 386 | ||
386 | if(aac_init_adapter(dev) == NULL) | 387 | if(aac_init_adapter(dev) == NULL) |
387 | goto error_irq; | 388 | goto error_irq; |
388 | if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr, | 389 | if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, |
389 | IRQF_SHARED|IRQF_DISABLED, | 390 | IRQF_SHARED|IRQF_DISABLED, |
390 | "aacraid", (void *)dev ) < 0) { | 391 | "aacraid", (void *)dev ) < 0) { |
391 | printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", | 392 | printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", |
@@ -403,7 +404,7 @@ int aac_sa_init(struct aac_dev *dev) | |||
403 | 404 | ||
404 | error_irq: | 405 | error_irq: |
405 | aac_sa_disable_interrupt(dev); | 406 | aac_sa_disable_interrupt(dev); |
406 | free_irq(dev->scsi_host_ptr->irq, (void *)dev); | 407 | free_irq(dev->pdev->irq, (void *)dev); |
407 | 408 | ||
408 | error_iounmap: | 409 | error_iounmap: |
409 | 410 | ||
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index ccef891d642f..3c2d6888bb8c 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -566,7 +566,7 @@ typedef struct asc_dvc_var { | |||
566 | ASC_SCSI_BIT_ID_TYPE unit_not_ready; | 566 | ASC_SCSI_BIT_ID_TYPE unit_not_ready; |
567 | ASC_SCSI_BIT_ID_TYPE queue_full_or_busy; | 567 | ASC_SCSI_BIT_ID_TYPE queue_full_or_busy; |
568 | ASC_SCSI_BIT_ID_TYPE start_motor; | 568 | ASC_SCSI_BIT_ID_TYPE start_motor; |
569 | uchar overrun_buf[ASC_OVERRUN_BSIZE] __aligned(8); | 569 | uchar *overrun_buf; |
570 | dma_addr_t overrun_dma; | 570 | dma_addr_t overrun_dma; |
571 | uchar scsi_reset_wait; | 571 | uchar scsi_reset_wait; |
572 | uchar chip_no; | 572 | uchar chip_no; |
@@ -13833,6 +13833,12 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost, | |||
13833 | */ | 13833 | */ |
13834 | if (ASC_NARROW_BOARD(boardp)) { | 13834 | if (ASC_NARROW_BOARD(boardp)) { |
13835 | ASC_DBG(2, "AscInitAsc1000Driver()\n"); | 13835 | ASC_DBG(2, "AscInitAsc1000Driver()\n"); |
13836 | |||
13837 | asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL); | ||
13838 | if (!asc_dvc_varp->overrun_buf) { | ||
13839 | ret = -ENOMEM; | ||
13840 | goto err_free_wide_mem; | ||
13841 | } | ||
13836 | warn_code = AscInitAsc1000Driver(asc_dvc_varp); | 13842 | warn_code = AscInitAsc1000Driver(asc_dvc_varp); |
13837 | 13843 | ||
13838 | if (warn_code || asc_dvc_varp->err_code) { | 13844 | if (warn_code || asc_dvc_varp->err_code) { |
@@ -13840,8 +13846,10 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost, | |||
13840 | "warn 0x%x, error 0x%x\n", | 13846 | "warn 0x%x, error 0x%x\n", |
13841 | asc_dvc_varp->init_state, warn_code, | 13847 | asc_dvc_varp->init_state, warn_code, |
13842 | asc_dvc_varp->err_code); | 13848 | asc_dvc_varp->err_code); |
13843 | if (asc_dvc_varp->err_code) | 13849 | if (asc_dvc_varp->err_code) { |
13844 | ret = -ENODEV; | 13850 | ret = -ENODEV; |
13851 | kfree(asc_dvc_varp->overrun_buf); | ||
13852 | } | ||
13845 | } | 13853 | } |
13846 | } else { | 13854 | } else { |
13847 | if (advansys_wide_init_chip(shost)) | 13855 | if (advansys_wide_init_chip(shost)) |
@@ -13894,6 +13902,7 @@ static int advansys_release(struct Scsi_Host *shost) | |||
13894 | dma_unmap_single(board->dev, | 13902 | dma_unmap_single(board->dev, |
13895 | board->dvc_var.asc_dvc_var.overrun_dma, | 13903 | board->dvc_var.asc_dvc_var.overrun_dma, |
13896 | ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); | 13904 | ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); |
13905 | kfree(board->dvc_var.asc_dvc_var.overrun_buf); | ||
13897 | } else { | 13906 | } else { |
13898 | iounmap(board->ioremap_addr); | 13907 | iounmap(board->ioremap_addr); |
13899 | advansys_wide_free_mem(board); | 13908 | advansys_wide_free_mem(board); |
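Editor's note: the advansys change replaces an __aligned(8) array embedded in asc_dvc_var with a separately allocated buffer — the kind of object dma_map_single() expects — and frees it both on the init error path and in advansys_release(). A rough sketch of that lifecycle, with hypothetical helper names and an assumed buffer size:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define EXAMPLE_OVERRUN_BSIZE 64	/* assumed size, for the sketch only */

/* Sketch: allocate a DMA-safe overrun buffer and map it for the device. */
static void *example_alloc_overrun(struct device *dev, dma_addr_t *dma)
{
	void *buf = kzalloc(EXAMPLE_OVERRUN_BSIZE, GFP_KERNEL);

	if (!buf)
		return NULL;
	*dma = dma_map_single(dev, buf, EXAMPLE_OVERRUN_BSIZE, DMA_FROM_DEVICE);
	return buf;
}

/* Sketch: release must undo both the mapping and the allocation. */
static void example_free_overrun(struct device *dev, void *buf, dma_addr_t dma)
{
	dma_unmap_single(dev, dma, EXAMPLE_OVERRUN_BSIZE, DMA_FROM_DEVICE);
	kfree(buf);
}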
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 4150c8a8fdc2..dfaaae5e73ae 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -89,7 +89,7 @@ ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg) | |||
89 | pci_save_state(pdev); | 89 | pci_save_state(pdev); |
90 | pci_disable_device(pdev); | 90 | pci_disable_device(pdev); |
91 | 91 | ||
92 | if (mesg.event == PM_EVENT_SUSPEND) | 92 | if (mesg.event & PM_EVENT_SLEEP) |
93 | pci_set_power_state(pdev, PCI_D3hot); | 93 | pci_set_power_state(pdev, PCI_D3hot); |
94 | 94 | ||
95 | return rc; | 95 | return rc; |
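Editor's note: PM_EVENT_SLEEP is a mask covering both PM_EVENT_SUSPEND and PM_EVENT_HIBERNATE, so the new test also powers the HBA down to D3hot on hibernation instead of only on an exact suspend match. A tiny illustration (not the aic79xx suspend handler):

#include <linux/pci.h>
#include <linux/pm.h>

/* Sketch: D3hot for any sleep transition, not just PM_EVENT_SUSPEND. */
static void example_pci_sleep(struct pci_dev *pdev, pm_message_t mesg)
{
	if (mesg.event & PM_EVENT_SLEEP)	/* suspend or hibernate */
		pci_set_power_state(pdev, PCI_D3hot);
}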
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 6d2ae641273c..64e62ce59c15 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -695,15 +695,16 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) | |||
695 | scb_index = ahc_inb(ahc, SCB_TAG); | 695 | scb_index = ahc_inb(ahc, SCB_TAG); |
696 | scb = ahc_lookup_scb(ahc, scb_index); | 696 | scb = ahc_lookup_scb(ahc, scb_index); |
697 | if (devinfo.role == ROLE_INITIATOR) { | 697 | if (devinfo.role == ROLE_INITIATOR) { |
698 | if (scb == NULL) | 698 | if (bus_phase == P_MESGOUT) { |
699 | panic("HOST_MSG_LOOP with " | 699 | if (scb == NULL) |
700 | "invalid SCB %x\n", scb_index); | 700 | panic("HOST_MSG_LOOP with " |
701 | "invalid SCB %x\n", | ||
702 | scb_index); | ||
701 | 703 | ||
702 | if (bus_phase == P_MESGOUT) | ||
703 | ahc_setup_initiator_msgout(ahc, | 704 | ahc_setup_initiator_msgout(ahc, |
704 | &devinfo, | 705 | &devinfo, |
705 | scb); | 706 | scb); |
706 | else { | 707 | } else { |
707 | ahc->msg_type = | 708 | ahc->msg_type = |
708 | MSG_TYPE_INITIATOR_MSGIN; | 709 | MSG_TYPE_INITIATOR_MSGIN; |
709 | ahc->msgin_index = 0; | 710 | ahc->msgin_index = 0; |
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index dd6e21d6f1dd..3d3eaef65fb3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -134,7 +134,7 @@ ahc_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg) | |||
134 | pci_save_state(pdev); | 134 | pci_save_state(pdev); |
135 | pci_disable_device(pdev); | 135 | pci_disable_device(pdev); |
136 | 136 | ||
137 | if (mesg.event == PM_EVENT_SUSPEND) | 137 | if (mesg.event & PM_EVENT_SLEEP) |
138 | pci_set_power_state(pdev, PCI_D3hot); | 138 | pci_set_power_state(pdev, PCI_D3hot); |
139 | 139 | ||
140 | return rc; | 140 | return rc; |
diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h
index fa7c5290257d..912e6b755f74 100644
--- a/drivers/scsi/aic94xx/aic94xx_sas.h
+++ b/drivers/scsi/aic94xx/aic94xx_sas.h
@@ -292,7 +292,7 @@ struct scb_header { | |||
292 | #define INITIATE_SSP_TASK 0x00 | 292 | #define INITIATE_SSP_TASK 0x00 |
293 | #define INITIATE_LONG_SSP_TASK 0x01 | 293 | #define INITIATE_LONG_SSP_TASK 0x01 |
294 | #define INITIATE_BIDIR_SSP_TASK 0x02 | 294 | #define INITIATE_BIDIR_SSP_TASK 0x02 |
295 | #define ABORT_TASK 0x03 | 295 | #define SCB_ABORT_TASK 0x03 |
296 | #define INITIATE_SSP_TMF 0x04 | 296 | #define INITIATE_SSP_TMF 0x04 |
297 | #define SSP_TARG_GET_DATA 0x05 | 297 | #define SSP_TARG_GET_DATA 0x05 |
298 | #define SSP_TARG_GET_DATA_GOOD 0x06 | 298 | #define SSP_TARG_GET_DATA_GOOD 0x06 |
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 0febad4dd75f..ab350504ca5a 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -458,13 +458,19 @@ static void escb_tasklet_complete(struct asd_ascb *ascb, | |||
458 | tc_abort = le16_to_cpu(tc_abort); | 458 | tc_abort = le16_to_cpu(tc_abort); |
459 | 459 | ||
460 | list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) { | 460 | list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) { |
461 | struct sas_task *task = ascb->uldd_task; | 461 | struct sas_task *task = a->uldd_task; |
462 | |||
463 | if (a->tc_index != tc_abort) | ||
464 | continue; | ||
462 | 465 | ||
463 | if (task && a->tc_index == tc_abort) { | 466 | if (task) { |
464 | failed_dev = task->dev; | 467 | failed_dev = task->dev; |
465 | sas_task_abort(task); | 468 | sas_task_abort(task); |
466 | break; | 469 | } else { |
470 | ASD_DPRINTK("R_T_A for non TASK scb 0x%x\n", | ||
471 | a->scb->header.opcode); | ||
467 | } | 472 | } |
473 | break; | ||
468 | } | 474 | } |
469 | 475 | ||
470 | if (!failed_dev) { | 476 | if (!failed_dev) { |
@@ -478,7 +484,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb, | |||
478 | * that the EH will wake up and do something. | 484 | * that the EH will wake up and do something. |
479 | */ | 485 | */ |
480 | list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) { | 486 | list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) { |
481 | struct sas_task *task = ascb->uldd_task; | 487 | struct sas_task *task = a->uldd_task; |
482 | 488 | ||
483 | if (task && | 489 | if (task && |
484 | task->dev == failed_dev && | 490 | task->dev == failed_dev && |
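Editor's note: the escb_tasklet_complete fix above is a loop-cursor bug — the per-entry fields must be read through a, the list_for_each_entry_safe() cursor, not through the outer ascb that happens to be in scope. In miniature, with hypothetical types:

#include <linux/list.h>

struct example_ascb {
	struct list_head list;
	void *uldd_task;
	int tc_index;
};

/* Sketch: read per-entry data through the loop cursor "a". */
static void *example_find_aborted_task(struct list_head *pend_q, int tc_abort)
{
	struct example_ascb *a, *b;

	list_for_each_entry_safe(a, b, pend_q, list) {
		if (a->tc_index != tc_abort)
			continue;
		return a->uldd_task;	/* may be NULL for non-TASK scbs */
	}
	return NULL;
}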
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 87b2f6e6adfe..144f5ad20453 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -151,8 +151,6 @@ static int asd_clear_nexus_I_T(struct domain_device *dev) | |||
151 | CLEAR_NEXUS_PRE; | 151 | CLEAR_NEXUS_PRE; |
152 | scb->clear_nexus.nexus = NEXUS_I_T; | 152 | scb->clear_nexus.nexus = NEXUS_I_T; |
153 | scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; | 153 | scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; |
154 | if (dev->tproto) | ||
155 | scb->clear_nexus.flags |= SUSPEND_TX; | ||
156 | scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) | 154 | scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) |
157 | dev->lldd_dev); | 155 | dev->lldd_dev); |
158 | CLEAR_NEXUS_POST; | 156 | CLEAR_NEXUS_POST; |
@@ -169,8 +167,6 @@ static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun) | |||
169 | CLEAR_NEXUS_PRE; | 167 | CLEAR_NEXUS_PRE; |
170 | scb->clear_nexus.nexus = NEXUS_I_T_L; | 168 | scb->clear_nexus.nexus = NEXUS_I_T_L; |
171 | scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; | 169 | scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; |
172 | if (dev->tproto) | ||
173 | scb->clear_nexus.flags |= SUSPEND_TX; | ||
174 | memcpy(scb->clear_nexus.ssp_task.lun, lun, 8); | 170 | memcpy(scb->clear_nexus.ssp_task.lun, lun, 8); |
175 | scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) | 171 | scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) |
176 | dev->lldd_dev); | 172 | dev->lldd_dev); |
@@ -369,7 +365,7 @@ int asd_abort_task(struct sas_task *task) | |||
369 | return -ENOMEM; | 365 | return -ENOMEM; |
370 | scb = ascb->scb; | 366 | scb = ascb->scb; |
371 | 367 | ||
372 | scb->header.opcode = ABORT_TASK; | 368 | scb->header.opcode = SCB_ABORT_TASK; |
373 | 369 | ||
374 | switch (task->task_proto) { | 370 | switch (task->task_proto) { |
375 | case SAS_PROTOCOL_SATA: | 371 | case SAS_PROTOCOL_SATA: |
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 4f9ff32cfed0..f91f79c8007d 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1387,18 +1387,16 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
1387 | switch(controlcode) { | 1387 | switch(controlcode) { |
1388 | 1388 | ||
1389 | case ARCMSR_MESSAGE_READ_RQBUFFER: { | 1389 | case ARCMSR_MESSAGE_READ_RQBUFFER: { |
1390 | unsigned long *ver_addr; | 1390 | unsigned char *ver_addr; |
1391 | uint8_t *pQbuffer, *ptmpQbuffer; | 1391 | uint8_t *pQbuffer, *ptmpQbuffer; |
1392 | int32_t allxfer_len = 0; | 1392 | int32_t allxfer_len = 0; |
1393 | void *tmp; | ||
1394 | 1393 | ||
1395 | tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA); | 1394 | ver_addr = kmalloc(1032, GFP_ATOMIC); |
1396 | ver_addr = (unsigned long *)tmp; | 1395 | if (!ver_addr) { |
1397 | if (!tmp) { | ||
1398 | retvalue = ARCMSR_MESSAGE_FAIL; | 1396 | retvalue = ARCMSR_MESSAGE_FAIL; |
1399 | goto message_out; | 1397 | goto message_out; |
1400 | } | 1398 | } |
1401 | ptmpQbuffer = (uint8_t *) ver_addr; | 1399 | ptmpQbuffer = ver_addr; |
1402 | while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) | 1400 | while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) |
1403 | && (allxfer_len < 1031)) { | 1401 | && (allxfer_len < 1031)) { |
1404 | pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; | 1402 | pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; |
@@ -1427,26 +1425,24 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
1427 | } | 1425 | } |
1428 | arcmsr_iop_message_read(acb); | 1426 | arcmsr_iop_message_read(acb); |
1429 | } | 1427 | } |
1430 | memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len); | 1428 | memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len); |
1431 | pcmdmessagefld->cmdmessage.Length = allxfer_len; | 1429 | pcmdmessagefld->cmdmessage.Length = allxfer_len; |
1432 | pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; | 1430 | pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; |
1433 | kfree(tmp); | 1431 | kfree(ver_addr); |
1434 | } | 1432 | } |
1435 | break; | 1433 | break; |
1436 | 1434 | ||
1437 | case ARCMSR_MESSAGE_WRITE_WQBUFFER: { | 1435 | case ARCMSR_MESSAGE_WRITE_WQBUFFER: { |
1438 | unsigned long *ver_addr; | 1436 | unsigned char *ver_addr; |
1439 | int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; | 1437 | int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; |
1440 | uint8_t *pQbuffer, *ptmpuserbuffer; | 1438 | uint8_t *pQbuffer, *ptmpuserbuffer; |
1441 | void *tmp; | ||
1442 | 1439 | ||
1443 | tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA); | 1440 | ver_addr = kmalloc(1032, GFP_ATOMIC); |
1444 | ver_addr = (unsigned long *)tmp; | 1441 | if (!ver_addr) { |
1445 | if (!tmp) { | ||
1446 | retvalue = ARCMSR_MESSAGE_FAIL; | 1442 | retvalue = ARCMSR_MESSAGE_FAIL; |
1447 | goto message_out; | 1443 | goto message_out; |
1448 | } | 1444 | } |
1449 | ptmpuserbuffer = (uint8_t *)ver_addr; | 1445 | ptmpuserbuffer = ver_addr; |
1450 | user_len = pcmdmessagefld->cmdmessage.Length; | 1446 | user_len = pcmdmessagefld->cmdmessage.Length; |
1451 | memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); | 1447 | memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); |
1452 | wqbuf_lastindex = acb->wqbuf_lastindex; | 1448 | wqbuf_lastindex = acb->wqbuf_lastindex; |
@@ -1492,7 +1488,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
1492 | retvalue = ARCMSR_MESSAGE_FAIL; | 1488 | retvalue = ARCMSR_MESSAGE_FAIL; |
1493 | } | 1489 | } |
1494 | } | 1490 | } |
1495 | kfree(tmp); | 1491 | kfree(ver_addr); |
1496 | } | 1492 | } |
1497 | break; | 1493 | break; |
1498 | 1494 | ||
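Editor's note: the arcmsr hunks retype the 1032-byte bounce buffer as a plain unsigned char * and allocate it with GFP_ATOMIC — presumably because this message-transfer path can run in a context that must not sleep — and drop GFP_DMA, since the buffer is only a memcpy() staging area, never a device DMA target. The allocation pattern in isolation, as a hypothetical helper:

#include <linux/slab.h>

/* Sketch: allocate the staging buffer without sleeping and without GFP_DMA.
 * Callers must handle a NULL return (the driver bails out with
 * ARCMSR_MESSAGE_FAIL in that case). */
static unsigned char *example_get_bounce(void)
{
	return kmalloc(1032, GFP_ATOMIC);
}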
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index fb5f20284389..a715632e19d4 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2018,6 +2018,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, | |||
2018 | * the upper layers to process. This would have been set | 2018 | * the upper layers to process. This would have been set |
2019 | * correctly by fas216_std_done. | 2019 | * correctly by fas216_std_done. |
2020 | */ | 2020 | */ |
2021 | scsi_eh_restore_cmnd(SCpnt, &info->ses); | ||
2021 | SCpnt->scsi_done(SCpnt); | 2022 | SCpnt->scsi_done(SCpnt); |
2022 | } | 2023 | } |
2023 | 2024 | ||
@@ -2103,23 +2104,12 @@ request_sense: | |||
2103 | if (SCpnt->cmnd[0] == REQUEST_SENSE) | 2104 | if (SCpnt->cmnd[0] == REQUEST_SENSE) |
2104 | goto done; | 2105 | goto done; |
2105 | 2106 | ||
2107 | scsi_eh_prep_cmnd(SCpnt, &info->ses, NULL, 0, ~0); | ||
2106 | fas216_log_target(info, LOG_CONNECT, SCpnt->device->id, | 2108 | fas216_log_target(info, LOG_CONNECT, SCpnt->device->id, |
2107 | "requesting sense"); | 2109 | "requesting sense"); |
2108 | memset(SCpnt->cmnd, 0, sizeof (SCpnt->cmnd)); | 2110 | init_SCp(SCpnt); |
2109 | SCpnt->cmnd[0] = REQUEST_SENSE; | ||
2110 | SCpnt->cmnd[1] = SCpnt->device->lun << 5; | ||
2111 | SCpnt->cmnd[4] = sizeof(SCpnt->sense_buffer); | ||
2112 | SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); | ||
2113 | SCpnt->SCp.buffer = NULL; | ||
2114 | SCpnt->SCp.buffers_residual = 0; | ||
2115 | SCpnt->SCp.ptr = (char *)SCpnt->sense_buffer; | ||
2116 | SCpnt->SCp.this_residual = sizeof(SCpnt->sense_buffer); | ||
2117 | SCpnt->SCp.phase = sizeof(SCpnt->sense_buffer); | ||
2118 | SCpnt->SCp.Message = 0; | 2111 | SCpnt->SCp.Message = 0; |
2119 | SCpnt->SCp.Status = 0; | 2112 | SCpnt->SCp.Status = 0; |
2120 | SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer); | ||
2121 | SCpnt->sc_data_direction = DMA_FROM_DEVICE; | ||
2122 | SCpnt->use_sg = 0; | ||
2123 | SCpnt->tag = 0; | 2113 | SCpnt->tag = 0; |
2124 | SCpnt->host_scribble = (void *)fas216_rq_sns_done; | 2114 | SCpnt->host_scribble = (void *)fas216_rq_sns_done; |
2125 | 2115 | ||
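Editor's note: the fas216 change drops the hand-rolled REQUEST_SENSE setup in favour of the scsi_eh save/restore helpers. A short sketch of how that pair is meant to be used (illustrative functions, not the fas216 code):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

/* Sketch: with cmnd == NULL and sense_bytes == ~0, scsi_eh_prep_cmnd() saves
 * the command state in *ses and rewrites it as a REQUEST_SENSE directed at
 * the command's own sense buffer. */
static void example_start_autosense(struct scsi_cmnd *cmd,
				    struct scsi_eh_save *ses)
{
	scsi_eh_prep_cmnd(cmd, ses, NULL, 0, ~0);
	/* ... the LLDD then re-issues cmd to fetch the sense data ... */
}

/* Sketch: restore the original command before handing it back upwards. */
static void example_finish_autosense(struct scsi_cmnd *cmd,
				     struct scsi_eh_save *ses)
{
	scsi_eh_restore_cmnd(cmd, ses);
	cmd->scsi_done(cmd);
}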
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index 00e5f055afdc..b65f4cf0eec9 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -16,6 +16,8 @@ | |||
16 | #define NO_IRQ 255 | 16 | #define NO_IRQ 255 |
17 | #endif | 17 | #endif |
18 | 18 | ||
19 | #include <scsi/scsi_eh.h> | ||
20 | |||
19 | #include "queue.h" | 21 | #include "queue.h" |
20 | #include "msgqueue.h" | 22 | #include "msgqueue.h" |
21 | 23 | ||
@@ -311,6 +313,7 @@ typedef struct { | |||
311 | 313 | ||
312 | /* miscellaneous */ | 314 | /* miscellaneous */ |
313 | int internal_done; /* flag to indicate request done */ | 315 | int internal_done; /* flag to indicate request done */ |
316 | struct scsi_eh_save ses; /* holds request sense restore info */ | ||
314 | unsigned long magic_end; | 317 | unsigned long magic_end; |
315 | } FAS216_Info; | 318 | } FAS216_Info; |
316 | 319 | ||
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index c82523908c2e..6d67f5c0eb8e 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -642,12 +642,15 @@ static void __init gdth_search_dev(gdth_pci_str *pcistr, ushort *cnt, | |||
642 | *cnt, vendor, device)); | 642 | *cnt, vendor, device)); |
643 | 643 | ||
644 | pdev = NULL; | 644 | pdev = NULL; |
645 | while ((pdev = pci_find_device(vendor, device, pdev)) | 645 | while ((pdev = pci_get_device(vendor, device, pdev)) |
646 | != NULL) { | 646 | != NULL) { |
647 | if (pci_enable_device(pdev)) | 647 | if (pci_enable_device(pdev)) |
648 | continue; | 648 | continue; |
649 | if (*cnt >= MAXHA) | 649 | if (*cnt >= MAXHA) { |
650 | pci_dev_put(pdev); | ||
650 | return; | 651 | return; |
652 | } | ||
653 | |||
651 | /* GDT PCI controller found, resources are already in pdev */ | 654 | /* GDT PCI controller found, resources are already in pdev */ |
652 | pcistr[*cnt].pdev = pdev; | 655 | pcistr[*cnt].pdev = pdev; |
653 | pcistr[*cnt].irq = pdev->irq; | 656 | pcistr[*cnt].irq = pdev->irq; |
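Editor's note: this gdth change is about reference counting — pci_get_device() holds a reference on the device it returns (and drops the one on its "from" argument), so an early exit from the scan loop must call pci_dev_put(); pci_find_device() had no such semantics and is deprecated. The pattern in isolation, with illustrative names:

#include <linux/pci.h>

/* Sketch of the reference-counted scan loop. */
static void example_scan(unsigned int vendor, unsigned int device, int max)
{
	struct pci_dev *pdev = NULL;
	int cnt = 0;

	while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
		if (cnt >= max) {
			pci_dev_put(pdev);	/* drop the ref still held on pdev */
			return;
		}
		cnt++;	/* normal path: the next pci_get_device() drops it */
	}
}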
@@ -4836,6 +4839,9 @@ static int __init gdth_isa_probe_one(ulong32 isa_bios) | |||
4836 | if (error) | 4839 | if (error) |
4837 | goto out_free_coal_stat; | 4840 | goto out_free_coal_stat; |
4838 | list_add_tail(&ha->list, &gdth_instances); | 4841 | list_add_tail(&ha->list, &gdth_instances); |
4842 | |||
4843 | scsi_scan_host(shp); | ||
4844 | |||
4839 | return 0; | 4845 | return 0; |
4840 | 4846 | ||
4841 | out_free_coal_stat: | 4847 | out_free_coal_stat: |
@@ -4963,6 +4969,9 @@ static int __init gdth_eisa_probe_one(ushort eisa_slot) | |||
4963 | if (error) | 4969 | if (error) |
4964 | goto out_free_coal_stat; | 4970 | goto out_free_coal_stat; |
4965 | list_add_tail(&ha->list, &gdth_instances); | 4971 | list_add_tail(&ha->list, &gdth_instances); |
4972 | |||
4973 | scsi_scan_host(shp); | ||
4974 | |||
4966 | return 0; | 4975 | return 0; |
4967 | 4976 | ||
4968 | out_free_ccb_phys: | 4977 | out_free_ccb_phys: |
@@ -5100,6 +5109,9 @@ static int __init gdth_pci_probe_one(gdth_pci_str *pcistr, int ctr) | |||
5100 | if (error) | 5109 | if (error) |
5101 | goto out_free_coal_stat; | 5110 | goto out_free_coal_stat; |
5102 | list_add_tail(&ha->list, &gdth_instances); | 5111 | list_add_tail(&ha->list, &gdth_instances); |
5112 | |||
5113 | scsi_scan_host(shp); | ||
5114 | |||
5103 | return 0; | 5115 | return 0; |
5104 | 5116 | ||
5105 | out_free_coal_stat: | 5117 | out_free_coal_stat: |
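Editor's note: each gdth probe path above now ends with scsi_scan_host(); presumably nothing else triggers bus scanning for this driver any more, so probe has to do it explicitly after scsi_add_host(). A schematic probe sequence (not the gdth functions):

#include <linux/errno.h>
#include <scsi/scsi_host.h>

/* Sketch: allocate, register, then explicitly scan for attached devices. */
static int example_probe(struct device *parent, struct scsi_host_template *tpl)
{
	struct Scsi_Host *shp = scsi_host_alloc(tpl, 0);
	int error;

	if (!shp)
		return -ENOMEM;
	error = scsi_add_host(shp, parent);	/* register with the midlayer */
	if (error) {
		scsi_host_put(shp);
		return error;
	}
	scsi_scan_host(shp);			/* discover attached devices */
	return 0;
}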
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index de5773443c62..ce0228e26aec 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -694,15 +694,13 @@ static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr) | |||
694 | { | 694 | { |
695 | ulong flags; | 695 | ulong flags; |
696 | 696 | ||
697 | spin_lock_irqsave(&ha->smp_lock, flags); | ||
698 | |||
699 | if (buf == ha->pscratch) { | 697 | if (buf == ha->pscratch) { |
698 | spin_lock_irqsave(&ha->smp_lock, flags); | ||
700 | ha->scratch_busy = FALSE; | 699 | ha->scratch_busy = FALSE; |
700 | spin_unlock_irqrestore(&ha->smp_lock, flags); | ||
701 | } else { | 701 | } else { |
702 | pci_free_consistent(ha->pdev, size, buf, paddr); | 702 | pci_free_consistent(ha->pdev, size, buf, paddr); |
703 | } | 703 | } |
704 | |||
705 | spin_unlock_irqrestore(&ha->smp_lock, flags); | ||
706 | } | 704 | } |
707 | 705 | ||
708 | #ifdef GDTH_IOCTL_PROC | 706 | #ifdef GDTH_IOCTL_PROC |
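Editor's note: the gdth_proc change narrows smp_lock to the only shared state it actually protects, the scratch_busy flag, so pci_free_consistent() is no longer called with the spinlock held and interrupts disabled. Schematically, with a hypothetical structure:

#include <linux/pci.h>
#include <linux/spinlock.h>

struct example_ha {
	spinlock_t smp_lock;
	void *pscratch;
	int scratch_busy;
};

/* Sketch: lock only around the flag; the DMA free needs no lock. */
static void example_ioctl_free(struct example_ha *ha, struct pci_dev *pdev,
			       int size, void *buf, dma_addr_t paddr)
{
	unsigned long flags;

	if (buf == ha->pscratch) {
		spin_lock_irqsave(&ha->smp_lock, flags);
		ha->scratch_busy = 0;
		spin_unlock_irqrestore(&ha->smp_lock, flags);
	} else {
		pci_free_consistent(pdev, size, buf, paddr);
	}
}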
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2074701f7e76..c72014a3e7d4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5140,7 +5140,7 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd, | |||
5140 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | 5140 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; |
5141 | struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; | 5141 | struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; |
5142 | struct ipr_ioadl_desc *last_ioadl = NULL; | 5142 | struct ipr_ioadl_desc *last_ioadl = NULL; |
5143 | int len = qc->nbytes + qc->pad_len; | 5143 | int len = qc->nbytes; |
5144 | struct scatterlist *sg; | 5144 | struct scatterlist *sg; |
5145 | unsigned int si; | 5145 | unsigned int si; |
5146 | 5146 | ||
@@ -5206,7 +5206,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) | |||
5206 | ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; | 5206 | ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; |
5207 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; | 5207 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; |
5208 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; | 5208 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; |
5209 | ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem; | 5209 | ipr_cmd->dma_use_sg = qc->n_elem; |
5210 | 5210 | ||
5211 | ipr_build_ata_ioadl(ipr_cmd, qc); | 5211 | ipr_build_ata_ioadl(ipr_cmd, qc); |
5212 | regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; | 5212 | regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; |
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bb152fb9fec7..7ed568f180ae 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1576,7 +1576,7 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr) | |||
1576 | METHOD_TRACE("ips_make_passthru", 1); | 1576 | METHOD_TRACE("ips_make_passthru", 1); |
1577 | 1577 | ||
1578 | scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i) | 1578 | scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i) |
1579 | length += sg[i].length; | 1579 | length += sg->length; |
1580 | 1580 | ||
1581 | if (length < sizeof (ips_passthru_t)) { | 1581 | if (length < sizeof (ips_passthru_t)) { |
1582 | /* wrong size */ | 1582 | /* wrong size */ |
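Editor's note: the ips fix corrects iterator usage — scsi_for_each_sg() advances the sg pointer itself (following chained scatterlists where present), so the body must read sg->length; sg[i].length only happens to work for flat, unchained lists. In isolation:

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Sketch: sum the scatter-gather lengths the way the iterator expects. */
static unsigned int example_total_length(struct scsi_cmnd *sc)
{
	struct scatterlist *sg;
	unsigned int length = 0;
	int i;

	scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i)
		length += sg->length;	/* the macro advances sg, not i alone */

	return length;
}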
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 0996f866f14c..7cd05b599a12 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -178,8 +178,8 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) | |||
178 | task->uldd_task = qc; | 178 | task->uldd_task = qc; |
179 | if (ata_is_atapi(qc->tf.protocol)) { | 179 | if (ata_is_atapi(qc->tf.protocol)) { |
180 | memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); | 180 | memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); |
181 | task->total_xfer_len = qc->nbytes + qc->pad_len; | 181 | task->total_xfer_len = qc->nbytes; |
182 | task->num_scatter = qc->pad_len ? qc->n_elem + 1 : qc->n_elem; | 182 | task->num_scatter = qc->n_elem; |
183 | } else { | 183 | } else { |
184 | for_each_sg(qc->sg, sg, qc->n_elem, si) | 184 | for_each_sg(qc->sg, sg, qc->n_elem, si) |
185 | xfer += sg->length; | 185 | xfer += sg->length; |
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f869fba86807..704ea06a6e50 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -51,10 +51,14 @@ static void sas_scsi_task_done(struct sas_task *task) | |||
51 | { | 51 | { |
52 | struct task_status_struct *ts = &task->task_status; | 52 | struct task_status_struct *ts = &task->task_status; |
53 | struct scsi_cmnd *sc = task->uldd_task; | 53 | struct scsi_cmnd *sc = task->uldd_task; |
54 | struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(sc->device->host); | ||
55 | unsigned ts_flags = task->task_state_flags; | ||
56 | int hs = 0, stat = 0; | 54 | int hs = 0, stat = 0; |
57 | 55 | ||
56 | if (unlikely(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { | ||
57 | /* Aborted tasks will be completed by the error handler */ | ||
58 | SAS_DPRINTK("task done but aborted\n"); | ||
59 | return; | ||
60 | } | ||
61 | |||
58 | if (unlikely(!sc)) { | 62 | if (unlikely(!sc)) { |
59 | SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); | 63 | SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); |
60 | list_del_init(&task->list); | 64 | list_del_init(&task->list); |
@@ -120,11 +124,7 @@ static void sas_scsi_task_done(struct sas_task *task) | |||
120 | sc->result = (hs << 16) | stat; | 124 | sc->result = (hs << 16) | stat; |
121 | list_del_init(&task->list); | 125 | list_del_init(&task->list); |
122 | sas_free_task(task); | 126 | sas_free_task(task); |
123 | /* This is very ugly but this is how SCSI Core works. */ | 127 | sc->scsi_done(sc); |
124 | if (ts_flags & SAS_TASK_STATE_ABORTED) | ||
125 | scsi_eh_finish_cmd(sc, &sas_ha->eh_done_q); | ||
126 | else | ||
127 | sc->scsi_done(sc); | ||
128 | } | 128 | } |
129 | 129 | ||
130 | static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd) | 130 | static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd) |
@@ -255,13 +255,34 @@ out: | |||
255 | return res; | 255 | return res; |
256 | } | 256 | } |
257 | 257 | ||
258 | static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) | ||
259 | { | ||
260 | struct sas_task *task = TO_SAS_TASK(cmd); | ||
261 | struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host); | ||
262 | |||
263 | /* remove the aborted task flag to allow the task to be | ||
264 | * completed now. At this point, we only get called following | ||
265 | * an actual abort of the task, so we should be guaranteed not | ||
266 | * to be racing with any completions from the LLD (hence we | ||
267 | * don't need the task state lock to clear the flag) */ | ||
268 | task->task_state_flags &= ~SAS_TASK_STATE_ABORTED; | ||
269 | /* Now call task_done. However, task will be free'd after | ||
270 | * this */ | ||
271 | task->task_done(task); | ||
272 | /* now finish the command and move it on to the error | ||
273 | * handler done list, this also takes it off the | ||
274 | * error handler pending list */ | ||
275 | scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q); | ||
276 | } | ||
277 | |||
258 | static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) | 278 | static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) |
259 | { | 279 | { |
260 | struct scsi_cmnd *cmd, *n; | 280 | struct scsi_cmnd *cmd, *n; |
261 | 281 | ||
262 | list_for_each_entry_safe(cmd, n, error_q, eh_entry) { | 282 | list_for_each_entry_safe(cmd, n, error_q, eh_entry) { |
263 | if (cmd == my_cmd) | 283 | if (cmd->device->sdev_target == my_cmd->device->sdev_target && |
264 | list_del_init(&cmd->eh_entry); | 284 | cmd->device->lun == my_cmd->device->lun) |
285 | sas_eh_finish_cmd(cmd); | ||
265 | } | 286 | } |
266 | } | 287 | } |
267 | 288 | ||
@@ -274,7 +295,7 @@ static void sas_scsi_clear_queue_I_T(struct list_head *error_q, | |||
274 | struct domain_device *x = cmd_to_domain_dev(cmd); | 295 | struct domain_device *x = cmd_to_domain_dev(cmd); |
275 | 296 | ||
276 | if (x == dev) | 297 | if (x == dev) |
277 | list_del_init(&cmd->eh_entry); | 298 | sas_eh_finish_cmd(cmd); |
278 | } | 299 | } |
279 | } | 300 | } |
280 | 301 | ||
@@ -288,7 +309,7 @@ static void sas_scsi_clear_queue_port(struct list_head *error_q, | |||
288 | struct asd_sas_port *x = dev->port; | 309 | struct asd_sas_port *x = dev->port; |
289 | 310 | ||
290 | if (x == port) | 311 | if (x == port) |
291 | list_del_init(&cmd->eh_entry); | 312 | sas_eh_finish_cmd(cmd); |
292 | } | 313 | } |
293 | } | 314 | } |
294 | 315 | ||
@@ -528,14 +549,14 @@ Again: | |||
528 | case TASK_IS_DONE: | 549 | case TASK_IS_DONE: |
529 | SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, | 550 | SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, |
530 | task); | 551 | task); |
531 | task->task_done(task); | 552 | sas_eh_finish_cmd(cmd); |
532 | if (need_reset) | 553 | if (need_reset) |
533 | try_to_reset_cmd_device(shost, cmd); | 554 | try_to_reset_cmd_device(shost, cmd); |
534 | continue; | 555 | continue; |
535 | case TASK_IS_ABORTED: | 556 | case TASK_IS_ABORTED: |
536 | SAS_DPRINTK("%s: task 0x%p is aborted\n", | 557 | SAS_DPRINTK("%s: task 0x%p is aborted\n", |
537 | __FUNCTION__, task); | 558 | __FUNCTION__, task); |
538 | task->task_done(task); | 559 | sas_eh_finish_cmd(cmd); |
539 | if (need_reset) | 560 | if (need_reset) |
540 | try_to_reset_cmd_device(shost, cmd); | 561 | try_to_reset_cmd_device(shost, cmd); |
541 | continue; | 562 | continue; |
@@ -547,7 +568,7 @@ Again: | |||
547 | "recovered\n", | 568 | "recovered\n", |
548 | SAS_ADDR(task->dev), | 569 | SAS_ADDR(task->dev), |
549 | cmd->device->lun); | 570 | cmd->device->lun); |
550 | task->task_done(task); | 571 | sas_eh_finish_cmd(cmd); |
551 | if (need_reset) | 572 | if (need_reset) |
552 | try_to_reset_cmd_device(shost, cmd); | 573 | try_to_reset_cmd_device(shost, cmd); |
553 | sas_scsi_clear_queue_lu(work_q, cmd); | 574 | sas_scsi_clear_queue_lu(work_q, cmd); |
@@ -562,7 +583,7 @@ Again: | |||
562 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { | 583 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { |
563 | SAS_DPRINTK("I_T %016llx recovered\n", | 584 | SAS_DPRINTK("I_T %016llx recovered\n", |
564 | SAS_ADDR(task->dev->sas_addr)); | 585 | SAS_ADDR(task->dev->sas_addr)); |
565 | task->task_done(task); | 586 | sas_eh_finish_cmd(cmd); |
566 | if (need_reset) | 587 | if (need_reset) |
567 | try_to_reset_cmd_device(shost, cmd); | 588 | try_to_reset_cmd_device(shost, cmd); |
568 | sas_scsi_clear_queue_I_T(work_q, task->dev); | 589 | sas_scsi_clear_queue_I_T(work_q, task->dev); |
@@ -577,7 +598,7 @@ Again: | |||
577 | if (res == TMF_RESP_FUNC_COMPLETE) { | 598 | if (res == TMF_RESP_FUNC_COMPLETE) { |
578 | SAS_DPRINTK("clear nexus port:%d " | 599 | SAS_DPRINTK("clear nexus port:%d " |
579 | "succeeded\n", port->id); | 600 | "succeeded\n", port->id); |
580 | task->task_done(task); | 601 | sas_eh_finish_cmd(cmd); |
581 | if (need_reset) | 602 | if (need_reset) |
582 | try_to_reset_cmd_device(shost, cmd); | 603 | try_to_reset_cmd_device(shost, cmd); |
583 | sas_scsi_clear_queue_port(work_q, | 604 | sas_scsi_clear_queue_port(work_q, |
@@ -591,10 +612,10 @@ Again: | |||
591 | if (res == TMF_RESP_FUNC_COMPLETE) { | 612 | if (res == TMF_RESP_FUNC_COMPLETE) { |
592 | SAS_DPRINTK("clear nexus ha " | 613 | SAS_DPRINTK("clear nexus ha " |
593 | "succeeded\n"); | 614 | "succeeded\n"); |
594 | task->task_done(task); | 615 | sas_eh_finish_cmd(cmd); |
595 | if (need_reset) | 616 | if (need_reset) |
596 | try_to_reset_cmd_device(shost, cmd); | 617 | try_to_reset_cmd_device(shost, cmd); |
597 | goto out; | 618 | goto clear_q; |
598 | } | 619 | } |
599 | } | 620 | } |
600 | /* If we are here -- this means that no amount | 621 | /* If we are here -- this means that no amount |
@@ -606,21 +627,18 @@ Again: | |||
606 | SAS_ADDR(task->dev->sas_addr), | 627 | SAS_ADDR(task->dev->sas_addr), |
607 | cmd->device->lun); | 628 | cmd->device->lun); |
608 | 629 | ||
609 | task->task_done(task); | 630 | sas_eh_finish_cmd(cmd); |
610 | if (need_reset) | 631 | if (need_reset) |
611 | try_to_reset_cmd_device(shost, cmd); | 632 | try_to_reset_cmd_device(shost, cmd); |
612 | goto clear_q; | 633 | goto clear_q; |
613 | } | 634 | } |
614 | } | 635 | } |
615 | out: | ||
616 | return list_empty(work_q); | 636 | return list_empty(work_q); |
617 | clear_q: | 637 | clear_q: |
618 | SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__); | 638 | SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__); |
619 | list_for_each_entry_safe(cmd, n, work_q, eh_entry) { | 639 | list_for_each_entry_safe(cmd, n, work_q, eh_entry) |
620 | struct sas_task *task = TO_SAS_TASK(cmd); | 640 | sas_eh_finish_cmd(cmd); |
621 | list_del_init(&cmd->eh_entry); | 641 | |
622 | task->task_done(task); | ||
623 | } | ||
624 | return list_empty(work_q); | 642 | return list_empty(work_q); |
625 | } | 643 | } |
626 | 644 | ||
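
The libsas hunks above replace every open-coded completion (and, on the clear_q path, the manual unlink plus completion) with a single sas_eh_finish_cmd() call. A minimal sketch, not part of this diff, of the pattern being consolidated, using only the names visible in the removed lines; the kernel's real helper may additionally hand the command back to the SCSI error-handler done queue:

static void sas_eh_finish_cmd_sketch(struct scsi_cmnd *cmd)
{
	struct sas_task *task = TO_SAS_TASK(cmd);

	/* drop the command from the error-handler work list, if still queued */
	list_del_init(&cmd->eh_entry);

	/* complete the underlying sas_task, as the call sites did inline */
	task->task_done(task);
}
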
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 83567b9755b4..2ab2d24dcc15 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -307,6 +307,7 @@ struct lpfc_vport { | |||
307 | 307 | ||
308 | uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */ | 308 | uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */ |
309 | uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */ | 309 | uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */ |
310 | uint32_t fc_rscn_flush; /* flag use of fc_rscn_id_list */ | ||
310 | struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN]; | 311 | struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN]; |
311 | struct lpfc_name fc_nodename; /* fc nodename */ | 312 | struct lpfc_name fc_nodename; /* fc nodename */ |
312 | struct lpfc_name fc_portname; /* fc portname */ | 313 | struct lpfc_name fc_portname; /* fc portname */ |
@@ -392,6 +393,13 @@ enum hba_temp_state { | |||
392 | HBA_OVER_TEMP | 393 | HBA_OVER_TEMP |
393 | }; | 394 | }; |
394 | 395 | ||
396 | enum intr_type_t { | ||
397 | NONE = 0, | ||
398 | INTx, | ||
399 | MSI, | ||
400 | MSIX, | ||
401 | }; | ||
402 | |||
395 | struct lpfc_hba { | 403 | struct lpfc_hba { |
396 | struct lpfc_sli sli; | 404 | struct lpfc_sli sli; |
397 | uint32_t sli_rev; /* SLI2 or SLI3 */ | 405 | uint32_t sli_rev; /* SLI2 or SLI3 */ |
@@ -409,7 +417,7 @@ struct lpfc_hba { | |||
409 | /* This flag is set while issuing */ | 417 | /* This flag is set while issuing */ |
410 | /* INIT_LINK mailbox command */ | 418 | /* INIT_LINK mailbox command */ |
411 | #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ | 419 | #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ |
412 | #define LS_IGNORE_ERATT 0x3 /* intr handler should ignore ERATT */ | 420 | #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ |
413 | 421 | ||
414 | struct lpfc_sli2_slim *slim2p; | 422 | struct lpfc_sli2_slim *slim2p; |
415 | struct lpfc_dmabuf hbqslimp; | 423 | struct lpfc_dmabuf hbqslimp; |
@@ -487,6 +495,8 @@ struct lpfc_hba { | |||
487 | wait_queue_head_t *work_wait; | 495 | wait_queue_head_t *work_wait; |
488 | struct task_struct *worker_thread; | 496 | struct task_struct *worker_thread; |
489 | 497 | ||
498 | uint32_t hbq_in_use; /* HBQs in use flag */ | ||
499 | struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ | ||
490 | uint32_t hbq_count; /* Count of configured HBQs */ | 500 | uint32_t hbq_count; /* Count of configured HBQs */ |
491 | struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ | 501 | struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ |
492 | 502 | ||
@@ -555,7 +565,8 @@ struct lpfc_hba { | |||
555 | mempool_t *nlp_mem_pool; | 565 | mempool_t *nlp_mem_pool; |
556 | 566 | ||
557 | struct fc_host_statistics link_stats; | 567 | struct fc_host_statistics link_stats; |
558 | uint8_t using_msi; | 568 | enum intr_type_t intr_type; |
569 | struct msix_entry msix_entries[1]; | ||
559 | 570 | ||
560 | struct list_head port_list; | 571 | struct list_head port_list; |
561 | struct lpfc_vport *pport; /* physical lpfc_vport pointer */ | 572 | struct lpfc_vport *pport; /* physical lpfc_vport pointer */ |
@@ -595,6 +606,8 @@ struct lpfc_hba { | |||
595 | unsigned long last_completion_time; | 606 | unsigned long last_completion_time; |
596 | struct timer_list hb_tmofunc; | 607 | struct timer_list hb_tmofunc; |
597 | uint8_t hb_outstanding; | 608 | uint8_t hb_outstanding; |
609 | /* ndlp reference management */ | ||
610 | spinlock_t ndlp_lock; | ||
598 | /* | 611 | /* |
599 | * Following bit will be set for all buffer tags which are not | 612 | * Following bit will be set for all buffer tags which are not |
600 | * associated with any HBQ. | 613 | * associated with any HBQ. |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 4bae4a2ed2f1..b12a841703ca 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -1191,7 +1191,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) | |||
1191 | shost = lpfc_shost_from_vport(vport); | 1191 | shost = lpfc_shost_from_vport(vport); |
1192 | spin_lock_irq(shost->host_lock); | 1192 | spin_lock_irq(shost->host_lock); |
1193 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) | 1193 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) |
1194 | if (ndlp->rport) | 1194 | if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport) |
1195 | ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; | 1195 | ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; |
1196 | spin_unlock_irq(shost->host_lock); | 1196 | spin_unlock_irq(shost->host_lock); |
1197 | } | 1197 | } |
@@ -1592,9 +1592,11 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, | |||
1592 | # support this feature | 1592 | # support this feature |
1593 | # 0 = MSI disabled (default) | 1593 | # 0 = MSI disabled (default) |
1594 | # 1 = MSI enabled | 1594 | # 1 = MSI enabled |
1595 | # Value range is [0,1]. Default value is 0. | 1595 | # 2 = MSI-X enabled |
1596 | # Value range is [0,2]. Default value is 0. | ||
1596 | */ | 1597 | */ |
1597 | LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible"); | 1598 | LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or " |
1599 | "MSI-X (2), if possible"); | ||
1598 | 1600 | ||
1599 | /* | 1601 | /* |
1600 | # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. | 1602 | # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. |
@@ -1946,11 +1948,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, | |||
1946 | } | 1948 | } |
1947 | 1949 | ||
1948 | /* If HBA encountered an error attention, allow only DUMP | 1950 | /* If HBA encountered an error attention, allow only DUMP |
1949 | * mailbox command until the HBA is restarted. | 1951 | * or RESTART mailbox commands until the HBA is restarted. |
1950 | */ | 1952 | */ |
1951 | if ((phba->pport->stopped) && | 1953 | if ((phba->pport->stopped) && |
1952 | (phba->sysfs_mbox.mbox->mb.mbxCommand | 1954 | (phba->sysfs_mbox.mbox->mb.mbxCommand != |
1953 | != MBX_DUMP_MEMORY)) { | 1955 | MBX_DUMP_MEMORY && |
1956 | phba->sysfs_mbox.mbox->mb.mbxCommand != | ||
1957 | MBX_RESTART)) { | ||
1954 | sysfs_mbox_idle(phba); | 1958 | sysfs_mbox_idle(phba); |
1955 | spin_unlock_irq(&phba->hbalock); | 1959 | spin_unlock_irq(&phba->hbalock); |
1956 | return -EPERM; | 1960 | return -EPERM; |
@@ -2384,7 +2388,8 @@ lpfc_get_node_by_target(struct scsi_target *starget) | |||
2384 | spin_lock_irq(shost->host_lock); | 2388 | spin_lock_irq(shost->host_lock); |
2385 | /* Search for this, mapped, target ID */ | 2389 | /* Search for this, mapped, target ID */ |
2386 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | 2390 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
2387 | if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && | 2391 | if (NLP_CHK_NODE_ACT(ndlp) && |
2392 | ndlp->nlp_state == NLP_STE_MAPPED_NODE && | ||
2388 | starget->id == ndlp->nlp_sid) { | 2393 | starget->id == ndlp->nlp_sid) { |
2389 | spin_unlock_irq(shost->host_lock); | 2394 | spin_unlock_irq(shost->host_lock); |
2390 | return ndlp; | 2395 | return ndlp; |
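
With lpfc_use_msi widened to the range [0,2], the value selects among the intr_type_t modes added to lpfc.h (INTx, MSI, MSIX). The sketch below is a hedged illustration of the usual selection and fallback order, not lpfc's actual probe code; phba->pcidev is assumed from the driver's hba structure and is not shown in this diff:

static enum intr_type_t pick_intr_mode_sketch(struct lpfc_hba *phba, int use_msi)
{
	/* use_msi: 0 = INTx (default), 1 = MSI, 2 = MSI-X */
	if (use_msi == 2) {
		phba->msix_entries[0].entry = 0;
		if (!pci_enable_msix(phba->pcidev, phba->msix_entries, 1))
			return MSIX;
	}
	if (use_msi >= 1 && !pci_enable_msi(phba->pcidev))
		return MSI;
	return INTx;	/* fall back to the legacy pin-based interrupt */
}
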
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 50fcb7c930bc..0819f5f39de5 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -53,7 +53,10 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); | |||
53 | void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); | 53 | void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); |
54 | void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); | 54 | void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); |
55 | void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); | 55 | void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); |
56 | void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); | ||
56 | void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); | 57 | void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); |
58 | struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, | ||
59 | struct lpfc_nodelist *, int); | ||
57 | void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int); | 60 | void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int); |
58 | void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *); | 61 | void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *); |
59 | void lpfc_set_disctmo(struct lpfc_vport *); | 62 | void lpfc_set_disctmo(struct lpfc_vport *); |
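
The two prototypes added here, lpfc_enqueue_node() and lpfc_enable_node(), are used later in this patch in a recurring lookup-or-revive pattern. A minimal sketch of that pattern, assuming the same helpers the lpfc_els.c hunks call, with error handling trimmed:

static struct lpfc_nodelist *get_active_node_sketch(struct lpfc_hba *phba,
						    struct lpfc_vport *vport,
						    uint32_t did)
{
	struct lpfc_nodelist *ndlp = lpfc_findnode_did(vport, did);

	if (!ndlp) {
		/* no node for this DID yet: allocate, init and list it */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* known DID but the node was inactivated: revive it in place */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
	}
	return ndlp;	/* NULL if allocation or enable failed */
}
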
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 92441ce610ed..3d0ccd9b341d 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -294,7 +294,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, | |||
294 | /* Save for completion so we can release these resources */ | 294 | /* Save for completion so we can release these resources */ |
295 | geniocb->context1 = (uint8_t *) inp; | 295 | geniocb->context1 = (uint8_t *) inp; |
296 | geniocb->context2 = (uint8_t *) outp; | 296 | geniocb->context2 = (uint8_t *) outp; |
297 | geniocb->context_un.ndlp = ndlp; | 297 | geniocb->context_un.ndlp = lpfc_nlp_get(ndlp); |
298 | 298 | ||
299 | /* Fill in payload, bp points to frame payload */ | 299 | /* Fill in payload, bp points to frame payload */ |
300 | icmd->ulpCommand = CMD_GEN_REQUEST64_CR; | 300 | icmd->ulpCommand = CMD_GEN_REQUEST64_CR; |
@@ -489,8 +489,10 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) | |||
489 | */ | 489 | */ |
490 | ndlp = lpfc_findnode_did(vport, | 490 | ndlp = lpfc_findnode_did(vport, |
491 | Did); | 491 | Did); |
492 | if (ndlp && (ndlp->nlp_type & | 492 | if (ndlp && |
493 | NLP_FCP_TARGET)) | 493 | NLP_CHK_NODE_ACT(ndlp) |
494 | && (ndlp->nlp_type & | ||
495 | NLP_FCP_TARGET)) | ||
494 | lpfc_setup_disc_node | 496 | lpfc_setup_disc_node |
495 | (vport, Did); | 497 | (vport, Did); |
496 | else if (lpfc_ns_cmd(vport, | 498 | else if (lpfc_ns_cmd(vport, |
@@ -773,7 +775,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
773 | "0267 NameServer GFF Rsp " | 775 | "0267 NameServer GFF Rsp " |
774 | "x%x Error (%d %d) Data: x%x x%x\n", | 776 | "x%x Error (%d %d) Data: x%x x%x\n", |
775 | did, irsp->ulpStatus, irsp->un.ulpWord[4], | 777 | did, irsp->ulpStatus, irsp->un.ulpWord[4], |
776 | vport->fc_flag, vport->fc_rscn_id_cnt) | 778 | vport->fc_flag, vport->fc_rscn_id_cnt); |
777 | } | 779 | } |
778 | 780 | ||
779 | /* This is a target port, unregistered port, or the GFF_ID failed */ | 781 | /* This is a target port, unregistered port, or the GFF_ID failed */ |
@@ -1064,7 +1066,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, | |||
1064 | int rc = 0; | 1066 | int rc = 0; |
1065 | 1067 | ||
1066 | ndlp = lpfc_findnode_did(vport, NameServer_DID); | 1068 | ndlp = lpfc_findnode_did(vport, NameServer_DID); |
1067 | if (ndlp == NULL || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) { | 1069 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) |
1070 | || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) { | ||
1068 | rc=1; | 1071 | rc=1; |
1069 | goto ns_cmd_exit; | 1072 | goto ns_cmd_exit; |
1070 | } | 1073 | } |
@@ -1213,8 +1216,9 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, | |||
1213 | cmpl = lpfc_cmpl_ct_cmd_rff_id; | 1216 | cmpl = lpfc_cmpl_ct_cmd_rff_id; |
1214 | break; | 1217 | break; |
1215 | } | 1218 | } |
1216 | lpfc_nlp_get(ndlp); | 1219 | /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count |
1217 | 1220 | * to hold ndlp reference for the corresponding callback function. | |
1221 | */ | ||
1218 | if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) { | 1222 | if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) { |
1219 | /* On success, The cmpl function will free the buffers */ | 1223 | /* On success, The cmpl function will free the buffers */ |
1220 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, | 1224 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, |
@@ -1222,9 +1226,13 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, | |||
1222 | cmdcode, ndlp->nlp_DID, 0); | 1226 | cmdcode, ndlp->nlp_DID, 0); |
1223 | return 0; | 1227 | return 0; |
1224 | } | 1228 | } |
1225 | |||
1226 | rc=6; | 1229 | rc=6; |
1230 | |||
1231 | /* Decrement ndlp reference count to release ndlp reference held | ||
1232 | * for the failed command's callback function. | ||
1233 | */ | ||
1227 | lpfc_nlp_put(ndlp); | 1234 | lpfc_nlp_put(ndlp); |
1235 | |||
1228 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | 1236 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); |
1229 | ns_cmd_free_bmp: | 1237 | ns_cmd_free_bmp: |
1230 | kfree(bmp); | 1238 | kfree(bmp); |
@@ -1271,6 +1279,9 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1271 | } | 1279 | } |
1272 | 1280 | ||
1273 | ndlp = lpfc_findnode_did(vport, FDMI_DID); | 1281 | ndlp = lpfc_findnode_did(vport, FDMI_DID); |
1282 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) | ||
1283 | goto fail_out; | ||
1284 | |||
1274 | if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { | 1285 | if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { |
1275 | /* FDMI rsp failed */ | 1286 | /* FDMI rsp failed */ |
1276 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 1287 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
@@ -1294,6 +1305,8 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1294 | lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA); | 1305 | lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA); |
1295 | break; | 1306 | break; |
1296 | } | 1307 | } |
1308 | |||
1309 | fail_out: | ||
1297 | lpfc_ct_free_iocb(phba, cmdiocb); | 1310 | lpfc_ct_free_iocb(phba, cmdiocb); |
1298 | return; | 1311 | return; |
1299 | } | 1312 | } |
@@ -1650,12 +1663,18 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) | |||
1650 | bpl->tus.w = le32_to_cpu(bpl->tus.w); | 1663 | bpl->tus.w = le32_to_cpu(bpl->tus.w); |
1651 | 1664 | ||
1652 | cmpl = lpfc_cmpl_ct_cmd_fdmi; | 1665 | cmpl = lpfc_cmpl_ct_cmd_fdmi; |
1653 | lpfc_nlp_get(ndlp); | ||
1654 | 1666 | ||
1667 | /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count | ||
1668 | * to hold ndlp reference for the corresponding callback function. | ||
1669 | */ | ||
1655 | if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0)) | 1670 | if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0)) |
1656 | return 0; | 1671 | return 0; |
1657 | 1672 | ||
1673 | /* Decrement ndlp reference count to release ndlp reference held | ||
1674 | * for the failed command's callback function. | ||
1675 | */ | ||
1658 | lpfc_nlp_put(ndlp); | 1676 | lpfc_nlp_put(ndlp); |
1677 | |||
1659 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | 1678 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); |
1660 | fdmi_cmd_free_bmp: | 1679 | fdmi_cmd_free_bmp: |
1661 | kfree(bmp); | 1680 | kfree(bmp); |
@@ -1698,7 +1717,7 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport) | |||
1698 | struct lpfc_nodelist *ndlp; | 1717 | struct lpfc_nodelist *ndlp; |
1699 | 1718 | ||
1700 | ndlp = lpfc_findnode_did(vport, FDMI_DID); | 1719 | ndlp = lpfc_findnode_did(vport, FDMI_DID); |
1701 | if (ndlp) { | 1720 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { |
1702 | if (init_utsname()->nodename[0] != '\0') | 1721 | if (init_utsname()->nodename[0] != '\0') |
1703 | lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); | 1722 | lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); |
1704 | else | 1723 | else |
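
The lpfc_ct.c changes above settle one reference-counting convention: the issue path takes the ndlp reference that the completion handler later drops, and the caller drops it only when the command never goes out. A compressed sketch of that convention (in the real driver the reference is taken inside lpfc_gen_req() itself, and issue() here is only a hypothetical stand-in for the hardware submit step):

static int issue_with_node_ref_sketch(struct lpfc_nodelist *ndlp,
				      int (*issue)(struct lpfc_nodelist *))
{
	/* hold a node reference on behalf of the completion handler */
	if (!lpfc_nlp_get(ndlp))
		return -ENODEV;

	if (issue(ndlp) == 0)
		return 0;	/* success: the completion handler puts the ref */

	/* the command never went out: release the callback's reference now */
	lpfc_nlp_put(ndlp);
	return -EIO;
}
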
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index cfe81c50529a..2db0b74b6fad 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -73,6 +73,12 @@ struct lpfc_nodelist { | |||
73 | uint8_t nlp_fcp_info; /* class info, bits 0-3 */ | 73 | uint8_t nlp_fcp_info; /* class info, bits 0-3 */ |
74 | #define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ | 74 | #define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ |
75 | 75 | ||
76 | uint16_t nlp_usg_map; /* ndlp management usage bitmap */ | ||
77 | #define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */ | ||
78 | #define NLP_USG_IACT_REQ_BIT 0x2 /* Request to inactivate ndlp */ | ||
79 | #define NLP_USG_FREE_REQ_BIT 0x4 /* Request to invoke ndlp memory free */ | ||
80 | #define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */ | ||
81 | |||
76 | struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ | 82 | struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ |
77 | struct fc_rport *rport; /* Corresponding FC transport | 83 | struct fc_rport *rport; /* Corresponding FC transport |
78 | port structure */ | 84 | port structure */ |
@@ -85,25 +91,51 @@ struct lpfc_nodelist { | |||
85 | }; | 91 | }; |
86 | 92 | ||
87 | /* Defines for nlp_flag (uint32) */ | 93 | /* Defines for nlp_flag (uint32) */ |
88 | #define NLP_PLOGI_SND 0x20 /* sent PLOGI request for this entry */ | 94 | #define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ |
89 | #define NLP_PRLI_SND 0x40 /* sent PRLI request for this entry */ | 95 | #define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ |
90 | #define NLP_ADISC_SND 0x80 /* sent ADISC request for this entry */ | 96 | #define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */ |
91 | #define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */ | 97 | #define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */ |
92 | #define NLP_RNID_SND 0x400 /* sent RNID request for this entry */ | 98 | #define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */ |
93 | #define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */ | 99 | #define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ |
94 | #define NLP_DEFER_RM 0x10000 /* Remove this ndlp if no longer used */ | 100 | #define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */ |
95 | #define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */ | 101 | #define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ |
96 | #define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */ | 102 | #define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ |
97 | #define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */ | 103 | #define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */ |
98 | #define NLP_LOGO_ACC 0x100000 /* Process LOGO after ACC completes */ | 104 | #define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */ |
99 | #define NLP_TGT_NO_SCSIID 0x200000 /* good PRLI but no binding for scsid */ | 105 | #define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */ |
100 | #define NLP_ACC_REGLOGIN 0x1000000 /* Issue Reg Login after successful | 106 | #define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful |
101 | ACC */ | 107 | ACC */ |
102 | #define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from | 108 | #define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from |
103 | NPR list */ | 109 | NPR list */ |
104 | #define NLP_RM_DFLT_RPI 0x4000000 /* need to remove leftover dflt RPI */ | 110 | #define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */ |
105 | #define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */ | 111 | #define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ |
106 | #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ | 112 | #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ |
113 | #define NLP_SC_REQ 0x20000000 /* Target requires authentication */ | ||
114 | |||
115 | /* ndlp usage management macros */ | ||
116 | #define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ | ||
117 | & NLP_USG_NODE_ACT_BIT) \ | ||
118 | && \ | ||
119 | !((ndlp)->nlp_usg_map \ | ||
120 | & NLP_USG_FREE_ACK_BIT)) | ||
121 | #define NLP_SET_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \ | ||
122 | |= NLP_USG_NODE_ACT_BIT) | ||
123 | #define NLP_INT_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \ | ||
124 | = NLP_USG_NODE_ACT_BIT) | ||
125 | #define NLP_CLR_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \ | ||
126 | &= ~NLP_USG_NODE_ACT_BIT) | ||
127 | #define NLP_CHK_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \ | ||
128 | & NLP_USG_IACT_REQ_BIT) | ||
129 | #define NLP_SET_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \ | ||
130 | |= NLP_USG_IACT_REQ_BIT) | ||
131 | #define NLP_CHK_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \ | ||
132 | & NLP_USG_FREE_REQ_BIT) | ||
133 | #define NLP_SET_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \ | ||
134 | |= NLP_USG_FREE_REQ_BIT) | ||
135 | #define NLP_CHK_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \ | ||
136 | & NLP_USG_FREE_ACK_BIT) | ||
137 | #define NLP_SET_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \ | ||
138 | |= NLP_USG_FREE_ACK_BIT) | ||
107 | 139 | ||
108 | /* There are 4 different double linked lists nodelist entries can reside on. | 140 | /* There are 4 different double linked lists nodelist entries can reside on. |
109 | * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used | 141 | * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used |
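
The new nlp_usg_map bits and the NLP_CHK_NODE_ACT() test above are what the node-list walkers elsewhere in this patch use to skip nodes that are inactive or being freed. A minimal sketch of that guard, using only names that appear in this diff:

static void walk_active_nodes_sketch(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;	/* inactive or pending free: skip */
		/* ... operate on the live node here ... */
	}
}
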
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index c6b739dc6bc3..cbb68a942255 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -113,6 +113,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, | |||
113 | 113 | ||
114 | if (elsiocb == NULL) | 114 | if (elsiocb == NULL) |
115 | return NULL; | 115 | return NULL; |
116 | |||
116 | icmd = &elsiocb->iocb; | 117 | icmd = &elsiocb->iocb; |
117 | 118 | ||
118 | /* fill in BDEs for command */ | 119 | /* fill in BDEs for command */ |
@@ -134,9 +135,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, | |||
134 | if (!prsp || !prsp->virt) | 135 | if (!prsp || !prsp->virt) |
135 | goto els_iocb_free_prsp_exit; | 136 | goto els_iocb_free_prsp_exit; |
136 | INIT_LIST_HEAD(&prsp->list); | 137 | INIT_LIST_HEAD(&prsp->list); |
137 | } else { | 138 | } else |
138 | prsp = NULL; | 139 | prsp = NULL; |
139 | } | ||
140 | 140 | ||
141 | /* Allocate buffer for Buffer ptr list */ | 141 | /* Allocate buffer for Buffer ptr list */ |
142 | pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | 142 | pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); |
@@ -246,7 +246,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) | |||
246 | 246 | ||
247 | sp = &phba->fc_fabparam; | 247 | sp = &phba->fc_fabparam; |
248 | ndlp = lpfc_findnode_did(vport, Fabric_DID); | 248 | ndlp = lpfc_findnode_did(vport, Fabric_DID); |
249 | if (!ndlp) { | 249 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
250 | err = 1; | 250 | err = 1; |
251 | goto fail; | 251 | goto fail; |
252 | } | 252 | } |
@@ -282,6 +282,9 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) | |||
282 | 282 | ||
283 | mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; | 283 | mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; |
284 | mbox->vport = vport; | 284 | mbox->vport = vport; |
285 | /* increment the reference count on ndlp to hold reference | ||
286 | * for the callback routine. | ||
287 | */ | ||
285 | mbox->context2 = lpfc_nlp_get(ndlp); | 288 | mbox->context2 = lpfc_nlp_get(ndlp); |
286 | 289 | ||
287 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); | 290 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
@@ -293,6 +296,9 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) | |||
293 | return 0; | 296 | return 0; |
294 | 297 | ||
295 | fail_issue_reg_login: | 298 | fail_issue_reg_login: |
299 | /* decrement the reference count on ndlp just incremented | ||
300 | * for the failed mbox command. | ||
301 | */ | ||
296 | lpfc_nlp_put(ndlp); | 302 | lpfc_nlp_put(ndlp); |
297 | mp = (struct lpfc_dmabuf *) mbox->context1; | 303 | mp = (struct lpfc_dmabuf *) mbox->context1; |
298 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 304 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
@@ -381,6 +387,8 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
381 | */ | 387 | */ |
382 | list_for_each_entry_safe(np, next_np, | 388 | list_for_each_entry_safe(np, next_np, |
383 | &vport->fc_nodes, nlp_listp) { | 389 | &vport->fc_nodes, nlp_listp) { |
390 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
391 | continue; | ||
384 | if ((np->nlp_state != NLP_STE_NPR_NODE) || | 392 | if ((np->nlp_state != NLP_STE_NPR_NODE) || |
385 | !(np->nlp_flag & NLP_NPR_ADISC)) | 393 | !(np->nlp_flag & NLP_NPR_ADISC)) |
386 | continue; | 394 | continue; |
@@ -456,6 +464,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
456 | mempool_free(mbox, phba->mbox_mem_pool); | 464 | mempool_free(mbox, phba->mbox_mem_pool); |
457 | goto fail; | 465 | goto fail; |
458 | } | 466 | } |
467 | /* Decrement ndlp reference count indicating that ndlp can be | ||
468 | * safely released when other references to it are done. | ||
469 | */ | ||
459 | lpfc_nlp_put(ndlp); | 470 | lpfc_nlp_put(ndlp); |
460 | 471 | ||
461 | ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); | 472 | ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); |
@@ -467,22 +478,29 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
467 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 478 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); |
468 | if (!ndlp) | 479 | if (!ndlp) |
469 | goto fail; | 480 | goto fail; |
470 | |||
471 | lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID); | 481 | lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID); |
482 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
483 | ndlp = lpfc_enable_node(vport, ndlp, | ||
484 | NLP_STE_UNUSED_NODE); | ||
485 | if(!ndlp) | ||
486 | goto fail; | ||
472 | } | 487 | } |
473 | 488 | ||
474 | memcpy(&ndlp->nlp_portname, &sp->portName, | 489 | memcpy(&ndlp->nlp_portname, &sp->portName, |
475 | sizeof(struct lpfc_name)); | 490 | sizeof(struct lpfc_name)); |
476 | memcpy(&ndlp->nlp_nodename, &sp->nodeName, | 491 | memcpy(&ndlp->nlp_nodename, &sp->nodeName, |
477 | sizeof(struct lpfc_name)); | 492 | sizeof(struct lpfc_name)); |
493 | /* Set state will put ndlp onto node list if not already done */ | ||
478 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | 494 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
479 | spin_lock_irq(shost->host_lock); | 495 | spin_lock_irq(shost->host_lock); |
480 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; | 496 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
481 | spin_unlock_irq(shost->host_lock); | 497 | spin_unlock_irq(shost->host_lock); |
482 | } else { | 498 | } else |
483 | /* This side will wait for the PLOGI */ | 499 | /* This side will wait for the PLOGI, decrement ndlp reference |
500 | * count indicating that ndlp can be released when other | ||
501 | * references to it are done. | ||
502 | */ | ||
484 | lpfc_nlp_put(ndlp); | 503 | lpfc_nlp_put(ndlp); |
485 | } | ||
486 | 504 | ||
487 | /* If we are pt2pt with another NPort, force NPIV off! */ | 505 | /* If we are pt2pt with another NPort, force NPIV off! */ |
488 | phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; | 506 | phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; |
@@ -728,16 +746,21 @@ lpfc_initial_flogi(struct lpfc_vport *vport) | |||
728 | if (!ndlp) | 746 | if (!ndlp) |
729 | return 0; | 747 | return 0; |
730 | lpfc_nlp_init(vport, ndlp, Fabric_DID); | 748 | lpfc_nlp_init(vport, ndlp, Fabric_DID); |
731 | } else { | 749 | /* Put ndlp onto node list */ |
732 | lpfc_dequeue_node(vport, ndlp); | 750 | lpfc_enqueue_node(vport, ndlp); |
751 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
752 | /* re-setup ndlp without removing from node list */ | ||
753 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); | ||
754 | if (!ndlp) | ||
755 | return 0; | ||
733 | } | 756 | } |
734 | 757 | ||
735 | if (lpfc_issue_els_flogi(vport, ndlp, 0)) { | 758 | if (lpfc_issue_els_flogi(vport, ndlp, 0)) |
736 | /* This decrement of reference count to node shall kick off | 759 | /* This decrement of reference count to node shall kick off |
737 | * the release of the node. | 760 | * the release of the node. |
738 | */ | 761 | */ |
739 | lpfc_nlp_put(ndlp); | 762 | lpfc_nlp_put(ndlp); |
740 | } | 763 | |
741 | return 1; | 764 | return 1; |
742 | } | 765 | } |
743 | 766 | ||
@@ -755,9 +778,15 @@ lpfc_initial_fdisc(struct lpfc_vport *vport) | |||
755 | if (!ndlp) | 778 | if (!ndlp) |
756 | return 0; | 779 | return 0; |
757 | lpfc_nlp_init(vport, ndlp, Fabric_DID); | 780 | lpfc_nlp_init(vport, ndlp, Fabric_DID); |
758 | } else { | 781 | /* Put ndlp onto node list */ |
759 | lpfc_dequeue_node(vport, ndlp); | 782 | lpfc_enqueue_node(vport, ndlp); |
783 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
784 | /* re-setup ndlp without removing from node list */ | ||
785 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); | ||
786 | if (!ndlp) | ||
787 | return 0; | ||
760 | } | 788 | } |
789 | |||
761 | if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { | 790 | if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { |
762 | /* decrement node reference count to trigger the release of | 791 | /* decrement node reference count to trigger the release of |
763 | * the node. | 792 | * the node. |
@@ -816,7 +845,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | |||
816 | */ | 845 | */ |
817 | new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); | 846 | new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); |
818 | 847 | ||
819 | if (new_ndlp == ndlp) | 848 | if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) |
820 | return ndlp; | 849 | return ndlp; |
821 | 850 | ||
822 | if (!new_ndlp) { | 851 | if (!new_ndlp) { |
@@ -827,8 +856,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | |||
827 | new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); | 856 | new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); |
828 | if (!new_ndlp) | 857 | if (!new_ndlp) |
829 | return ndlp; | 858 | return ndlp; |
830 | |||
831 | lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); | 859 | lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); |
860 | } else if (!NLP_CHK_NODE_ACT(new_ndlp)) { | ||
861 | new_ndlp = lpfc_enable_node(vport, new_ndlp, | ||
862 | NLP_STE_UNUSED_NODE); | ||
863 | if (!new_ndlp) | ||
864 | return ndlp; | ||
832 | } | 865 | } |
833 | 866 | ||
834 | lpfc_unreg_rpi(vport, new_ndlp); | 867 | lpfc_unreg_rpi(vport, new_ndlp); |
@@ -839,6 +872,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | |||
839 | new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; | 872 | new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
840 | ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; | 873 | ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; |
841 | 874 | ||
875 | /* Set state will put new_ndlp on to node list if not already done */ | ||
842 | lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); | 876 | lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); |
843 | 877 | ||
844 | /* Move this back to NPR state */ | 878 | /* Move this back to NPR state */ |
@@ -912,7 +946,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
912 | irsp->un.elsreq64.remoteID); | 946 | irsp->un.elsreq64.remoteID); |
913 | 947 | ||
914 | ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); | 948 | ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); |
915 | if (!ndlp) { | 949 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
916 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 950 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
917 | "0136 PLOGI completes to NPort x%x " | 951 | "0136 PLOGI completes to NPort x%x " |
918 | "with no ndlp. Data: x%x x%x x%x\n", | 952 | "with no ndlp. Data: x%x x%x x%x\n", |
@@ -962,12 +996,11 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
962 | } | 996 | } |
963 | /* PLOGI failed */ | 997 | /* PLOGI failed */ |
964 | /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ | 998 | /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ |
965 | if (lpfc_error_lost_link(irsp)) { | 999 | if (lpfc_error_lost_link(irsp)) |
966 | rc = NLP_STE_FREED_NODE; | 1000 | rc = NLP_STE_FREED_NODE; |
967 | } else { | 1001 | else |
968 | rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, | 1002 | rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
969 | NLP_EVT_CMPL_PLOGI); | 1003 | NLP_EVT_CMPL_PLOGI); |
970 | } | ||
971 | } else { | 1004 | } else { |
972 | /* Good status, call state machine */ | 1005 | /* Good status, call state machine */ |
973 | prsp = list_entry(((struct lpfc_dmabuf *) | 1006 | prsp = list_entry(((struct lpfc_dmabuf *) |
@@ -1015,8 +1048,10 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) | |||
1015 | pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ | 1048 | pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ |
1016 | 1049 | ||
1017 | ndlp = lpfc_findnode_did(vport, did); | 1050 | ndlp = lpfc_findnode_did(vport, did); |
1018 | /* If ndlp if not NULL, we will bump the reference count on it */ | 1051 | if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) |
1052 | ndlp = NULL; | ||
1019 | 1053 | ||
1054 | /* If ndlp is not NULL, we will bump the reference count on it */ | ||
1020 | cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); | 1055 | cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); |
1021 | elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, | 1056 | elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, |
1022 | ELS_CMD_PLOGI); | 1057 | ELS_CMD_PLOGI); |
@@ -1097,18 +1132,15 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1097 | } | 1132 | } |
1098 | /* PRLI failed */ | 1133 | /* PRLI failed */ |
1099 | /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ | 1134 | /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ |
1100 | if (lpfc_error_lost_link(irsp)) { | 1135 | if (lpfc_error_lost_link(irsp)) |
1101 | goto out; | 1136 | goto out; |
1102 | } else { | 1137 | else |
1103 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, | 1138 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
1104 | NLP_EVT_CMPL_PRLI); | 1139 | NLP_EVT_CMPL_PRLI); |
1105 | } | 1140 | } else |
1106 | } else { | ||
1107 | /* Good status, call state machine */ | 1141 | /* Good status, call state machine */ |
1108 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, | 1142 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
1109 | NLP_EVT_CMPL_PRLI); | 1143 | NLP_EVT_CMPL_PRLI); |
1110 | } | ||
1111 | |||
1112 | out: | 1144 | out: |
1113 | lpfc_els_free_iocb(phba, cmdiocb); | 1145 | lpfc_els_free_iocb(phba, cmdiocb); |
1114 | return; | 1146 | return; |
@@ -1275,15 +1307,13 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1275 | } | 1307 | } |
1276 | /* ADISC failed */ | 1308 | /* ADISC failed */ |
1277 | /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ | 1309 | /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ |
1278 | if (!lpfc_error_lost_link(irsp)) { | 1310 | if (!lpfc_error_lost_link(irsp)) |
1279 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, | 1311 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
1280 | NLP_EVT_CMPL_ADISC); | 1312 | NLP_EVT_CMPL_ADISC); |
1281 | } | 1313 | } else |
1282 | } else { | ||
1283 | /* Good status, call state machine */ | 1314 | /* Good status, call state machine */ |
1284 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, | 1315 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
1285 | NLP_EVT_CMPL_ADISC); | 1316 | NLP_EVT_CMPL_ADISC); |
1286 | } | ||
1287 | 1317 | ||
1288 | if (disc && vport->num_disc_nodes) { | 1318 | if (disc && vport->num_disc_nodes) { |
1289 | /* Check to see if there are more ADISCs to be sent */ | 1319 | /* Check to see if there are more ADISCs to be sent */ |
@@ -1443,14 +1473,12 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1443 | else | 1473 | else |
1444 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, | 1474 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
1445 | NLP_EVT_CMPL_LOGO); | 1475 | NLP_EVT_CMPL_LOGO); |
1446 | } else { | 1476 | } else |
1447 | /* Good status, call state machine. | 1477 | /* Good status, call state machine. |
1448 | * This will unregister the rpi if needed. | 1478 | * This will unregister the rpi if needed. |
1449 | */ | 1479 | */ |
1450 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, | 1480 | lpfc_disc_state_machine(vport, ndlp, cmdiocb, |
1451 | NLP_EVT_CMPL_LOGO); | 1481 | NLP_EVT_CMPL_LOGO); |
1452 | } | ||
1453 | |||
1454 | out: | 1482 | out: |
1455 | lpfc_els_free_iocb(phba, cmdiocb); | 1483 | lpfc_els_free_iocb(phba, cmdiocb); |
1456 | return; | 1484 | return; |
@@ -1556,11 +1584,19 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) | |||
1556 | psli = &phba->sli; | 1584 | psli = &phba->sli; |
1557 | pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ | 1585 | pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ |
1558 | cmdsize = (sizeof(uint32_t) + sizeof(SCR)); | 1586 | cmdsize = (sizeof(uint32_t) + sizeof(SCR)); |
1559 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | ||
1560 | if (!ndlp) | ||
1561 | return 1; | ||
1562 | 1587 | ||
1563 | lpfc_nlp_init(vport, ndlp, nportid); | 1588 | ndlp = lpfc_findnode_did(vport, nportid); |
1589 | if (!ndlp) { | ||
1590 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | ||
1591 | if (!ndlp) | ||
1592 | return 1; | ||
1593 | lpfc_nlp_init(vport, ndlp, nportid); | ||
1594 | lpfc_enqueue_node(vport, ndlp); | ||
1595 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
1596 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); | ||
1597 | if (!ndlp) | ||
1598 | return 1; | ||
1599 | } | ||
1564 | 1600 | ||
1565 | elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, | 1601 | elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, |
1566 | ndlp->nlp_DID, ELS_CMD_SCR); | 1602 | ndlp->nlp_DID, ELS_CMD_SCR); |
@@ -1623,11 +1659,19 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) | |||
1623 | psli = &phba->sli; | 1659 | psli = &phba->sli; |
1624 | pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ | 1660 | pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ |
1625 | cmdsize = (sizeof(uint32_t) + sizeof(FARP)); | 1661 | cmdsize = (sizeof(uint32_t) + sizeof(FARP)); |
1626 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | ||
1627 | if (!ndlp) | ||
1628 | return 1; | ||
1629 | 1662 | ||
1630 | lpfc_nlp_init(vport, ndlp, nportid); | 1663 | ndlp = lpfc_findnode_did(vport, nportid); |
1664 | if (!ndlp) { | ||
1665 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | ||
1666 | if (!ndlp) | ||
1667 | return 1; | ||
1668 | lpfc_nlp_init(vport, ndlp, nportid); | ||
1669 | lpfc_enqueue_node(vport, ndlp); | ||
1670 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
1671 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); | ||
1672 | if (!ndlp) | ||
1673 | return 1; | ||
1674 | } | ||
1631 | 1675 | ||
1632 | elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, | 1676 | elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, |
1633 | ndlp->nlp_DID, ELS_CMD_RNID); | 1677 | ndlp->nlp_DID, ELS_CMD_RNID); |
@@ -1657,7 +1701,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) | |||
1657 | memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); | 1701 | memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); |
1658 | memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); | 1702 | memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); |
1659 | ondlp = lpfc_findnode_did(vport, nportid); | 1703 | ondlp = lpfc_findnode_did(vport, nportid); |
1660 | if (ondlp) { | 1704 | if (ondlp && NLP_CHK_NODE_ACT(ondlp)) { |
1661 | memcpy(&fp->OportName, &ondlp->nlp_portname, | 1705 | memcpy(&fp->OportName, &ondlp->nlp_portname, |
1662 | sizeof(struct lpfc_name)); | 1706 | sizeof(struct lpfc_name)); |
1663 | memcpy(&fp->OnodeName, &ondlp->nlp_nodename, | 1707 | memcpy(&fp->OnodeName, &ondlp->nlp_nodename, |
@@ -1690,6 +1734,7 @@ void | |||
1690 | lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) | 1734 | lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) |
1691 | { | 1735 | { |
1692 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 1736 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
1737 | struct lpfc_work_evt *evtp; | ||
1693 | 1738 | ||
1694 | spin_lock_irq(shost->host_lock); | 1739 | spin_lock_irq(shost->host_lock); |
1695 | nlp->nlp_flag &= ~NLP_DELAY_TMO; | 1740 | nlp->nlp_flag &= ~NLP_DELAY_TMO; |
@@ -1697,8 +1742,12 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) | |||
1697 | del_timer_sync(&nlp->nlp_delayfunc); | 1742 | del_timer_sync(&nlp->nlp_delayfunc); |
1698 | nlp->nlp_last_elscmd = 0; | 1743 | nlp->nlp_last_elscmd = 0; |
1699 | 1744 | ||
1700 | if (!list_empty(&nlp->els_retry_evt.evt_listp)) | 1745 | if (!list_empty(&nlp->els_retry_evt.evt_listp)) { |
1701 | list_del_init(&nlp->els_retry_evt.evt_listp); | 1746 | list_del_init(&nlp->els_retry_evt.evt_listp); |
1747 | /* Decrement nlp reference count held for the delayed retry */ | ||
1748 | evtp = &nlp->els_retry_evt; | ||
1749 | lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); | ||
1750 | } | ||
1702 | 1751 | ||
1703 | if (nlp->nlp_flag & NLP_NPR_2B_DISC) { | 1752 | if (nlp->nlp_flag & NLP_NPR_2B_DISC) { |
1704 | spin_lock_irq(shost->host_lock); | 1753 | spin_lock_irq(shost->host_lock); |
@@ -1842,13 +1891,14 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1842 | cmd = *elscmd++; | 1891 | cmd = *elscmd++; |
1843 | } | 1892 | } |
1844 | 1893 | ||
1845 | if (ndlp) | 1894 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) |
1846 | did = ndlp->nlp_DID; | 1895 | did = ndlp->nlp_DID; |
1847 | else { | 1896 | else { |
1848 | /* We should only hit this case for retrying PLOGI */ | 1897 | /* We should only hit this case for retrying PLOGI */ |
1849 | did = irsp->un.elsreq64.remoteID; | 1898 | did = irsp->un.elsreq64.remoteID; |
1850 | ndlp = lpfc_findnode_did(vport, did); | 1899 | ndlp = lpfc_findnode_did(vport, did); |
1851 | if (!ndlp && (cmd != ELS_CMD_PLOGI)) | 1900 | if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp)) |
1901 | && (cmd != ELS_CMD_PLOGI)) | ||
1852 | return 1; | 1902 | return 1; |
1853 | } | 1903 | } |
1854 | 1904 | ||
@@ -1870,18 +1920,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1870 | break; | 1920 | break; |
1871 | 1921 | ||
1872 | case IOERR_ILLEGAL_COMMAND: | 1922 | case IOERR_ILLEGAL_COMMAND: |
1873 | if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) && | 1923 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
1874 | (cmd == ELS_CMD_FDISC)) { | 1924 | "0124 Retry illegal cmd x%x " |
1875 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 1925 | "retry:x%x delay:x%x\n", |
1876 | "0124 FDISC failed (3/6) " | 1926 | cmd, cmdiocb->retry, delay); |
1877 | "retrying...\n"); | 1927 | retry = 1; |
1878 | lpfc_mbx_unreg_vpi(vport); | 1928 | /* All command's retry policy */ |
1879 | retry = 1; | 1929 | maxretry = 8; |
1880 | /* FDISC retry policy */ | 1930 | if (cmdiocb->retry > 2) |
1881 | maxretry = 48; | 1931 | delay = 1000; |
1882 | if (cmdiocb->retry >= 32) | ||
1883 | delay = 1000; | ||
1884 | } | ||
1885 | break; | 1932 | break; |
1886 | 1933 | ||
1887 | case IOERR_NO_RESOURCES: | 1934 | case IOERR_NO_RESOURCES: |
@@ -1967,6 +2014,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1967 | break; | 2014 | break; |
1968 | 2015 | ||
1969 | case LSRJT_LOGICAL_ERR: | 2016 | case LSRJT_LOGICAL_ERR: |
2017 | /* There are some cases where switches return this | ||
2018 | * error when they are not ready and should be returning | ||
2019 | * Logical Busy. We should delay every time. | ||
2020 | */ | ||
2021 | if (cmd == ELS_CMD_FDISC && | ||
2022 | stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { | ||
2023 | maxretry = 3; | ||
2024 | delay = 1000; | ||
2025 | retry = 1; | ||
2026 | break; | ||
2027 | } | ||
1970 | case LSRJT_PROTOCOL_ERR: | 2028 | case LSRJT_PROTOCOL_ERR: |
1971 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && | 2029 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && |
1972 | (cmd == ELS_CMD_FDISC) && | 2030 | (cmd == ELS_CMD_FDISC) && |
@@ -1996,7 +2054,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
1996 | retry = 1; | 2054 | retry = 1; |
1997 | 2055 | ||
1998 | if ((cmd == ELS_CMD_FLOGI) && | 2056 | if ((cmd == ELS_CMD_FLOGI) && |
1999 | (phba->fc_topology != TOPOLOGY_LOOP)) { | 2057 | (phba->fc_topology != TOPOLOGY_LOOP) && |
2058 | !lpfc_error_lost_link(irsp)) { | ||
2000 | /* FLOGI retry policy */ | 2059 | /* FLOGI retry policy */ |
2001 | retry = 1; | 2060 | retry = 1; |
2002 | maxretry = 48; | 2061 | maxretry = 48; |
@@ -2322,6 +2381,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2322 | if ((rspiocb->iocb.ulpStatus == 0) | 2381 | if ((rspiocb->iocb.ulpStatus == 0) |
2323 | && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { | 2382 | && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { |
2324 | lpfc_unreg_rpi(vport, ndlp); | 2383 | lpfc_unreg_rpi(vport, ndlp); |
2384 | /* Increment reference count to ndlp to hold the | ||
2385 | * reference to ndlp for the callback function. | ||
2386 | */ | ||
2325 | mbox->context2 = lpfc_nlp_get(ndlp); | 2387 | mbox->context2 = lpfc_nlp_get(ndlp); |
2326 | mbox->vport = vport; | 2388 | mbox->vport = vport; |
2327 | if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { | 2389 | if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { |
@@ -2335,9 +2397,13 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2335 | NLP_STE_REG_LOGIN_ISSUE); | 2397 | NLP_STE_REG_LOGIN_ISSUE); |
2336 | } | 2398 | } |
2337 | if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) | 2399 | if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) |
2338 | != MBX_NOT_FINISHED) { | 2400 | != MBX_NOT_FINISHED) |
2339 | goto out; | 2401 | goto out; |
2340 | } | 2402 | else |
2403 | /* Decrement the ndlp reference count we | ||
2404 | * set for this failed mailbox command. | ||
2405 | */ | ||
2406 | lpfc_nlp_put(ndlp); | ||
2341 | 2407 | ||
2342 | /* ELS rsp: Cannot issue reg_login for <NPortid> */ | 2408 | /* ELS rsp: Cannot issue reg_login for <NPortid> */ |
2343 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 2409 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
@@ -2796,6 +2862,8 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport) | |||
2796 | 2862 | ||
2797 | /* go thru NPR nodes and issue any remaining ELS ADISCs */ | 2863 | /* go thru NPR nodes and issue any remaining ELS ADISCs */ |
2798 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { | 2864 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { |
2865 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
2866 | continue; | ||
2799 | if (ndlp->nlp_state == NLP_STE_NPR_NODE && | 2867 | if (ndlp->nlp_state == NLP_STE_NPR_NODE && |
2800 | (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && | 2868 | (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && |
2801 | (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { | 2869 | (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { |
@@ -2833,6 +2901,8 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) | |||
2833 | 2901 | ||
2834 | /* go thru NPR nodes and issue any remaining ELS PLOGIs */ | 2902 | /* go thru NPR nodes and issue any remaining ELS PLOGIs */ |
2835 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { | 2903 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { |
2904 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
2905 | continue; | ||
2836 | if (ndlp->nlp_state == NLP_STE_NPR_NODE && | 2906 | if (ndlp->nlp_state == NLP_STE_NPR_NODE && |
2837 | (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && | 2907 | (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && |
2838 | (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && | 2908 | (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && |
@@ -2869,6 +2939,16 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport) | |||
2869 | struct lpfc_hba *phba = vport->phba; | 2939 | struct lpfc_hba *phba = vport->phba; |
2870 | int i; | 2940 | int i; |
2871 | 2941 | ||
2942 | spin_lock_irq(shost->host_lock); | ||
2943 | if (vport->fc_rscn_flush) { | ||
2944 | /* Another thread is walking fc_rscn_id_list on this vport */ | ||
2945 | spin_unlock_irq(shost->host_lock); | ||
2946 | return; | ||
2947 | } | ||
2948 | /* Indicate we are walking lpfc_els_flush_rscn on this vport */ | ||
2949 | vport->fc_rscn_flush = 1; | ||
2950 | spin_unlock_irq(shost->host_lock); | ||
2951 | |||
2872 | for (i = 0; i < vport->fc_rscn_id_cnt; i++) { | 2952 | for (i = 0; i < vport->fc_rscn_id_cnt; i++) { |
2873 | lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); | 2953 | lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); |
2874 | vport->fc_rscn_id_list[i] = NULL; | 2954 | vport->fc_rscn_id_list[i] = NULL; |
@@ -2878,6 +2958,8 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport) | |||
2878 | vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); | 2958 | vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); |
2879 | spin_unlock_irq(shost->host_lock); | 2959 | spin_unlock_irq(shost->host_lock); |
2880 | lpfc_can_disctmo(vport); | 2960 | lpfc_can_disctmo(vport); |
2961 | /* Indicate we are done walking this fc_rscn_id_list */ | ||
2962 | vport->fc_rscn_flush = 0; | ||
2881 | } | 2963 | } |
2882 | 2964 | ||
2883 | int | 2965 | int |
@@ -2887,6 +2969,7 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) | |||
2887 | D_ID rscn_did; | 2969 | D_ID rscn_did; |
2888 | uint32_t *lp; | 2970 | uint32_t *lp; |
2889 | uint32_t payload_len, i; | 2971 | uint32_t payload_len, i; |
2972 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
2890 | 2973 | ||
2891 | ns_did.un.word = did; | 2974 | ns_did.un.word = did; |
2892 | 2975 | ||
@@ -2898,6 +2981,15 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) | |||
2898 | if (vport->fc_flag & FC_RSCN_DISCOVERY) | 2981 | if (vport->fc_flag & FC_RSCN_DISCOVERY) |
2899 | return did; | 2982 | return did; |
2900 | 2983 | ||
2984 | spin_lock_irq(shost->host_lock); | ||
2985 | if (vport->fc_rscn_flush) { | ||
2986 | /* Another thread is walking fc_rscn_id_list on this vport */ | ||
2987 | spin_unlock_irq(shost->host_lock); | ||
2988 | return 0; | ||
2989 | } | ||
2990 | /* Indicate we are walking fc_rscn_id_list on this vport */ | ||
2991 | vport->fc_rscn_flush = 1; | ||
2992 | spin_unlock_irq(shost->host_lock); | ||
2901 | for (i = 0; i < vport->fc_rscn_id_cnt; i++) { | 2993 | for (i = 0; i < vport->fc_rscn_id_cnt; i++) { |
2902 | lp = vport->fc_rscn_id_list[i]->virt; | 2994 | lp = vport->fc_rscn_id_list[i]->virt; |
2903 | payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); | 2995 | payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); |
@@ -2908,16 +3000,16 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) | |||
2908 | switch (rscn_did.un.b.resv) { | 3000 | switch (rscn_did.un.b.resv) { |
2909 | case 0: /* Single N_Port ID effected */ | 3001 | case 0: /* Single N_Port ID effected */ |
2910 | if (ns_did.un.word == rscn_did.un.word) | 3002 | if (ns_did.un.word == rscn_did.un.word) |
2911 | return did; | 3003 | goto return_did_out; |
2912 | break; | 3004 | break; |
2913 | case 1: /* Whole N_Port Area effected */ | 3005 | case 1: /* Whole N_Port Area effected */ |
2914 | if ((ns_did.un.b.domain == rscn_did.un.b.domain) | 3006 | if ((ns_did.un.b.domain == rscn_did.un.b.domain) |
2915 | && (ns_did.un.b.area == rscn_did.un.b.area)) | 3007 | && (ns_did.un.b.area == rscn_did.un.b.area)) |
2916 | return did; | 3008 | goto return_did_out; |
2917 | break; | 3009 | break; |
2918 | case 2: /* Whole N_Port Domain effected */ | 3010 | case 2: /* Whole N_Port Domain effected */ |
2919 | if (ns_did.un.b.domain == rscn_did.un.b.domain) | 3011 | if (ns_did.un.b.domain == rscn_did.un.b.domain) |
2920 | return did; | 3012 | goto return_did_out; |
2921 | break; | 3013 | break; |
2922 | default: | 3014 | default: |
2923 | /* Unknown Identifier in RSCN node */ | 3015 | /* Unknown Identifier in RSCN node */ |
@@ -2926,11 +3018,17 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) | |||
2926 | "RSCN payload Data: x%x\n", | 3018 | "RSCN payload Data: x%x\n", |
2927 | rscn_did.un.word); | 3019 | rscn_did.un.word); |
2928 | case 3: /* Whole Fabric effected */ | 3020 | case 3: /* Whole Fabric effected */ |
2929 | return did; | 3021 | goto return_did_out; |
2930 | } | 3022 | } |
2931 | } | 3023 | } |
2932 | } | 3024 | } |
3025 | /* Indicate we are done with walking fc_rscn_id_list on this vport */ | ||
3026 | vport->fc_rscn_flush = 0; | ||
2933 | return 0; | 3027 | return 0; |
3028 | return_did_out: | ||
3029 | /* Indicate we are done with walking fc_rscn_id_list on this vport */ | ||
3030 | vport->fc_rscn_flush = 0; | ||
3031 | return did; | ||
2934 | } | 3032 | } |
2935 | 3033 | ||
2936 | static int | 3034 | static int |
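The new vport->fc_rscn_flush field acts as a single-walker token for fc_rscn_id_list: a caller takes the host lock, backs off if another walker already holds the token, otherwise sets the flag, drops the lock, walks the list, and clears the flag on every exit path (which is why the early returns above were rewritten as goto return_did_out). A minimal standalone sketch of the same pattern, with the host lock modeled by a pthread mutex and every other name invented for the example rather than taken from the driver:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static int rscn_flush;          /* 1 while some thread is walking the list */
static int rscn_id_cnt = 3;     /* stand-in for vport->fc_rscn_id_cnt */

/* Returns 1 if the walk ran, 0 if another walker already held the token. */
static int walk_rscn_list(void)
{
    int i;

    pthread_mutex_lock(&host_lock);
    if (rscn_flush) {                   /* someone else is walking */
        pthread_mutex_unlock(&host_lock);
        return 0;
    }
    rscn_flush = 1;                     /* take the token */
    pthread_mutex_unlock(&host_lock);

    for (i = 0; i < rscn_id_cnt; i++)
        printf("visiting RSCN payload %d\n", i);

    rscn_flush = 0;                     /* must be cleared on every exit path */
    return 1;
}

int main(void)
{
    return walk_rscn_list() ? 0 : 1;
}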
@@ -2943,7 +3041,8 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport) | |||
2943 | */ | 3041 | */ |
2944 | 3042 | ||
2945 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | 3043 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
2946 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE || | 3044 | if (!NLP_CHK_NODE_ACT(ndlp) || |
3045 | ndlp->nlp_state == NLP_STE_UNUSED_NODE || | ||
2947 | lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0) | 3046 | lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0) |
2948 | continue; | 3047 | continue; |
2949 | 3048 | ||
@@ -2971,7 +3070,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
2971 | uint32_t *lp, *datap; | 3070 | uint32_t *lp, *datap; |
2972 | IOCB_t *icmd; | 3071 | IOCB_t *icmd; |
2973 | uint32_t payload_len, length, nportid, *cmd; | 3072 | uint32_t payload_len, length, nportid, *cmd; |
2974 | int rscn_cnt = vport->fc_rscn_id_cnt; | 3073 | int rscn_cnt; |
2975 | int rscn_id = 0, hba_id = 0; | 3074 | int rscn_id = 0, hba_id = 0; |
2976 | int i; | 3075 | int i; |
2977 | 3076 | ||
@@ -2984,7 +3083,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
2984 | /* RSCN received */ | 3083 | /* RSCN received */ |
2985 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 3084 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
2986 | "0214 RSCN received Data: x%x x%x x%x x%x\n", | 3085 | "0214 RSCN received Data: x%x x%x x%x x%x\n", |
2987 | vport->fc_flag, payload_len, *lp, rscn_cnt); | 3086 | vport->fc_flag, payload_len, *lp, |
3087 | vport->fc_rscn_id_cnt); | ||
2988 | for (i = 0; i < payload_len/sizeof(uint32_t); i++) | 3088 | for (i = 0; i < payload_len/sizeof(uint32_t); i++) |
2989 | fc_host_post_event(shost, fc_get_event_number(), | 3089 | fc_host_post_event(shost, fc_get_event_number(), |
2990 | FCH_EVT_RSCN, lp[i]); | 3090 | FCH_EVT_RSCN, lp[i]); |
@@ -3022,7 +3122,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3022 | "0214 Ignore RSCN " | 3122 | "0214 Ignore RSCN " |
3023 | "Data: x%x x%x x%x x%x\n", | 3123 | "Data: x%x x%x x%x x%x\n", |
3024 | vport->fc_flag, payload_len, | 3124 | vport->fc_flag, payload_len, |
3025 | *lp, rscn_cnt); | 3125 | *lp, vport->fc_rscn_id_cnt); |
3026 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, | 3126 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, |
3027 | "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", | 3127 | "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", |
3028 | ndlp->nlp_DID, vport->port_state, | 3128 | ndlp->nlp_DID, vport->port_state, |
@@ -3034,6 +3134,18 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3034 | } | 3134 | } |
3035 | } | 3135 | } |
3036 | 3136 | ||
3137 | spin_lock_irq(shost->host_lock); | ||
3138 | if (vport->fc_rscn_flush) { | ||
3139 | /* Another thread is walking fc_rscn_id_list on this vport */ | ||
3140 | spin_unlock_irq(shost->host_lock); | ||
3141 | vport->fc_flag |= FC_RSCN_DISCOVERY; | ||
3142 | return 0; | ||
3143 | } | ||
3144 | /* Indicate we are walking fc_rscn_id_list on this vport */ | ||
3145 | vport->fc_rscn_flush = 1; | ||
3146 | spin_unlock_irq(shost->host_lock); | ||
3147 | /* Get the array count after sucessfully have the token */ | ||
3148 | rscn_cnt = vport->fc_rscn_id_cnt; | ||
3037 | /* If we are already processing an RSCN, save the received | 3149 | /* If we are already processing an RSCN, save the received |
3038 | * RSCN payload buffer, cmdiocb->context2 to process later. | 3150 | * RSCN payload buffer, cmdiocb->context2 to process later. |
3039 | */ | 3151 | */ |
@@ -3055,7 +3167,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3055 | if ((rscn_cnt) && | 3167 | if ((rscn_cnt) && |
3056 | (payload_len + length <= LPFC_BPL_SIZE)) { | 3168 | (payload_len + length <= LPFC_BPL_SIZE)) { |
3057 | *cmd &= ELS_CMD_MASK; | 3169 | *cmd &= ELS_CMD_MASK; |
3058 | *cmd |= be32_to_cpu(payload_len + length); | 3170 | *cmd |= cpu_to_be32(payload_len + length); |
3059 | memcpy(((uint8_t *)cmd) + length, lp, | 3171 | memcpy(((uint8_t *)cmd) + length, lp, |
3060 | payload_len); | 3172 | payload_len); |
3061 | } else { | 3173 | } else { |
@@ -3066,7 +3178,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3066 | */ | 3178 | */ |
3067 | cmdiocb->context2 = NULL; | 3179 | cmdiocb->context2 = NULL; |
3068 | } | 3180 | } |
3069 | |||
3070 | /* Deferred RSCN */ | 3181 | /* Deferred RSCN */ |
3071 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 3182 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
3072 | "0235 Deferred RSCN " | 3183 | "0235 Deferred RSCN " |
@@ -3083,9 +3194,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3083 | vport->fc_rscn_id_cnt, vport->fc_flag, | 3194 | vport->fc_rscn_id_cnt, vport->fc_flag, |
3084 | vport->port_state); | 3195 | vport->port_state); |
3085 | } | 3196 | } |
3197 | /* Indicate we are done walking fc_rscn_id_list on this vport */ | ||
3198 | vport->fc_rscn_flush = 0; | ||
3086 | /* Send back ACC */ | 3199 | /* Send back ACC */ |
3087 | lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); | 3200 | lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); |
3088 | |||
3089 | /* send RECOVERY event for ALL nodes that match RSCN payload */ | 3201 | /* send RECOVERY event for ALL nodes that match RSCN payload */ |
3090 | lpfc_rscn_recovery_check(vport); | 3202 | lpfc_rscn_recovery_check(vport); |
3091 | spin_lock_irq(shost->host_lock); | 3203 | spin_lock_irq(shost->host_lock); |
@@ -3093,7 +3205,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3093 | spin_unlock_irq(shost->host_lock); | 3205 | spin_unlock_irq(shost->host_lock); |
3094 | return 0; | 3206 | return 0; |
3095 | } | 3207 | } |
3096 | |||
3097 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, | 3208 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, |
3098 | "RCV RSCN: did:x%x/ste:x%x flg:x%x", | 3209 | "RCV RSCN: did:x%x/ste:x%x flg:x%x", |
3099 | ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); | 3210 | ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); |
@@ -3102,20 +3213,18 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3102 | vport->fc_flag |= FC_RSCN_MODE; | 3213 | vport->fc_flag |= FC_RSCN_MODE; |
3103 | spin_unlock_irq(shost->host_lock); | 3214 | spin_unlock_irq(shost->host_lock); |
3104 | vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; | 3215 | vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; |
3216 | /* Indicate we are done walking fc_rscn_id_list on this vport */ | ||
3217 | vport->fc_rscn_flush = 0; | ||
3105 | /* | 3218 | /* |
3106 | * If we zero, cmdiocb->context2, the calling routine will | 3219 | * If we zero, cmdiocb->context2, the calling routine will |
3107 | * not try to free it. | 3220 | * not try to free it. |
3108 | */ | 3221 | */ |
3109 | cmdiocb->context2 = NULL; | 3222 | cmdiocb->context2 = NULL; |
3110 | |||
3111 | lpfc_set_disctmo(vport); | 3223 | lpfc_set_disctmo(vport); |
3112 | |||
3113 | /* Send back ACC */ | 3224 | /* Send back ACC */ |
3114 | lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); | 3225 | lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); |
3115 | |||
3116 | /* send RECOVERY event for ALL nodes that match RSCN payload */ | 3226 | /* send RECOVERY event for ALL nodes that match RSCN payload */ |
3117 | lpfc_rscn_recovery_check(vport); | 3227 | lpfc_rscn_recovery_check(vport); |
3118 | |||
3119 | return lpfc_els_handle_rscn(vport); | 3228 | return lpfc_els_handle_rscn(vport); |
3120 | } | 3229 | } |
3121 | 3230 | ||
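One small but important fix in the deferred-RSCN path above is the switch from be32_to_cpu() to cpu_to_be32() when the combined length is patched back into the saved payload: the word is kept in wire (big-endian) order, so a value computed in CPU order has to be converted to big-endian before it is OR-ed in, not the other way around. A tiny standalone illustration, using htonl()/ntohl() as the userspace stand-ins for cpu_to_be32()/be32_to_cpu() and an invented length value:

#include <arpa/inet.h>   /* htonl()/ntohl() stand in for cpu_to_be32()/be32_to_cpu() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t wire = 0;          /* a word kept in big-endian (wire) order, like the RSCN payload */
    uint32_t host_len = 0x58;   /* a length computed in CPU byte order */

    wire |= htonl(host_len);    /* writing into the wire word: convert CPU -> big-endian */

    /* reading it back out: convert big-endian -> CPU */
    printf("stored length: 0x%x\n", (unsigned)ntohl(wire));
    return 0;
}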
@@ -3145,7 +3254,8 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport) | |||
3145 | vport->num_disc_nodes = 0; | 3254 | vport->num_disc_nodes = 0; |
3146 | 3255 | ||
3147 | ndlp = lpfc_findnode_did(vport, NameServer_DID); | 3256 | ndlp = lpfc_findnode_did(vport, NameServer_DID); |
3148 | if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { | 3257 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) |
3258 | && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { | ||
3149 | /* Good ndlp, issue CT Request to NameServer */ | 3259 | /* Good ndlp, issue CT Request to NameServer */ |
3150 | if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) | 3260 | if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) |
3151 | /* Wait for NameServer query cmpl before we can | 3261 | /* Wait for NameServer query cmpl before we can |
@@ -3155,25 +3265,35 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport) | |||
3155 | /* If login to NameServer does not exist, issue one */ | 3265 | /* If login to NameServer does not exist, issue one */ |
3156 | /* Good status, issue PLOGI to NameServer */ | 3266 | /* Good status, issue PLOGI to NameServer */ |
3157 | ndlp = lpfc_findnode_did(vport, NameServer_DID); | 3267 | ndlp = lpfc_findnode_did(vport, NameServer_DID); |
3158 | if (ndlp) | 3268 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) |
3159 | /* Wait for NameServer login cmpl before we can | 3269 | /* Wait for NameServer login cmpl before we can |
3160 | continue */ | 3270 | continue */ |
3161 | return 1; | 3271 | return 1; |
3162 | 3272 | ||
3163 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 3273 | if (ndlp) { |
3164 | if (!ndlp) { | 3274 | ndlp = lpfc_enable_node(vport, ndlp, |
3165 | lpfc_els_flush_rscn(vport); | 3275 | NLP_STE_PLOGI_ISSUE); |
3166 | return 0; | 3276 | if (!ndlp) { |
3277 | lpfc_els_flush_rscn(vport); | ||
3278 | return 0; | ||
3279 | } | ||
3280 | ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; | ||
3167 | } else { | 3281 | } else { |
3282 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | ||
3283 | if (!ndlp) { | ||
3284 | lpfc_els_flush_rscn(vport); | ||
3285 | return 0; | ||
3286 | } | ||
3168 | lpfc_nlp_init(vport, ndlp, NameServer_DID); | 3287 | lpfc_nlp_init(vport, ndlp, NameServer_DID); |
3169 | ndlp->nlp_type |= NLP_FABRIC; | ||
3170 | ndlp->nlp_prev_state = ndlp->nlp_state; | 3288 | ndlp->nlp_prev_state = ndlp->nlp_state; |
3171 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); | 3289 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); |
3172 | lpfc_issue_els_plogi(vport, NameServer_DID, 0); | ||
3173 | /* Wait for NameServer login cmpl before we can | ||
3174 | continue */ | ||
3175 | return 1; | ||
3176 | } | 3290 | } |
3291 | ndlp->nlp_type |= NLP_FABRIC; | ||
3292 | lpfc_issue_els_plogi(vport, NameServer_DID, 0); | ||
3293 | /* Wait for NameServer login cmpl before we can | ||
3294 | * continue | ||
3295 | */ | ||
3296 | return 1; | ||
3177 | } | 3297 | } |
3178 | 3298 | ||
3179 | lpfc_els_flush_rscn(vport); | 3299 | lpfc_els_flush_rscn(vport); |
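The reworked NameServer recovery above first tries to bring an existing but inactive node back with lpfc_enable_node() and only falls back to a fresh mempool allocation, with both branches converging on the same PLOGI issue. A condensed standalone sketch of that enable-or-allocate flow; the structure and helper names below are stubs invented for the example, not the driver's types:

#include <stdio.h>
#include <stdlib.h>

struct node {
    int active;      /* stands in for NLP_CHK_NODE_ACT() */
    int did;
};

static struct node *lookup(struct node *existing) { return existing; }

static struct node *enable_node(struct node *n) { n->active = 1; return n; }

/* Returns the node to send PLOGI to, or NULL if recovery must be abandoned. */
static struct node *get_nameserver_node(struct node *existing, int ns_did)
{
    struct node *n = lookup(existing);

    if (n && n->active)
        return n;                    /* already usable, just reuse it */
    if (n)
        return enable_node(n);       /* known but inactive: re-activate in place */

    n = calloc(1, sizeof(*n));       /* nothing cached: allocate fresh */
    if (!n)
        return NULL;                 /* caller flushes the RSCN state */
    n->did = ns_did;
    n->active = 1;
    return n;
}

int main(void)
{
    struct node cached = { .active = 0, .did = 0xfffffc };
    struct node *n = get_nameserver_node(&cached, 0xfffffc);

    printf("%s node for DID 0x%x\n", n == &cached ? "re-enabled" : "new", n->did);
    return 0;
}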
@@ -3672,6 +3792,8 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3672 | 3792 | ||
3673 | list_for_each_entry_safe(ndlp, next_ndlp, | 3793 | list_for_each_entry_safe(ndlp, next_ndlp, |
3674 | &vport->fc_nodes, nlp_listp) { | 3794 | &vport->fc_nodes, nlp_listp) { |
3795 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
3796 | continue; | ||
3675 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) | 3797 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) |
3676 | continue; | 3798 | continue; |
3677 | if (ndlp->nlp_type & NLP_FABRIC) { | 3799 | if (ndlp->nlp_type & NLP_FABRIC) { |
@@ -3697,6 +3819,8 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
3697 | */ | 3819 | */ |
3698 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, | 3820 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, |
3699 | nlp_listp) { | 3821 | nlp_listp) { |
3822 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
3823 | continue; | ||
3700 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) | 3824 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) |
3701 | continue; | 3825 | continue; |
3702 | 3826 | ||
@@ -3936,7 +4060,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3936 | uint32_t cmd, did, newnode, rjt_err = 0; | 4060 | uint32_t cmd, did, newnode, rjt_err = 0; |
3937 | IOCB_t *icmd = &elsiocb->iocb; | 4061 | IOCB_t *icmd = &elsiocb->iocb; |
3938 | 4062 | ||
3939 | if (vport == NULL || elsiocb->context2 == NULL) | 4063 | if (!vport || !(elsiocb->context2)) |
3940 | goto dropit; | 4064 | goto dropit; |
3941 | 4065 | ||
3942 | newnode = 0; | 4066 | newnode = 0; |
@@ -3971,14 +4095,20 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3971 | lpfc_nlp_init(vport, ndlp, did); | 4095 | lpfc_nlp_init(vport, ndlp, did); |
3972 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | 4096 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
3973 | newnode = 1; | 4097 | newnode = 1; |
3974 | if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { | 4098 | if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) |
3975 | ndlp->nlp_type |= NLP_FABRIC; | 4099 | ndlp->nlp_type |= NLP_FABRIC; |
4100 | } else { | ||
4101 | if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
4102 | ndlp = lpfc_enable_node(vport, ndlp, | ||
4103 | NLP_STE_UNUSED_NODE); | ||
4104 | if (!ndlp) | ||
4105 | goto dropit; | ||
3976 | } | 4106 | } |
3977 | } | ||
3978 | else { | ||
3979 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { | 4107 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { |
3980 | /* This is simular to the new node path */ | 4108 | /* This is simular to the new node path */ |
3981 | lpfc_nlp_get(ndlp); | 4109 | ndlp = lpfc_nlp_get(ndlp); |
4110 | if (!ndlp) | ||
4111 | goto dropit; | ||
3982 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | 4112 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
3983 | newnode = 1; | 4113 | newnode = 1; |
3984 | } | 4114 | } |
@@ -3987,6 +4117,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
3987 | phba->fc_stat.elsRcvFrame++; | 4117 | phba->fc_stat.elsRcvFrame++; |
3988 | if (elsiocb->context1) | 4118 | if (elsiocb->context1) |
3989 | lpfc_nlp_put(elsiocb->context1); | 4119 | lpfc_nlp_put(elsiocb->context1); |
4120 | |||
3990 | elsiocb->context1 = lpfc_nlp_get(ndlp); | 4121 | elsiocb->context1 = lpfc_nlp_get(ndlp); |
3991 | elsiocb->vport = vport; | 4122 | elsiocb->vport = vport; |
3992 | 4123 | ||
@@ -4007,8 +4138,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
4007 | ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); | 4138 | ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); |
4008 | 4139 | ||
4009 | if (vport->port_state < LPFC_DISC_AUTH) { | 4140 | if (vport->port_state < LPFC_DISC_AUTH) { |
4010 | rjt_err = LSRJT_UNABLE_TPC; | 4141 | if (!(phba->pport->fc_flag & FC_PT2PT) || |
4011 | break; | 4142 | (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { |
4143 | rjt_err = LSRJT_UNABLE_TPC; | ||
4144 | break; | ||
4145 | } | ||
4146 | /* We get here, and drop thru, if we are PT2PT with | ||
4147 | * another NPort and the other side has initiated | ||
4148 | * the PLOGI before responding to our FLOGI. | ||
4149 | */ | ||
4012 | } | 4150 | } |
4013 | 4151 | ||
4014 | shost = lpfc_shost_from_vport(vport); | 4152 | shost = lpfc_shost_from_vport(vport); |
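The PLOGI guard above now rejects an early login only when the port is not running point-to-point, or when it is point-to-point but has already issued its own PLOGI; otherwise the frame is deliberately allowed to fall through, since in point-to-point mode the peer may send its PLOGI before answering our FLOGI. A small standalone rendering of that condition, with the FC_PT2PT and FC_PT2PT_PLOGI flags reduced to plain booleans for the example:

#include <stdbool.h>
#include <stdio.h>

/* Reject an early PLOGI unless the port is point-to-point and has not sent its own PLOGI yet. */
static bool reject_early_plogi(bool pt2pt, bool pt2pt_plogi_sent)
{
    return !pt2pt || pt2pt_plogi_sent;
}

int main(void)
{
    printf("fabric, still discovering     -> reject=%d\n", reject_early_plogi(false, false));
    printf("pt2pt, peer logged in first   -> reject=%d\n", reject_early_plogi(true, false));
    printf("pt2pt, our PLOGI already sent -> reject=%d\n", reject_early_plogi(true, true));
    return 0;
}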
@@ -4251,15 +4389,15 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
4251 | vport = lpfc_find_vport_by_vpid(phba, vpi); | 4389 | vport = lpfc_find_vport_by_vpid(phba, vpi); |
4252 | } | 4390 | } |
4253 | } | 4391 | } |
4254 | /* If there are no BDEs associated | 4392 | /* If there are no BDEs associated |
4255 | * with this IOCB, there is nothing to do. | 4393 | * with this IOCB, there is nothing to do. |
4256 | */ | 4394 | */ |
4257 | if (icmd->ulpBdeCount == 0) | 4395 | if (icmd->ulpBdeCount == 0) |
4258 | return; | 4396 | return; |
4259 | 4397 | ||
4260 | /* type of ELS cmd is first 32bit word | 4398 | /* type of ELS cmd is first 32bit word |
4261 | * in packet | 4399 | * in packet |
4262 | */ | 4400 | */ |
4263 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { | 4401 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { |
4264 | elsiocb->context2 = bdeBuf1; | 4402 | elsiocb->context2 = bdeBuf1; |
4265 | } else { | 4403 | } else { |
@@ -4314,6 +4452,18 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) | |||
4314 | } | 4452 | } |
4315 | lpfc_nlp_init(vport, ndlp, NameServer_DID); | 4453 | lpfc_nlp_init(vport, ndlp, NameServer_DID); |
4316 | ndlp->nlp_type |= NLP_FABRIC; | 4454 | ndlp->nlp_type |= NLP_FABRIC; |
4455 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
4456 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); | ||
4457 | if (!ndlp) { | ||
4458 | if (phba->fc_topology == TOPOLOGY_LOOP) { | ||
4459 | lpfc_disc_start(vport); | ||
4460 | return; | ||
4461 | } | ||
4462 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | ||
4463 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | ||
4464 | "0348 NameServer login: node freed\n"); | ||
4465 | return; | ||
4466 | } | ||
4317 | } | 4467 | } |
4318 | 4468 | ||
4319 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); | 4469 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); |
@@ -4360,6 +4510,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
4360 | switch (mb->mbxStatus) { | 4510 | switch (mb->mbxStatus) { |
4361 | case 0x11: /* unsupported feature */ | 4511 | case 0x11: /* unsupported feature */ |
4362 | case 0x9603: /* max_vpi exceeded */ | 4512 | case 0x9603: /* max_vpi exceeded */ |
4513 | case 0x9602: /* Link event since CLEAR_LA */ | ||
4363 | /* giving up on vport registration */ | 4514 | /* giving up on vport registration */ |
4364 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | 4515 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); |
4365 | spin_lock_irq(shost->host_lock); | 4516 | spin_lock_irq(shost->host_lock); |
@@ -4373,7 +4524,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
4373 | spin_lock_irq(shost->host_lock); | 4524 | spin_lock_irq(shost->host_lock); |
4374 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | 4525 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; |
4375 | spin_unlock_irq(shost->host_lock); | 4526 | spin_unlock_irq(shost->host_lock); |
4376 | lpfc_initial_fdisc(vport); | 4527 | if (vport->port_type == LPFC_PHYSICAL_PORT) |
4528 | lpfc_initial_flogi(vport); | ||
4529 | else | ||
4530 | lpfc_initial_fdisc(vport); | ||
4377 | break; | 4531 | break; |
4378 | } | 4532 | } |
4379 | 4533 | ||
@@ -4471,7 +4625,6 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
4471 | irsp->ulpStatus, irsp->un.ulpWord[4]); | 4625 | irsp->ulpStatus, irsp->un.ulpWord[4]); |
4472 | if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) | 4626 | if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) |
4473 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | 4627 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); |
4474 | |||
4475 | lpfc_nlp_put(ndlp); | 4628 | lpfc_nlp_put(ndlp); |
4476 | /* giving up on FDISC. Cancel discovery timer */ | 4629 | /* giving up on FDISC. Cancel discovery timer */ |
4477 | lpfc_can_disctmo(vport); | 4630 | lpfc_can_disctmo(vport); |
@@ -4492,8 +4645,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
4492 | */ | 4645 | */ |
4493 | list_for_each_entry_safe(np, next_np, | 4646 | list_for_each_entry_safe(np, next_np, |
4494 | &vport->fc_nodes, nlp_listp) { | 4647 | &vport->fc_nodes, nlp_listp) { |
4495 | if (np->nlp_state != NLP_STE_NPR_NODE | 4648 | if (!NLP_CHK_NODE_ACT(ndlp) || |
4496 | || !(np->nlp_flag & NLP_NPR_ADISC)) | 4649 | (np->nlp_state != NLP_STE_NPR_NODE) || |
4650 | !(np->nlp_flag & NLP_NPR_ADISC)) | ||
4497 | continue; | 4651 | continue; |
4498 | spin_lock_irq(shost->host_lock); | 4652 | spin_lock_irq(shost->host_lock); |
4499 | np->nlp_flag &= ~NLP_NPR_ADISC; | 4653 | np->nlp_flag &= ~NLP_NPR_ADISC; |
@@ -4599,6 +4753,8 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
4599 | { | 4753 | { |
4600 | struct lpfc_vport *vport = cmdiocb->vport; | 4754 | struct lpfc_vport *vport = cmdiocb->vport; |
4601 | IOCB_t *irsp; | 4755 | IOCB_t *irsp; |
4756 | struct lpfc_nodelist *ndlp; | ||
4757 | ndlp = (struct lpfc_nodelist *)cmdiocb->context1; | ||
4602 | 4758 | ||
4603 | irsp = &rspiocb->iocb; | 4759 | irsp = &rspiocb->iocb; |
4604 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, | 4760 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, |
@@ -4607,6 +4763,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
4607 | 4763 | ||
4608 | lpfc_els_free_iocb(phba, cmdiocb); | 4764 | lpfc_els_free_iocb(phba, cmdiocb); |
4609 | vport->unreg_vpi_cmpl = VPORT_ERROR; | 4765 | vport->unreg_vpi_cmpl = VPORT_ERROR; |
4766 | |||
4767 | /* Trigger the release of the ndlp after logo */ | ||
4768 | lpfc_nlp_put(ndlp); | ||
4610 | } | 4769 | } |
4611 | 4770 | ||
4612 | int | 4771 | int |
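lpfc_cmpl_els_npiv_logo() now pulls the node pointer back out of context1 and drops the reference that was taken when the LOGO was issued, so the node can actually be released once the exchange finishes. A minimal standalone sketch of that issue/complete reference pairing, with a plain counter standing in for the kref and all names invented:

#include <stdio.h>
#include <stdlib.h>

struct node { int refcnt; };

static struct node *node_get(struct node *n) { n->refcnt++; return n; }

static void node_put(struct node *n)
{
    if (--n->refcnt == 0) {
        printf("releasing node\n");
        free(n);
    }
}

struct request { struct node *context1; };

static void issue_logo(struct request *req, struct node *n)
{
    req->context1 = node_get(n);   /* hold the node for the life of the request */
}

static void logo_completion(struct request *req)
{
    struct node *n = req->context1;
    /* ... handle the LOGO response ... */
    node_put(n);                   /* drop the reference taken at issue time */
}

int main(void)
{
    struct node *n = calloc(1, sizeof(*n));
    struct request req;

    n->refcnt = 1;                 /* discovery's own reference */
    issue_logo(&req, n);
    logo_completion(&req);
    node_put(n);                   /* discovery lets go: count hits 0, node freed */
    return 0;
}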
@@ -4686,11 +4845,12 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) | |||
4686 | repeat: | 4845 | repeat: |
4687 | iocb = NULL; | 4846 | iocb = NULL; |
4688 | spin_lock_irqsave(&phba->hbalock, iflags); | 4847 | spin_lock_irqsave(&phba->hbalock, iflags); |
4689 | /* Post any pending iocb to the SLI layer */ | 4848 | /* Post any pending iocb to the SLI layer */ |
4690 | if (atomic_read(&phba->fabric_iocb_count) == 0) { | 4849 | if (atomic_read(&phba->fabric_iocb_count) == 0) { |
4691 | list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), | 4850 | list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), |
4692 | list); | 4851 | list); |
4693 | if (iocb) | 4852 | if (iocb) |
4853 | /* Increment fabric iocb count to hold the position */ | ||
4694 | atomic_inc(&phba->fabric_iocb_count); | 4854 | atomic_inc(&phba->fabric_iocb_count); |
4695 | } | 4855 | } |
4696 | spin_unlock_irqrestore(&phba->hbalock, iflags); | 4856 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
@@ -4737,9 +4897,7 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba) | |||
4737 | int blocked; | 4897 | int blocked; |
4738 | 4898 | ||
4739 | blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); | 4899 | blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); |
4740 | /* Start a timer to unblock fabric | 4900 | /* Start a timer to unblock fabric iocbs after 100ms */ |
4741 | * iocbs after 100ms | ||
4742 | */ | ||
4743 | if (!blocked) | 4901 | if (!blocked) |
4744 | mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); | 4902 | mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); |
4745 | 4903 | ||
@@ -4787,8 +4945,8 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
4787 | 4945 | ||
4788 | atomic_dec(&phba->fabric_iocb_count); | 4946 | atomic_dec(&phba->fabric_iocb_count); |
4789 | if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { | 4947 | if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { |
4790 | /* Post any pending iocbs to HBA */ | 4948 | /* Post any pending iocbs to HBA */ |
4791 | lpfc_resume_fabric_iocbs(phba); | 4949 | lpfc_resume_fabric_iocbs(phba); |
4792 | } | 4950 | } |
4793 | } | 4951 | } |
4794 | 4952 | ||
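In the lpfc_resume_fabric_iocbs() hunk above, and again in the lpfc_issue_fabric_iocb() hunk that follows, fabric_iocb_count is now bumped while the hbalock is still held, so the single outstanding-fabric-IOCB slot is reserved atomically with the emptiness test instead of after the lock is dropped. A standalone sketch of reserving such a slot under a lock, with a pthread mutex in place of the spinlock and the FABRIC_COMANDS_BLOCKED test omitted:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;
static int fabric_iocb_count;           /* 0 = slot free, 1 = slot held */

/* Returns 1 if this caller won the slot and may issue, 0 if it must be queued. */
static int try_reserve_slot(void)
{
    int ready;

    pthread_mutex_lock(&hbalock);
    ready = (fabric_iocb_count == 0);
    if (ready)
        fabric_iocb_count++;            /* reserve while still holding the lock */
    pthread_mutex_unlock(&hbalock);
    return ready;
}

static void complete_iocb(void)
{
    pthread_mutex_lock(&hbalock);
    fabric_iocb_count--;                /* give the slot back on completion */
    pthread_mutex_unlock(&hbalock);
}

int main(void)
{
    printf("first issue:  %d\n", try_reserve_slot());   /* 1: got the slot */
    printf("second issue: %d\n", try_reserve_slot());   /* 0: would be queued */
    complete_iocb();
    printf("after cmpl:   %d\n", try_reserve_slot());   /* 1 again */
    return 0;
}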
@@ -4807,6 +4965,9 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) | |||
4807 | ready = atomic_read(&phba->fabric_iocb_count) == 0 && | 4965 | ready = atomic_read(&phba->fabric_iocb_count) == 0 && |
4808 | !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); | 4966 | !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); |
4809 | 4967 | ||
4968 | if (ready) | ||
4969 | /* Increment fabric iocb count to hold the position */ | ||
4970 | atomic_inc(&phba->fabric_iocb_count); | ||
4810 | spin_unlock_irqrestore(&phba->hbalock, iflags); | 4971 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
4811 | if (ready) { | 4972 | if (ready) { |
4812 | iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; | 4973 | iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; |
@@ -4817,7 +4978,6 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) | |||
4817 | "Fabric sched2: ste:x%x", | 4978 | "Fabric sched2: ste:x%x", |
4818 | iocb->vport->port_state, 0, 0); | 4979 | iocb->vport->port_state, 0, 0); |
4819 | 4980 | ||
4820 | atomic_inc(&phba->fabric_iocb_count); | ||
4821 | ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); | 4981 | ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); |
4822 | 4982 | ||
4823 | if (ret == IOCB_ERROR) { | 4983 | if (ret == IOCB_ERROR) { |
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index dc042bd97baa..976653440fba 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -272,9 +272,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) | |||
272 | if (!(vport->load_flag & FC_UNLOADING) && | 272 | if (!(vport->load_flag & FC_UNLOADING) && |
273 | !(ndlp->nlp_flag & NLP_DELAY_TMO) && | 273 | !(ndlp->nlp_flag & NLP_DELAY_TMO) && |
274 | !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && | 274 | !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && |
275 | (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) { | 275 | (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) |
276 | lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); | 276 | lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); |
277 | } | ||
278 | } | 277 | } |
279 | 278 | ||
280 | 279 | ||
@@ -566,9 +565,10 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) | |||
566 | int rc; | 565 | int rc; |
567 | 566 | ||
568 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { | 567 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { |
568 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
569 | continue; | ||
569 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) | 570 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) |
570 | continue; | 571 | continue; |
571 | |||
572 | if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || | 572 | if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || |
573 | ((vport->port_type == LPFC_NPIV_PORT) && | 573 | ((vport->port_type == LPFC_NPIV_PORT) && |
574 | (ndlp->nlp_DID == NameServer_DID))) | 574 | (ndlp->nlp_DID == NameServer_DID))) |
@@ -629,9 +629,8 @@ lpfc_linkdown(struct lpfc_hba *phba) | |||
629 | LPFC_MBOXQ_t *mb; | 629 | LPFC_MBOXQ_t *mb; |
630 | int i; | 630 | int i; |
631 | 631 | ||
632 | if (phba->link_state == LPFC_LINK_DOWN) { | 632 | if (phba->link_state == LPFC_LINK_DOWN) |
633 | return 0; | 633 | return 0; |
634 | } | ||
635 | spin_lock_irq(&phba->hbalock); | 634 | spin_lock_irq(&phba->hbalock); |
636 | if (phba->link_state > LPFC_LINK_DOWN) { | 635 | if (phba->link_state > LPFC_LINK_DOWN) { |
637 | phba->link_state = LPFC_LINK_DOWN; | 636 | phba->link_state = LPFC_LINK_DOWN; |
@@ -684,20 +683,21 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport) | |||
684 | struct lpfc_nodelist *ndlp; | 683 | struct lpfc_nodelist *ndlp; |
685 | 684 | ||
686 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | 685 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
686 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
687 | continue; | ||
687 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) | 688 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) |
688 | continue; | 689 | continue; |
689 | |||
690 | if (ndlp->nlp_type & NLP_FABRIC) { | 690 | if (ndlp->nlp_type & NLP_FABRIC) { |
691 | /* On Linkup its safe to clean up the ndlp | 691 | /* On Linkup its safe to clean up the ndlp |
692 | * from Fabric connections. | 692 | * from Fabric connections. |
693 | */ | 693 | */ |
694 | if (ndlp->nlp_DID != Fabric_DID) | 694 | if (ndlp->nlp_DID != Fabric_DID) |
695 | lpfc_unreg_rpi(vport, ndlp); | 695 | lpfc_unreg_rpi(vport, ndlp); |
696 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | 696 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
697 | } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { | 697 | } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { |
698 | /* Fail outstanding IO now since device is | 698 | /* Fail outstanding IO now since device is |
699 | * marked for PLOGI. | 699 | * marked for PLOGI. |
700 | */ | 700 | */ |
701 | lpfc_unreg_rpi(vport, ndlp); | 701 | lpfc_unreg_rpi(vport, ndlp); |
702 | } | 702 | } |
703 | } | 703 | } |
@@ -799,21 +799,9 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
799 | writel(control, phba->HCregaddr); | 799 | writel(control, phba->HCregaddr); |
800 | readl(phba->HCregaddr); /* flush */ | 800 | readl(phba->HCregaddr); /* flush */ |
801 | spin_unlock_irq(&phba->hbalock); | 801 | spin_unlock_irq(&phba->hbalock); |
802 | mempool_free(pmb, phba->mbox_mem_pool); | ||
802 | return; | 803 | return; |
803 | 804 | ||
804 | vport->num_disc_nodes = 0; | ||
805 | /* go thru NPR nodes and issue ELS PLOGIs */ | ||
806 | if (vport->fc_npr_cnt) | ||
807 | lpfc_els_disc_plogi(vport); | ||
808 | |||
809 | if (!vport->num_disc_nodes) { | ||
810 | spin_lock_irq(shost->host_lock); | ||
811 | vport->fc_flag &= ~FC_NDISC_ACTIVE; | ||
812 | spin_unlock_irq(shost->host_lock); | ||
813 | } | ||
814 | |||
815 | vport->port_state = LPFC_VPORT_READY; | ||
816 | |||
817 | out: | 805 | out: |
818 | /* Device Discovery completes */ | 806 | /* Device Discovery completes */ |
819 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 807 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, |
@@ -1133,7 +1121,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1133 | if (la->attType == AT_LINK_UP) { | 1121 | if (la->attType == AT_LINK_UP) { |
1134 | phba->fc_stat.LinkUp++; | 1122 | phba->fc_stat.LinkUp++; |
1135 | if (phba->link_flag & LS_LOOPBACK_MODE) { | 1123 | if (phba->link_flag & LS_LOOPBACK_MODE) { |
1136 | lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, | 1124 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, |
1137 | "1306 Link Up Event in loop back mode " | 1125 | "1306 Link Up Event in loop back mode " |
1138 | "x%x received Data: x%x x%x x%x x%x\n", | 1126 | "x%x received Data: x%x x%x x%x x%x\n", |
1139 | la->eventTag, phba->fc_eventTag, | 1127 | la->eventTag, phba->fc_eventTag, |
@@ -1150,11 +1138,21 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1150 | lpfc_mbx_process_link_up(phba, la); | 1138 | lpfc_mbx_process_link_up(phba, la); |
1151 | } else { | 1139 | } else { |
1152 | phba->fc_stat.LinkDown++; | 1140 | phba->fc_stat.LinkDown++; |
1153 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | 1141 | if (phba->link_flag & LS_LOOPBACK_MODE) { |
1142 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | ||
1143 | "1308 Link Down Event in loop back mode " | ||
1144 | "x%x received " | ||
1145 | "Data: x%x x%x x%x\n", | ||
1146 | la->eventTag, phba->fc_eventTag, | ||
1147 | phba->pport->port_state, vport->fc_flag); | ||
1148 | } | ||
1149 | else { | ||
1150 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | ||
1154 | "1305 Link Down Event x%x received " | 1151 | "1305 Link Down Event x%x received " |
1155 | "Data: x%x x%x x%x\n", | 1152 | "Data: x%x x%x x%x\n", |
1156 | la->eventTag, phba->fc_eventTag, | 1153 | la->eventTag, phba->fc_eventTag, |
1157 | phba->pport->port_state, vport->fc_flag); | 1154 | phba->pport->port_state, vport->fc_flag); |
1155 | } | ||
1158 | lpfc_mbx_issue_link_down(phba); | 1156 | lpfc_mbx_issue_link_down(phba); |
1159 | } | 1157 | } |
1160 | 1158 | ||
@@ -1305,7 +1303,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1305 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 1303 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
1306 | kfree(mp); | 1304 | kfree(mp); |
1307 | mempool_free(pmb, phba->mbox_mem_pool); | 1305 | mempool_free(pmb, phba->mbox_mem_pool); |
1308 | lpfc_nlp_put(ndlp); | ||
1309 | 1306 | ||
1310 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 1307 | if (phba->fc_topology == TOPOLOGY_LOOP) { |
1311 | /* FLOGI failed, use loop map to make discovery list */ | 1308 | /* FLOGI failed, use loop map to make discovery list */ |
@@ -1313,6 +1310,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1313 | 1310 | ||
1314 | /* Start discovery */ | 1311 | /* Start discovery */ |
1315 | lpfc_disc_start(vport); | 1312 | lpfc_disc_start(vport); |
1313 | /* Decrement the reference count to ndlp after the | ||
1314 | * reference to the ndlp are done. | ||
1315 | */ | ||
1316 | lpfc_nlp_put(ndlp); | ||
1316 | return; | 1317 | return; |
1317 | } | 1318 | } |
1318 | 1319 | ||
@@ -1320,6 +1321,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1320 | lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, | 1321 | lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, |
1321 | "0258 Register Fabric login error: 0x%x\n", | 1322 | "0258 Register Fabric login error: 0x%x\n", |
1322 | mb->mbxStatus); | 1323 | mb->mbxStatus); |
1324 | /* Decrement the reference count to ndlp after the reference | ||
1325 | * to the ndlp are done. | ||
1326 | */ | ||
1327 | lpfc_nlp_put(ndlp); | ||
1323 | return; | 1328 | return; |
1324 | } | 1329 | } |
1325 | 1330 | ||
@@ -1327,8 +1332,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1327 | ndlp->nlp_type |= NLP_FABRIC; | 1332 | ndlp->nlp_type |= NLP_FABRIC; |
1328 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); | 1333 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
1329 | 1334 | ||
1330 | lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */ | ||
1331 | |||
1332 | if (vport->port_state == LPFC_FABRIC_CFG_LINK) { | 1335 | if (vport->port_state == LPFC_FABRIC_CFG_LINK) { |
1333 | vports = lpfc_create_vport_work_array(phba); | 1336 | vports = lpfc_create_vport_work_array(phba); |
1334 | if (vports != NULL) | 1337 | if (vports != NULL) |
@@ -1356,6 +1359,11 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1356 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 1359 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
1357 | kfree(mp); | 1360 | kfree(mp); |
1358 | mempool_free(pmb, phba->mbox_mem_pool); | 1361 | mempool_free(pmb, phba->mbox_mem_pool); |
1362 | |||
1363 | /* Drop the reference count from the mbox at the end after | ||
1364 | * all the current reference to the ndlp have been done. | ||
1365 | */ | ||
1366 | lpfc_nlp_put(ndlp); | ||
1359 | return; | 1367 | return; |
1360 | } | 1368 | } |
1361 | 1369 | ||
@@ -1463,9 +1471,8 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
1463 | * registered the port. | 1471 | * registered the port. |
1464 | */ | 1472 | */ |
1465 | if (ndlp->rport && ndlp->rport->dd_data && | 1473 | if (ndlp->rport && ndlp->rport->dd_data && |
1466 | ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) { | 1474 | ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) |
1467 | lpfc_nlp_put(ndlp); | 1475 | lpfc_nlp_put(ndlp); |
1468 | } | ||
1469 | 1476 | ||
1470 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, | 1477 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, |
1471 | "rport add: did:x%x flg:x%x type x%x", | 1478 | "rport add: did:x%x flg:x%x type x%x", |
@@ -1660,6 +1667,18 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
1660 | } | 1667 | } |
1661 | 1668 | ||
1662 | void | 1669 | void |
1670 | lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | ||
1671 | { | ||
1672 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
1673 | |||
1674 | if (list_empty(&ndlp->nlp_listp)) { | ||
1675 | spin_lock_irq(shost->host_lock); | ||
1676 | list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); | ||
1677 | spin_unlock_irq(shost->host_lock); | ||
1678 | } | ||
1679 | } | ||
1680 | |||
1681 | void | ||
1663 | lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | 1682 | lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
1664 | { | 1683 | { |
1665 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 1684 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
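The new lpfc_enqueue_node() above links a node onto the vport's fc_nodes list only when it is not already queued, so repeated calls are harmless; the list_empty() check on the node's own list_head is what makes the operation idempotent. A standalone sketch of that guard, with a minimal doubly linked list written out in place of the kernel list helpers:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_INIT(name) { &(name), &(name) }

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *head)
{
    n->prev = head->prev;
    n->next = head;
    head->prev->next = n;
    head->prev = n;
}

struct node { struct list_head entry; };

/* Enqueue only if the node is not already queued somewhere. */
static void enqueue_node(struct node *n, struct list_head *fc_nodes)
{
    if (list_empty(&n->entry))
        list_add_tail(&n->entry, fc_nodes);
}

int main(void)
{
    struct list_head fc_nodes = LIST_INIT(fc_nodes);
    struct node n = { .entry = LIST_INIT(n.entry) };

    enqueue_node(&n, &fc_nodes);
    enqueue_node(&n, &fc_nodes);          /* second call is a no-op */
    printf("queued exactly once: %d\n",
           fc_nodes.next == &n.entry && fc_nodes.prev == &n.entry);
    return 0;
}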
@@ -1672,7 +1691,80 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
1672 | list_del_init(&ndlp->nlp_listp); | 1691 | list_del_init(&ndlp->nlp_listp); |
1673 | spin_unlock_irq(shost->host_lock); | 1692 | spin_unlock_irq(shost->host_lock); |
1674 | lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, | 1693 | lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, |
1675 | NLP_STE_UNUSED_NODE); | 1694 | NLP_STE_UNUSED_NODE); |
1695 | } | ||
1696 | |||
1697 | static void | ||
1698 | lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | ||
1699 | { | ||
1700 | if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) | ||
1701 | lpfc_cancel_retry_delay_tmo(vport, ndlp); | ||
1702 | if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) | ||
1703 | lpfc_nlp_counters(vport, ndlp->nlp_state, -1); | ||
1704 | lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, | ||
1705 | NLP_STE_UNUSED_NODE); | ||
1706 | } | ||
1707 | |||
1708 | struct lpfc_nodelist * | ||
1709 | lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | ||
1710 | int state) | ||
1711 | { | ||
1712 | struct lpfc_hba *phba = vport->phba; | ||
1713 | uint32_t did; | ||
1714 | unsigned long flags; | ||
1715 | |||
1716 | if (!ndlp) | ||
1717 | return NULL; | ||
1718 | |||
1719 | spin_lock_irqsave(&phba->ndlp_lock, flags); | ||
1720 | /* The ndlp should not be in memory free mode */ | ||
1721 | if (NLP_CHK_FREE_REQ(ndlp)) { | ||
1722 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
1723 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, | ||
1724 | "0277 lpfc_enable_node: ndlp:x%p " | ||
1725 | "usgmap:x%x refcnt:%d\n", | ||
1726 | (void *)ndlp, ndlp->nlp_usg_map, | ||
1727 | atomic_read(&ndlp->kref.refcount)); | ||
1728 | return NULL; | ||
1729 | } | ||
1730 | /* The ndlp should not already be in active mode */ | ||
1731 | if (NLP_CHK_NODE_ACT(ndlp)) { | ||
1732 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
1733 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, | ||
1734 | "0278 lpfc_enable_node: ndlp:x%p " | ||
1735 | "usgmap:x%x refcnt:%d\n", | ||
1736 | (void *)ndlp, ndlp->nlp_usg_map, | ||
1737 | atomic_read(&ndlp->kref.refcount)); | ||
1738 | return NULL; | ||
1739 | } | ||
1740 | |||
1741 | /* Keep the original DID */ | ||
1742 | did = ndlp->nlp_DID; | ||
1743 | |||
1744 | /* re-initialize ndlp except of ndlp linked list pointer */ | ||
1745 | memset((((char *)ndlp) + sizeof (struct list_head)), 0, | ||
1746 | sizeof (struct lpfc_nodelist) - sizeof (struct list_head)); | ||
1747 | INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); | ||
1748 | INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); | ||
1749 | init_timer(&ndlp->nlp_delayfunc); | ||
1750 | ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; | ||
1751 | ndlp->nlp_delayfunc.data = (unsigned long)ndlp; | ||
1752 | ndlp->nlp_DID = did; | ||
1753 | ndlp->vport = vport; | ||
1754 | ndlp->nlp_sid = NLP_NO_SID; | ||
1755 | /* ndlp management re-initialize */ | ||
1756 | kref_init(&ndlp->kref); | ||
1757 | NLP_INT_NODE_ACT(ndlp); | ||
1758 | |||
1759 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
1760 | |||
1761 | if (state != NLP_STE_UNUSED_NODE) | ||
1762 | lpfc_nlp_set_state(vport, ndlp, state); | ||
1763 | |||
1764 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, | ||
1765 | "node enable: did:x%x", | ||
1766 | ndlp->nlp_DID, 0, 0); | ||
1767 | return ndlp; | ||
1676 | } | 1768 | } |
1677 | 1769 | ||
1678 | void | 1770 | void |
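lpfc_enable_node() re-arms a node without taking it off the vport list: everything past the embedded list_head is wiped and rebuilt (timer, DID, sid, kref) while the list linkage is left alone. The pointer arithmetic in the patch presumes the list_head is the leading member of struct lpfc_nodelist; the standalone sketch below makes that layout assumption explicit with a structure invented for the example:

#include <stdio.h>
#include <string.h>

struct list_head { struct list_head *next, *prev; };

struct node {
    struct list_head list;   /* must stay first for the memset arithmetic below */
    unsigned int did;
    int state;
    int refcnt;
};

static void reenable_node(struct node *n, int new_state)
{
    unsigned int did = n->did;            /* keep the original DID */

    /* wipe everything after the list linkage, leaving the node queued */
    memset((char *)n + sizeof(struct list_head), 0,
           sizeof(*n) - sizeof(struct list_head));

    n->did = did;
    n->state = new_state;
    n->refcnt = 1;                        /* fresh reference count */
}

int main(void)
{
    struct node n = { .list = { &n.list, &n.list }, .did = 0xfffffc,
                      .state = 7, .refcnt = 3 };

    reenable_node(&n, 1);
    printf("did=0x%x state=%d refcnt=%d list intact=%d\n",
           n.did, n.state, n.refcnt, n.list.next == &n.list);
    return 0;
}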
@@ -1972,7 +2064,21 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
1972 | "Data: x%x x%x x%x\n", | 2064 | "Data: x%x x%x x%x\n", |
1973 | ndlp->nlp_DID, ndlp->nlp_flag, | 2065 | ndlp->nlp_DID, ndlp->nlp_flag, |
1974 | ndlp->nlp_state, ndlp->nlp_rpi); | 2066 | ndlp->nlp_state, ndlp->nlp_rpi); |
1975 | lpfc_dequeue_node(vport, ndlp); | 2067 | if (NLP_CHK_FREE_REQ(ndlp)) { |
2068 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, | ||
2069 | "0280 lpfc_cleanup_node: ndlp:x%p " | ||
2070 | "usgmap:x%x refcnt:%d\n", | ||
2071 | (void *)ndlp, ndlp->nlp_usg_map, | ||
2072 | atomic_read(&ndlp->kref.refcount)); | ||
2073 | lpfc_dequeue_node(vport, ndlp); | ||
2074 | } else { | ||
2075 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, | ||
2076 | "0281 lpfc_cleanup_node: ndlp:x%p " | ||
2077 | "usgmap:x%x refcnt:%d\n", | ||
2078 | (void *)ndlp, ndlp->nlp_usg_map, | ||
2079 | atomic_read(&ndlp->kref.refcount)); | ||
2080 | lpfc_disable_node(vport, ndlp); | ||
2081 | } | ||
1976 | 2082 | ||
1977 | /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ | 2083 | /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ |
1978 | if ((mb = phba->sli.mbox_active)) { | 2084 | if ((mb = phba->sli.mbox_active)) { |
@@ -1994,12 +2100,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
1994 | } | 2100 | } |
1995 | list_del(&mb->list); | 2101 | list_del(&mb->list); |
1996 | mempool_free(mb, phba->mbox_mem_pool); | 2102 | mempool_free(mb, phba->mbox_mem_pool); |
1997 | lpfc_nlp_put(ndlp); | 2103 | /* We shall not invoke the lpfc_nlp_put to decrement |
2104 | * the ndlp reference count as we are in the process | ||
2105 | * of lpfc_nlp_release. | ||
2106 | */ | ||
1998 | } | 2107 | } |
1999 | } | 2108 | } |
2000 | spin_unlock_irq(&phba->hbalock); | 2109 | spin_unlock_irq(&phba->hbalock); |
2001 | 2110 | ||
2002 | lpfc_els_abort(phba,ndlp); | 2111 | lpfc_els_abort(phba, ndlp); |
2112 | |||
2003 | spin_lock_irq(shost->host_lock); | 2113 | spin_lock_irq(shost->host_lock); |
2004 | ndlp->nlp_flag &= ~NLP_DELAY_TMO; | 2114 | ndlp->nlp_flag &= ~NLP_DELAY_TMO; |
2005 | spin_unlock_irq(shost->host_lock); | 2115 | spin_unlock_irq(shost->host_lock); |
@@ -2057,7 +2167,6 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
2057 | } | 2167 | } |
2058 | } | 2168 | } |
2059 | } | 2169 | } |
2060 | |||
2061 | lpfc_cleanup_node(vport, ndlp); | 2170 | lpfc_cleanup_node(vport, ndlp); |
2062 | 2171 | ||
2063 | /* | 2172 | /* |
@@ -2182,7 +2291,16 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) | |||
2182 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; | 2291 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
2183 | spin_unlock_irq(shost->host_lock); | 2292 | spin_unlock_irq(shost->host_lock); |
2184 | return ndlp; | 2293 | return ndlp; |
2294 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
2295 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE); | ||
2296 | if (!ndlp) | ||
2297 | return NULL; | ||
2298 | spin_lock_irq(shost->host_lock); | ||
2299 | ndlp->nlp_flag |= NLP_NPR_2B_DISC; | ||
2300 | spin_unlock_irq(shost->host_lock); | ||
2301 | return ndlp; | ||
2185 | } | 2302 | } |
2303 | |||
2186 | if (vport->fc_flag & FC_RSCN_MODE) { | 2304 | if (vport->fc_flag & FC_RSCN_MODE) { |
2187 | if (lpfc_rscn_payload_check(vport, did)) { | 2305 | if (lpfc_rscn_payload_check(vport, did)) { |
2188 | /* If we've already recieved a PLOGI from this NPort | 2306 | /* If we've already recieved a PLOGI from this NPort |
@@ -2363,6 +2481,7 @@ lpfc_disc_start(struct lpfc_vport *vport) | |||
2363 | * continue discovery. | 2481 | * continue discovery. |
2364 | */ | 2482 | */ |
2365 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && | 2483 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && |
2484 | !(vport->fc_flag & FC_PT2PT) && | ||
2366 | !(vport->fc_flag & FC_RSCN_MODE)) { | 2485 | !(vport->fc_flag & FC_RSCN_MODE)) { |
2367 | lpfc_issue_reg_vpi(phba, vport); | 2486 | lpfc_issue_reg_vpi(phba, vport); |
2368 | return; | 2487 | return; |
@@ -2485,6 +2604,8 @@ lpfc_disc_flush_list(struct lpfc_vport *vport) | |||
2485 | if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { | 2604 | if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { |
2486 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, | 2605 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, |
2487 | nlp_listp) { | 2606 | nlp_listp) { |
2607 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
2608 | continue; | ||
2488 | if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || | 2609 | if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || |
2489 | ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { | 2610 | ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { |
2490 | lpfc_free_tx(phba, ndlp); | 2611 | lpfc_free_tx(phba, ndlp); |
@@ -2572,6 +2693,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) | |||
2572 | /* Start discovery by sending FLOGI, clean up old rpis */ | 2693 | /* Start discovery by sending FLOGI, clean up old rpis */ |
2573 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, | 2694 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, |
2574 | nlp_listp) { | 2695 | nlp_listp) { |
2696 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
2697 | continue; | ||
2575 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) | 2698 | if (ndlp->nlp_state != NLP_STE_NPR_NODE) |
2576 | continue; | 2699 | continue; |
2577 | if (ndlp->nlp_type & NLP_FABRIC) { | 2700 | if (ndlp->nlp_type & NLP_FABRIC) { |
@@ -2618,7 +2741,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) | |||
2618 | "NameServer login\n"); | 2741 | "NameServer login\n"); |
2619 | /* Next look for NameServer ndlp */ | 2742 | /* Next look for NameServer ndlp */ |
2620 | ndlp = lpfc_findnode_did(vport, NameServer_DID); | 2743 | ndlp = lpfc_findnode_did(vport, NameServer_DID); |
2621 | if (ndlp) | 2744 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) |
2622 | lpfc_els_abort(phba, ndlp); | 2745 | lpfc_els_abort(phba, ndlp); |
2623 | 2746 | ||
2624 | /* ReStart discovery */ | 2747 | /* ReStart discovery */ |
@@ -2897,6 +3020,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
2897 | ndlp->nlp_sid = NLP_NO_SID; | 3020 | ndlp->nlp_sid = NLP_NO_SID; |
2898 | INIT_LIST_HEAD(&ndlp->nlp_listp); | 3021 | INIT_LIST_HEAD(&ndlp->nlp_listp); |
2899 | kref_init(&ndlp->kref); | 3022 | kref_init(&ndlp->kref); |
3023 | NLP_INT_NODE_ACT(ndlp); | ||
2900 | 3024 | ||
2901 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, | 3025 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, |
2902 | "node init: did:x%x", | 3026 | "node init: did:x%x", |
@@ -2911,6 +3035,8 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
2911 | static void | 3035 | static void |
2912 | lpfc_nlp_release(struct kref *kref) | 3036 | lpfc_nlp_release(struct kref *kref) |
2913 | { | 3037 | { |
3038 | struct lpfc_hba *phba; | ||
3039 | unsigned long flags; | ||
2914 | struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, | 3040 | struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, |
2915 | kref); | 3041 | kref); |
2916 | 3042 | ||
@@ -2918,8 +3044,24 @@ lpfc_nlp_release(struct kref *kref) | |||
2918 | "node release: did:x%x flg:x%x type:x%x", | 3044 | "node release: did:x%x flg:x%x type:x%x", |
2919 | ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); | 3045 | ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); |
2920 | 3046 | ||
3047 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, | ||
3048 | "0279 lpfc_nlp_release: ndlp:x%p " | ||
3049 | "usgmap:x%x refcnt:%d\n", | ||
3050 | (void *)ndlp, ndlp->nlp_usg_map, | ||
3051 | atomic_read(&ndlp->kref.refcount)); | ||
3052 | |||
3053 | /* remove ndlp from action. */ | ||
2921 | lpfc_nlp_remove(ndlp->vport, ndlp); | 3054 | lpfc_nlp_remove(ndlp->vport, ndlp); |
2922 | mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); | 3055 | |
3056 | /* clear the ndlp active flag for all release cases */ | ||
3057 | phba = ndlp->vport->phba; | ||
3058 | spin_lock_irqsave(&phba->ndlp_lock, flags); | ||
3059 | NLP_CLR_NODE_ACT(ndlp); | ||
3060 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
3061 | |||
3062 | /* free ndlp memory for final ndlp release */ | ||
3063 | if (NLP_CHK_FREE_REQ(ndlp)) | ||
3064 | mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); | ||
2923 | } | 3065 | } |
2924 | 3066 | ||
2925 | /* This routine bumps the reference count for a ndlp structure to ensure | 3067 | /* This routine bumps the reference count for a ndlp structure to ensure |
@@ -2929,37 +3071,108 @@ lpfc_nlp_release(struct kref *kref) | |||
2929 | struct lpfc_nodelist * | 3071 | struct lpfc_nodelist * |
2930 | lpfc_nlp_get(struct lpfc_nodelist *ndlp) | 3072 | lpfc_nlp_get(struct lpfc_nodelist *ndlp) |
2931 | { | 3073 | { |
3074 | struct lpfc_hba *phba; | ||
3075 | unsigned long flags; | ||
3076 | |||
2932 | if (ndlp) { | 3077 | if (ndlp) { |
2933 | lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, | 3078 | lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, |
2934 | "node get: did:x%x flg:x%x refcnt:x%x", | 3079 | "node get: did:x%x flg:x%x refcnt:x%x", |
2935 | ndlp->nlp_DID, ndlp->nlp_flag, | 3080 | ndlp->nlp_DID, ndlp->nlp_flag, |
2936 | atomic_read(&ndlp->kref.refcount)); | 3081 | atomic_read(&ndlp->kref.refcount)); |
2937 | kref_get(&ndlp->kref); | 3082 | /* The check of ndlp usage to prevent incrementing the |
3083 | * ndlp reference count that is in the process of being | ||
3084 | * released. | ||
3085 | */ | ||
3086 | phba = ndlp->vport->phba; | ||
3087 | spin_lock_irqsave(&phba->ndlp_lock, flags); | ||
3088 | if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) { | ||
3089 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
3090 | lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, | ||
3091 | "0276 lpfc_nlp_get: ndlp:x%p " | ||
3092 | "usgmap:x%x refcnt:%d\n", | ||
3093 | (void *)ndlp, ndlp->nlp_usg_map, | ||
3094 | atomic_read(&ndlp->kref.refcount)); | ||
3095 | return NULL; | ||
3096 | } else | ||
3097 | kref_get(&ndlp->kref); | ||
3098 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
2938 | } | 3099 | } |
2939 | return ndlp; | 3100 | return ndlp; |
2940 | } | 3101 | } |
2941 | 3102 | ||
2942 | |||
2943 | /* This routine decrements the reference count for a ndlp structure. If the | 3103 | /* This routine decrements the reference count for a ndlp structure. If the |
2944 | * count goes to 0, this indicates the the associated nodelist should be freed. | 3104 | * count goes to 0, this indicates the the associated nodelist should be |
3105 | * freed. Returning 1 indicates the ndlp resource has been released; on the | ||
3106 | * other hand, returning 0 indicates the ndlp resource has not been released | ||
3107 | * yet. | ||
2945 | */ | 3108 | */ |
2946 | int | 3109 | int |
2947 | lpfc_nlp_put(struct lpfc_nodelist *ndlp) | 3110 | lpfc_nlp_put(struct lpfc_nodelist *ndlp) |
2948 | { | 3111 | { |
2949 | if (ndlp) { | 3112 | struct lpfc_hba *phba; |
2950 | lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, | 3113 | unsigned long flags; |
2951 | "node put: did:x%x flg:x%x refcnt:x%x", | 3114 | |
2952 | ndlp->nlp_DID, ndlp->nlp_flag, | 3115 | if (!ndlp) |
2953 | atomic_read(&ndlp->kref.refcount)); | 3116 | return 1; |
3117 | |||
3118 | lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, | ||
3119 | "node put: did:x%x flg:x%x refcnt:x%x", | ||
3120 | ndlp->nlp_DID, ndlp->nlp_flag, | ||
3121 | atomic_read(&ndlp->kref.refcount)); | ||
3122 | phba = ndlp->vport->phba; | ||
3123 | spin_lock_irqsave(&phba->ndlp_lock, flags); | ||
3124 | /* Check the ndlp memory free acknowledge flag to avoid the | ||
3125 | * possible race condition that kref_put got invoked again | ||
3126 | * after previous one has done ndlp memory free. | ||
3127 | */ | ||
3128 | if (NLP_CHK_FREE_ACK(ndlp)) { | ||
3129 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
3130 | lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, | ||
3131 | "0274 lpfc_nlp_put: ndlp:x%p " | ||
3132 | "usgmap:x%x refcnt:%d\n", | ||
3133 | (void *)ndlp, ndlp->nlp_usg_map, | ||
3134 | atomic_read(&ndlp->kref.refcount)); | ||
3135 | return 1; | ||
2954 | } | 3136 | } |
2955 | return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0; | 3137 | /* Check the ndlp inactivate log flag to avoid the possible |
3138 | * race condition that kref_put got invoked again after ndlp | ||
3139 | * is already in inactivating state. | ||
3140 | */ | ||
3141 | if (NLP_CHK_IACT_REQ(ndlp)) { | ||
3142 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
3143 | lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, | ||
3144 | "0275 lpfc_nlp_put: ndlp:x%p " | ||
3145 | "usgmap:x%x refcnt:%d\n", | ||
3146 | (void *)ndlp, ndlp->nlp_usg_map, | ||
3147 | atomic_read(&ndlp->kref.refcount)); | ||
3148 | return 1; | ||
3149 | } | ||
3150 | /* For last put, mark the ndlp usage flags to make sure no | ||
3151 | * other kref_get and kref_put on the same ndlp shall get | ||
3152 | * in between the process when the final kref_put has been | ||
3153 | * invoked on this ndlp. | ||
3154 | */ | ||
3155 | if (atomic_read(&ndlp->kref.refcount) == 1) { | ||
3156 | /* Indicate ndlp is put to inactive state. */ | ||
3157 | NLP_SET_IACT_REQ(ndlp); | ||
3158 | /* Acknowledge ndlp memory free has been seen. */ | ||
3159 | if (NLP_CHK_FREE_REQ(ndlp)) | ||
3160 | NLP_SET_FREE_ACK(ndlp); | ||
3161 | } | ||
3162 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | ||
3163 | /* Note, the kref_put returns 1 when decrementing a reference | ||
3164 | * count that was 1, it invokes the release callback function, | ||
3165 | * but it still left the reference count as 1 (not actually | ||
3166 | * performs the last decrementation). Otherwise, it actually | ||
3167 | * decrements the reference count and returns 0. | ||
3168 | */ | ||
3169 | return kref_put(&ndlp->kref, lpfc_nlp_release); | ||
2956 | } | 3170 | } |
2957 | 3171 | ||
2958 | /* This routine free's the specified nodelist if it is not in use | 3172 | /* This routine free's the specified nodelist if it is not in use |
2959 | * by any other discovery thread. This routine returns 1 if the ndlp | 3173 | * by any other discovery thread. This routine returns 1 if the |
2960 | * is not being used by anyone and has been freed. A return value of | 3174 | * ndlp has been freed. A return value of 0 indicates the ndlp is |
2961 | * 0 indicates it is being used by another discovery thread and the | 3175 | * not yet been released. |
2962 | * refcount is left unchanged. | ||
2963 | */ | 3176 | */ |
2964 | int | 3177 | int |
2965 | lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) | 3178 | lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) |
@@ -2968,11 +3181,8 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) | |||
2968 | "node not used: did:x%x flg:x%x refcnt:x%x", | 3181 | "node not used: did:x%x flg:x%x refcnt:x%x", |
2969 | ndlp->nlp_DID, ndlp->nlp_flag, | 3182 | ndlp->nlp_DID, ndlp->nlp_flag, |
2970 | atomic_read(&ndlp->kref.refcount)); | 3183 | atomic_read(&ndlp->kref.refcount)); |
2971 | 3184 | if (atomic_read(&ndlp->kref.refcount) == 1) | |
2972 | if (atomic_read(&ndlp->kref.refcount) == 1) { | 3185 | if (lpfc_nlp_put(ndlp)) |
2973 | lpfc_nlp_put(ndlp); | 3186 | return 1; |
2974 | return 1; | ||
2975 | } | ||
2976 | return 0; | 3187 | return 0; |
2977 | } | 3188 | } |
2978 | |||
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 041f83e7634a..7773b949aa7c 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -581,6 +581,7 @@ struct ls_rjt { /* Structure is in Big Endian format */ | |||
581 | #define LSEXP_INVALID_O_SID 0x15 | 581 | #define LSEXP_INVALID_O_SID 0x15 |
582 | #define LSEXP_INVALID_OX_RX 0x17 | 582 | #define LSEXP_INVALID_OX_RX 0x17 |
583 | #define LSEXP_CMD_IN_PROGRESS 0x19 | 583 | #define LSEXP_CMD_IN_PROGRESS 0x19 |
584 | #define LSEXP_PORT_LOGIN_REQ 0x1E | ||
584 | #define LSEXP_INVALID_NPORT_ID 0x1F | 585 | #define LSEXP_INVALID_NPORT_ID 0x1F |
585 | #define LSEXP_INVALID_SEQ_ID 0x21 | 586 | #define LSEXP_INVALID_SEQ_ID 0x21 |
586 | #define LSEXP_INVALID_XCHG 0x23 | 587 | #define LSEXP_INVALID_XCHG 0x23 |
@@ -1376,11 +1377,26 @@ typedef struct { /* FireFly BIU registers */ | |||
1376 | #define CMD_QUE_XRI64_CX 0xB3 | 1377 | #define CMD_QUE_XRI64_CX 0xB3 |
1377 | #define CMD_IOCB_RCV_SEQ64_CX 0xB5 | 1378 | #define CMD_IOCB_RCV_SEQ64_CX 0xB5 |
1378 | #define CMD_IOCB_RCV_ELS64_CX 0xB7 | 1379 | #define CMD_IOCB_RCV_ELS64_CX 0xB7 |
1380 | #define CMD_IOCB_RET_XRI64_CX 0xB9 | ||
1379 | #define CMD_IOCB_RCV_CONT64_CX 0xBB | 1381 | #define CMD_IOCB_RCV_CONT64_CX 0xBB |
1380 | 1382 | ||
1381 | #define CMD_GEN_REQUEST64_CR 0xC2 | 1383 | #define CMD_GEN_REQUEST64_CR 0xC2 |
1382 | #define CMD_GEN_REQUEST64_CX 0xC3 | 1384 | #define CMD_GEN_REQUEST64_CX 0xC3 |
1383 | 1385 | ||
1386 | /* Unhandled SLI-3 Commands */ | ||
1387 | #define CMD_IOCB_XMIT_MSEQ64_CR 0xB0 | ||
1388 | #define CMD_IOCB_XMIT_MSEQ64_CX 0xB1 | ||
1389 | #define CMD_IOCB_RCV_SEQ_LIST64_CX 0xC1 | ||
1390 | #define CMD_IOCB_RCV_ELS_LIST64_CX 0xCD | ||
1391 | #define CMD_IOCB_CLOSE_EXTENDED_CN 0xB6 | ||
1392 | #define CMD_IOCB_ABORT_EXTENDED_CN 0xBA | ||
1393 | #define CMD_IOCB_RET_HBQE64_CN 0xCA | ||
1394 | #define CMD_IOCB_FCP_IBIDIR64_CR 0xAC | ||
1395 | #define CMD_IOCB_FCP_IBIDIR64_CX 0xAD | ||
1396 | #define CMD_IOCB_FCP_ITASKMGT64_CX 0xAF | ||
1397 | #define CMD_IOCB_LOGENTRY_CN 0x94 | ||
1398 | #define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 | ||
1399 | |||
1384 | #define CMD_MAX_IOCB_CMD 0xE6 | 1400 | #define CMD_MAX_IOCB_CMD 0xE6 |
1385 | #define CMD_IOCB_MASK 0xff | 1401 | #define CMD_IOCB_MASK 0xff |
1386 | 1402 | ||
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 6cfeba7454d4..22843751c2ca 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -461,11 +461,21 @@ lpfc_config_port_post(struct lpfc_hba *phba) | |||
461 | int | 461 | int |
462 | lpfc_hba_down_prep(struct lpfc_hba *phba) | 462 | lpfc_hba_down_prep(struct lpfc_hba *phba) |
463 | { | 463 | { |
464 | struct lpfc_vport **vports; | ||
465 | int i; | ||
464 | /* Disable interrupts */ | 466 | /* Disable interrupts */ |
465 | writel(0, phba->HCregaddr); | 467 | writel(0, phba->HCregaddr); |
466 | readl(phba->HCregaddr); /* flush */ | 468 | readl(phba->HCregaddr); /* flush */ |
467 | 469 | ||
468 | lpfc_cleanup_discovery_resources(phba->pport); | 470 | if (phba->pport->load_flag & FC_UNLOADING) |
471 | lpfc_cleanup_discovery_resources(phba->pport); | ||
472 | else { | ||
473 | vports = lpfc_create_vport_work_array(phba); | ||
474 | if (vports != NULL) | ||
475 | for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) | ||
476 | lpfc_cleanup_discovery_resources(vports[i]); | ||
477 | lpfc_destroy_vport_work_array(phba, vports); | ||
478 | } | ||
469 | return 0; | 479 | return 0; |
470 | } | 480 | } |
471 | 481 | ||
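With the change above, lpfc_hba_down_prep() cleans discovery resources for every vport rather than only the physical port, unless the driver is unloading, by taking a snapshot array of the vports and walking it. A sketch of that snapshot-and-iterate idiom; the types and helpers here (snapshot_ports, cleanup_port, release_snapshot) are illustrative stand-ins for the lpfc work-array API:

struct port;					/* stands in for lpfc_vport */

struct hba {
	int unloading;
	int max_ports;
	struct port *physical_port;
};

extern void cleanup_port(struct port *p);
extern struct port **snapshot_ports(struct hba *h);	/* NULL-terminated, may return NULL */
extern void release_snapshot(struct hba *h, struct port **v);

static void hba_down_prep(struct hba *hba)
{
	struct port **ports;
	int i;

	if (hba->unloading) {
		/* During unload only the physical port is cleaned here. */
		cleanup_port(hba->physical_port);
		return;
	}

	/* Otherwise clean every port found in a snapshot of the port list. */
	ports = snapshot_ports(hba);
	if (ports != NULL)
		for (i = 0; i <= hba->max_ports && ports[i] != NULL; i++)
			cleanup_port(ports[i]);
	release_snapshot(hba, ports);
}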
@@ -1422,9 +1432,32 @@ lpfc_cleanup(struct lpfc_vport *vport) | |||
1422 | lpfc_port_link_failure(vport); | 1432 | lpfc_port_link_failure(vport); |
1423 | 1433 | ||
1424 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { | 1434 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { |
1435 | if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
1436 | ndlp = lpfc_enable_node(vport, ndlp, | ||
1437 | NLP_STE_UNUSED_NODE); | ||
1438 | if (!ndlp) | ||
1439 | continue; | ||
1440 | spin_lock_irq(&phba->ndlp_lock); | ||
1441 | NLP_SET_FREE_REQ(ndlp); | ||
1442 | spin_unlock_irq(&phba->ndlp_lock); | ||
1443 | /* Trigger the release of the ndlp memory */ | ||
1444 | lpfc_nlp_put(ndlp); | ||
1445 | continue; | ||
1446 | } | ||
1447 | spin_lock_irq(&phba->ndlp_lock); | ||
1448 | if (NLP_CHK_FREE_REQ(ndlp)) { | ||
1449 | /* The ndlp should not be in memory free mode already */ | ||
1450 | spin_unlock_irq(&phba->ndlp_lock); | ||
1451 | continue; | ||
1452 | } else | ||
1453 | /* Indicate request for freeing ndlp memory */ | ||
1454 | NLP_SET_FREE_REQ(ndlp); | ||
1455 | spin_unlock_irq(&phba->ndlp_lock); | ||
1456 | |||
1425 | if (ndlp->nlp_type & NLP_FABRIC) | 1457 | if (ndlp->nlp_type & NLP_FABRIC) |
1426 | lpfc_disc_state_machine(vport, ndlp, NULL, | 1458 | lpfc_disc_state_machine(vport, ndlp, NULL, |
1427 | NLP_EVT_DEVICE_RECOVERY); | 1459 | NLP_EVT_DEVICE_RECOVERY); |
1460 | |||
1428 | lpfc_disc_state_machine(vport, ndlp, NULL, | 1461 | lpfc_disc_state_machine(vport, ndlp, NULL, |
1429 | NLP_EVT_DEVICE_RM); | 1462 | NLP_EVT_DEVICE_RM); |
1430 | } | 1463 | } |
@@ -1438,6 +1471,17 @@ lpfc_cleanup(struct lpfc_vport *vport) | |||
1438 | if (i++ > 3000) { | 1471 | if (i++ > 3000) { |
1439 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, | 1472 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, |
1440 | "0233 Nodelist not empty\n"); | 1473 | "0233 Nodelist not empty\n"); |
1474 | list_for_each_entry_safe(ndlp, next_ndlp, | ||
1475 | &vport->fc_nodes, nlp_listp) { | ||
1476 | lpfc_printf_vlog(ndlp->vport, KERN_ERR, | ||
1477 | LOG_NODE, | ||
1478 | "0282: did:x%x ndlp:x%p " | ||
1479 | "usgmap:x%x refcnt:%d\n", | ||
1480 | ndlp->nlp_DID, (void *)ndlp, | ||
1481 | ndlp->nlp_usg_map, | ||
1482 | atomic_read( | ||
1483 | &ndlp->kref.refcount)); | ||
1484 | } | ||
1441 | break; | 1485 | break; |
1442 | } | 1486 | } |
1443 | 1487 | ||
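When the node list fails to drain, the loop added above dumps each remaining node's DID, usage map and reference count. The walk uses list_for_each_entry_safe(), the usual idiom when entries might disappear while iterating. A generic sketch of that iteration, with an illustrative item type:

#include <linux/kernel.h>
#include <linux/list.h>

struct item {
	struct list_head list;
	int id, refcnt;
};

static void dump_leftovers(struct list_head *head)
{
	struct item *it, *next;

	/* The _safe variant caches the next pointer, so the body may delete 'it'. */
	list_for_each_entry_safe(it, next, head, list)
		printk(KERN_ERR "leftover item id:x%x refcnt:%d\n",
		       it->id, it->refcnt);
}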
@@ -1586,6 +1630,8 @@ lpfc_offline_prep(struct lpfc_hba * phba) | |||
1586 | list_for_each_entry_safe(ndlp, next_ndlp, | 1630 | list_for_each_entry_safe(ndlp, next_ndlp, |
1587 | &vports[i]->fc_nodes, | 1631 | &vports[i]->fc_nodes, |
1588 | nlp_listp) { | 1632 | nlp_listp) { |
1633 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
1634 | continue; | ||
1589 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) | 1635 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) |
1590 | continue; | 1636 | continue; |
1591 | if (ndlp->nlp_type & NLP_FABRIC) { | 1637 | if (ndlp->nlp_type & NLP_FABRIC) { |
@@ -1695,9 +1741,9 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) | |||
1695 | 1741 | ||
1696 | vport = (struct lpfc_vport *) shost->hostdata; | 1742 | vport = (struct lpfc_vport *) shost->hostdata; |
1697 | vport->phba = phba; | 1743 | vport->phba = phba; |
1698 | |||
1699 | vport->load_flag |= FC_LOADING; | 1744 | vport->load_flag |= FC_LOADING; |
1700 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | 1745 | vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; |
1746 | vport->fc_rscn_flush = 0; | ||
1701 | 1747 | ||
1702 | lpfc_get_vport_cfgparam(vport); | 1748 | lpfc_get_vport_cfgparam(vport); |
1703 | shost->unique_id = instance; | 1749 | shost->unique_id = instance; |
@@ -1879,6 +1925,42 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost) | |||
1879 | spin_unlock_irq(shost->host_lock); | 1925 | spin_unlock_irq(shost->host_lock); |
1880 | } | 1926 | } |
1881 | 1927 | ||
1928 | static int | ||
1929 | lpfc_enable_msix(struct lpfc_hba *phba) | ||
1930 | { | ||
1931 | int error; | ||
1932 | |||
1933 | phba->msix_entries[0].entry = 0; | ||
1934 | phba->msix_entries[0].vector = 0; | ||
1935 | |||
1936 | error = pci_enable_msix(phba->pcidev, phba->msix_entries, | ||
1937 | ARRAY_SIZE(phba->msix_entries)); | ||
1938 | if (error) { | ||
1939 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
1940 | "0420 Enable MSI-X failed (%d), continuing " | ||
1941 | "with MSI\n", error); | ||
1942 | pci_disable_msix(phba->pcidev); | ||
1943 | return error; | ||
1944 | } | ||
1945 | |||
1946 | error = request_irq(phba->msix_entries[0].vector, lpfc_intr_handler, 0, | ||
1947 | LPFC_DRIVER_NAME, phba); | ||
1948 | if (error) { | ||
1949 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
1950 | "0421 MSI-X request_irq failed (%d), " | ||
1951 | "continuing with MSI\n", error); | ||
1952 | pci_disable_msix(phba->pcidev); | ||
1953 | } | ||
1954 | return error; | ||
1955 | } | ||
1956 | |||
1957 | static void | ||
1958 | lpfc_disable_msix(struct lpfc_hba *phba) | ||
1959 | { | ||
1960 | free_irq(phba->msix_entries[0].vector, phba); | ||
1961 | pci_disable_msix(phba->pcidev); | ||
1962 | } | ||
1963 | |||
1882 | static int __devinit | 1964 | static int __devinit |
1883 | lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | 1965 | lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) |
1884 | { | 1966 | { |
@@ -1905,6 +1987,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
1905 | 1987 | ||
1906 | spin_lock_init(&phba->hbalock); | 1988 | spin_lock_init(&phba->hbalock); |
1907 | 1989 | ||
1990 | /* Initialize ndlp management spinlock */ | ||
1991 | spin_lock_init(&phba->ndlp_lock); | ||
1992 | |||
1908 | phba->pcidev = pdev; | 1993 | phba->pcidev = pdev; |
1909 | 1994 | ||
1910 | /* Assign an unused board number */ | 1995 | /* Assign an unused board number */ |
@@ -2002,6 +2087,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2002 | 2087 | ||
2003 | memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); | 2088 | memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); |
2004 | 2089 | ||
2090 | INIT_LIST_HEAD(&phba->hbqbuf_in_list); | ||
2091 | |||
2005 | /* Initialize the SLI Layer to run with lpfc HBAs. */ | 2092 | /* Initialize the SLI Layer to run with lpfc HBAs. */ |
2006 | lpfc_sli_setup(phba); | 2093 | lpfc_sli_setup(phba); |
2007 | lpfc_sli_queue_setup(phba); | 2094 | lpfc_sli_queue_setup(phba); |
@@ -2077,24 +2164,36 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
2077 | lpfc_debugfs_initialize(vport); | 2164 | lpfc_debugfs_initialize(vport); |
2078 | 2165 | ||
2079 | pci_set_drvdata(pdev, shost); | 2166 | pci_set_drvdata(pdev, shost); |
2167 | phba->intr_type = NONE; | ||
2080 | 2168 | ||
2081 | if (phba->cfg_use_msi) { | 2169 | if (phba->cfg_use_msi == 2) { |
2170 | error = lpfc_enable_msix(phba); | ||
2171 | if (!error) | ||
2172 | phba->intr_type = MSIX; | ||
2173 | } | ||
2174 | |||
2175 | /* Fallback to MSI if MSI-X initialization failed */ | ||
2176 | if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { | ||
2082 | retval = pci_enable_msi(phba->pcidev); | 2177 | retval = pci_enable_msi(phba->pcidev); |
2083 | if (!retval) | 2178 | if (!retval) |
2084 | phba->using_msi = 1; | 2179 | phba->intr_type = MSI; |
2085 | else | 2180 | else |
2086 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 2181 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
2087 | "0452 Enable MSI failed, continuing " | 2182 | "0452 Enable MSI failed, continuing " |
2088 | "with IRQ\n"); | 2183 | "with IRQ\n"); |
2089 | } | 2184 | } |
2090 | 2185 | ||
2091 | retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, | 2186 | /* MSI-X is the only case that doesn't need to call request_irq */
2092 | LPFC_DRIVER_NAME, phba); | 2187 | if (phba->intr_type != MSIX) { |
2093 | if (retval) { | 2188 | retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, |
2094 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 2189 | IRQF_SHARED, LPFC_DRIVER_NAME, phba); |
2095 | "0451 Enable interrupt handler failed\n"); | 2190 | if (retval) { |
2096 | error = retval; | 2191 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable " |
2097 | goto out_disable_msi; | 2192 | "interrupt handler failed\n"); |
2193 | error = retval; | ||
2194 | goto out_disable_msi; | ||
2195 | } else if (phba->intr_type != MSI) | ||
2196 | phba->intr_type = INTx; | ||
2098 | } | 2197 | } |
2099 | 2198 | ||
2100 | phba->MBslimaddr = phba->slim_memmap_p; | 2199 | phba->MBslimaddr = phba->slim_memmap_p; |
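The probe changes above build a three-level interrupt setup: try MSI-X when cfg_use_msi is 2, fall back to MSI, and finally to a shared INTx line, recording the outcome in phba->intr_type so the teardown paths can mirror it. A condensed sketch of that fallback ladder, written against the pci_enable_msix()/pci_enable_msi() interfaces of this kernel generation; the device name string and error handling are simplified:

#include <linux/pci.h>
#include <linux/interrupt.h>

enum intr_type { NONE, INTx, MSI, MSIX };

static int setup_interrupt(struct pci_dev *pdev, struct msix_entry *entry,
			   irq_handler_t handler, void *data, int use_msi,
			   enum intr_type *type)
{
	*type = NONE;

	if (use_msi == 2) {				/* prefer MSI-X */
		entry->entry = 0;
		if (!pci_enable_msix(pdev, entry, 1) &&
		    !request_irq(entry->vector, handler, 0, "drv", data)) {
			*type = MSIX;
			return 0;
		}
		pci_disable_msix(pdev);			/* fall back */
	}

	if (use_msi >= 1 && !pci_enable_msi(pdev))	/* next try MSI */
		*type = MSI;

	/* MSI and INTx both use pdev->irq; only MSI-X skips this request. */
	if (request_irq(pdev->irq, handler, IRQF_SHARED, "drv", data)) {
		if (*type == MSI)
			pci_disable_msi(pdev);
		return -ENODEV;
	}
	if (*type == NONE)
		*type = INTx;
	return 0;
}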
@@ -2139,9 +2238,14 @@ out_remove_device: | |||
2139 | out_free_irq: | 2238 | out_free_irq: |
2140 | lpfc_stop_phba_timers(phba); | 2239 | lpfc_stop_phba_timers(phba); |
2141 | phba->pport->work_port_events = 0; | 2240 | phba->pport->work_port_events = 0; |
2142 | free_irq(phba->pcidev->irq, phba); | 2241 | |
2242 | if (phba->intr_type == MSIX) | ||
2243 | lpfc_disable_msix(phba); | ||
2244 | else | ||
2245 | free_irq(phba->pcidev->irq, phba); | ||
2246 | |||
2143 | out_disable_msi: | 2247 | out_disable_msi: |
2144 | if (phba->using_msi) | 2248 | if (phba->intr_type == MSI) |
2145 | pci_disable_msi(phba->pcidev); | 2249 | pci_disable_msi(phba->pcidev); |
2146 | destroy_port(vport); | 2250 | destroy_port(vport); |
2147 | out_kthread_stop: | 2251 | out_kthread_stop: |
@@ -2214,10 +2318,13 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |||
2214 | 2318 | ||
2215 | lpfc_debugfs_terminate(vport); | 2319 | lpfc_debugfs_terminate(vport); |
2216 | 2320 | ||
2217 | /* Release the irq reservation */ | 2321 | if (phba->intr_type == MSIX) |
2218 | free_irq(phba->pcidev->irq, phba); | 2322 | lpfc_disable_msix(phba); |
2219 | if (phba->using_msi) | 2323 | else { |
2220 | pci_disable_msi(phba->pcidev); | 2324 | free_irq(phba->pcidev->irq, phba); |
2325 | if (phba->intr_type == MSI) | ||
2326 | pci_disable_msi(phba->pcidev); | ||
2327 | } | ||
2221 | 2328 | ||
2222 | pci_set_drvdata(pdev, NULL); | 2329 | pci_set_drvdata(pdev, NULL); |
2223 | scsi_host_put(shost); | 2330 | scsi_host_put(shost); |
@@ -2276,10 +2383,13 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, | |||
2276 | pring = &psli->ring[psli->fcp_ring]; | 2383 | pring = &psli->ring[psli->fcp_ring]; |
2277 | lpfc_sli_abort_iocb_ring(phba, pring); | 2384 | lpfc_sli_abort_iocb_ring(phba, pring); |
2278 | 2385 | ||
2279 | /* Release the irq reservation */ | 2386 | if (phba->intr_type == MSIX) |
2280 | free_irq(phba->pcidev->irq, phba); | 2387 | lpfc_disable_msix(phba); |
2281 | if (phba->using_msi) | 2388 | else { |
2282 | pci_disable_msi(phba->pcidev); | 2389 | free_irq(phba->pcidev->irq, phba); |
2390 | if (phba->intr_type == MSI) | ||
2391 | pci_disable_msi(phba->pcidev); | ||
2392 | } | ||
2283 | 2393 | ||
2284 | /* Request a slot reset. */ | 2394 | /* Request a slot reset. */ |
2285 | return PCI_ERS_RESULT_NEED_RESET; | 2395 | return PCI_ERS_RESULT_NEED_RESET; |
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index c5841d7565f7..39fd2b843bec 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2005 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -35,11 +35,15 @@ | |||
35 | #define LOG_ALL_MSG 0xffff /* LOG all messages */ | 35 | #define LOG_ALL_MSG 0xffff /* LOG all messages */ |
36 | 36 | ||
37 | #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ | 37 | #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ |
38 | do { \ | ||
38 | { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ | 39 | { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ |
39 | dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ | 40 | dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ |
40 | fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } | 41 | fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ |
42 | } while (0) | ||
41 | 43 | ||
42 | #define lpfc_printf_log(phba, level, mask, fmt, arg...) \ | 44 | #define lpfc_printf_log(phba, level, mask, fmt, arg...) \ |
45 | do { \ | ||
43 | { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ | 46 | { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ |
44 | dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ | 47 | dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ |
45 | fmt, phba->brd_no, ##arg); } | 48 | fmt, phba->brd_no, ##arg); } \ |
49 | } while (0) | ||
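The lpfc_logmsg.h change wraps both logging macros in do { ... } while (0). The usual reason: a macro that expands to more than one statement misbehaves when used as the un-braced body of an if, and a bare brace block without the do/while form breaks if ... else chains. A generic illustration (not the lpfc macros):

extern void log_one(int v);

/* Unsafe: expands to two statements, so only the first is guarded. */
#define LOG_TWO_BAD(a, b)	log_one(a); log_one(b)

/* Safe: behaves as a single statement and still takes a trailing ';'. */
#define LOG_TWO(a, b)			\
	do {				\
		log_one(a);		\
		log_one(b);		\
	} while (0)

void example(int cond, int a, int b)
{
	if (cond)
		LOG_TWO(a, b);		/* both calls correctly guarded */
	else
		log_one(0);
}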
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 6dc5ab8d6716..3c0cebc71800 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c | |||
@@ -264,19 +264,30 @@ void | |||
264 | lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) | 264 | lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) |
265 | { | 265 | { |
266 | struct hbq_dmabuf *hbq_entry; | 266 | struct hbq_dmabuf *hbq_entry; |
267 | unsigned long flags; | ||
268 | |||
269 | if (!mp) | ||
270 | return; | ||
267 | 271 | ||
268 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { | 272 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { |
273 | /* Check whether HBQ is still in use */ | ||
274 | spin_lock_irqsave(&phba->hbalock, flags); | ||
275 | if (!phba->hbq_in_use) { | ||
276 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
277 | return; | ||
278 | } | ||
269 | hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); | 279 | hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); |
280 | list_del(&hbq_entry->dbuf.list); | ||
270 | if (hbq_entry->tag == -1) { | 281 | if (hbq_entry->tag == -1) { |
271 | (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) | 282 | (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) |
272 | (phba, hbq_entry); | 283 | (phba, hbq_entry); |
273 | } else { | 284 | } else { |
274 | lpfc_sli_free_hbq(phba, hbq_entry); | 285 | lpfc_sli_free_hbq(phba, hbq_entry); |
275 | } | 286 | } |
287 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
276 | } else { | 288 | } else { |
277 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 289 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
278 | kfree(mp); | 290 | kfree(mp); |
279 | } | 291 | } |
280 | return; | 292 | return; |
281 | } | 293 | } |
282 | |||
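lpfc_in_buf_free() now NULL-checks its argument and, for HBQ-backed buffers, takes hbalock and bails out if hbq_in_use has already been cleared, so a buffer is never handed back to an HBQ that was torn down in the meantime. A sketch of that check-under-lock shape with an illustrative pool structure, not the lpfc types:

#include <linux/spinlock.h>

struct pool {
	spinlock_t lock;
	int in_use;		/* cleared when the pool is torn down */
};

static void buf_free(struct pool *pool, void *buf,
		     void (*do_free)(struct pool *, void *))
{
	unsigned long flags;

	if (!buf)
		return;

	spin_lock_irqsave(&pool->lock, flags);
	if (!pool->in_use) {
		/* Pool already torn down: nothing to return the buffer to. */
		spin_unlock_irqrestore(&pool->lock, flags);
		return;
	}
	/* Return the buffer while the lock still guarantees the pool exists. */
	do_free(pool, buf);
	spin_unlock_irqrestore(&pool->lock, flags);
}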
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 4a0e3406e37a..d513813f6697 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -249,6 +249,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
249 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 249 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
250 | struct lpfc_hba *phba = vport->phba; | 250 | struct lpfc_hba *phba = vport->phba; |
251 | struct lpfc_dmabuf *pcmd; | 251 | struct lpfc_dmabuf *pcmd; |
252 | struct lpfc_work_evt *evtp; | ||
252 | uint32_t *lp; | 253 | uint32_t *lp; |
253 | IOCB_t *icmd; | 254 | IOCB_t *icmd; |
254 | struct serv_parm *sp; | 255 | struct serv_parm *sp; |
@@ -435,8 +436,14 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
435 | del_timer_sync(&ndlp->nlp_delayfunc); | 436 | del_timer_sync(&ndlp->nlp_delayfunc); |
436 | ndlp->nlp_last_elscmd = 0; | 437 | ndlp->nlp_last_elscmd = 0; |
437 | 438 | ||
438 | if (!list_empty(&ndlp->els_retry_evt.evt_listp)) | 439 | if (!list_empty(&ndlp->els_retry_evt.evt_listp)) { |
439 | list_del_init(&ndlp->els_retry_evt.evt_listp); | 440 | list_del_init(&ndlp->els_retry_evt.evt_listp); |
441 | /* Decrement ndlp reference count held for the | ||
442 | * delayed retry | ||
443 | */ | ||
444 | evtp = &ndlp->els_retry_evt; | ||
445 | lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); | ||
446 | } | ||
440 | 447 | ||
441 | if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { | 448 | if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { |
442 | spin_lock_irq(shost->host_lock); | 449 | spin_lock_irq(shost->host_lock); |
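In the PLOGI handling above, a pending delayed ELS retry is cancelled by stopping its timer and, if the retry event is still queued, unlinking it and dropping the ndlp reference that the queued retry was holding. A small sketch of that cancel-and-put pattern with illustrative types:

#include <linux/list.h>
#include <linux/timer.h>

struct node;
extern void node_put(struct node *n);	/* illustrative refcount helper */

struct deferred_evt {
	struct list_head list;
	struct node *owner;		/* reference held while the event is queued */
};

static void cancel_deferred(struct timer_list *t, struct deferred_evt *evt)
{
	del_timer_sync(t);		/* stop the retry timer first */

	if (!list_empty(&evt->list)) {
		list_del_init(&evt->list);
		/* Drop the reference the queued retry was holding. */
		node_put(evt->owner);
	}
}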
@@ -638,13 +645,15 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
638 | return 0; | 645 | return 0; |
639 | } | 646 | } |
640 | 647 | ||
641 | /* Check config parameter use-adisc or FCP-2 */ | 648 | if (!(vport->fc_flag & FC_PT2PT)) { |
642 | if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || | 649 | /* Check config parameter use-adisc or FCP-2 */ |
643 | ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { | 650 | if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || |
644 | spin_lock_irq(shost->host_lock); | 651 | ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { |
645 | ndlp->nlp_flag |= NLP_NPR_ADISC; | 652 | spin_lock_irq(shost->host_lock); |
646 | spin_unlock_irq(shost->host_lock); | 653 | ndlp->nlp_flag |= NLP_NPR_ADISC; |
647 | return 1; | 654 | spin_unlock_irq(shost->host_lock); |
655 | return 1; | ||
656 | } | ||
648 | } | 657 | } |
649 | ndlp->nlp_flag &= ~NLP_NPR_ADISC; | 658 | ndlp->nlp_flag &= ~NLP_NPR_ADISC; |
650 | lpfc_unreg_rpi(vport, ndlp); | 659 | lpfc_unreg_rpi(vport, ndlp); |
@@ -656,7 +665,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
656 | void *arg, uint32_t evt) | 665 | void *arg, uint32_t evt) |
657 | { | 666 | { |
658 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, | 667 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, |
659 | "0253 Illegal State Transition: node x%x " | 668 | "0271 Illegal State Transition: node x%x " |
660 | "event x%x, state x%x Data: x%x x%x\n", | 669 | "event x%x, state x%x Data: x%x x%x\n", |
661 | ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, | 670 | ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, |
662 | ndlp->nlp_flag); | 671 | ndlp->nlp_flag); |
@@ -674,7 +683,7 @@ lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
674 | */ | 683 | */ |
675 | if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { | 684 | if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { |
676 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, | 685 | lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, |
677 | "0253 Illegal State Transition: node x%x " | 686 | "0272 Illegal State Transition: node x%x " |
678 | "event x%x, state x%x Data: x%x x%x\n", | 687 | "event x%x, state x%x Data: x%x x%x\n", |
679 | ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, | 688 | ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, |
680 | ndlp->nlp_flag); | 689 | ndlp->nlp_flag); |
@@ -2144,8 +2153,11 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
2144 | uint32_t cur_state, rc; | 2153 | uint32_t cur_state, rc; |
2145 | uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *, | 2154 | uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *, |
2146 | uint32_t); | 2155 | uint32_t); |
2156 | uint32_t got_ndlp = 0; | ||
2157 | |||
2158 | if (lpfc_nlp_get(ndlp)) | ||
2159 | got_ndlp = 1; | ||
2147 | 2160 | ||
2148 | lpfc_nlp_get(ndlp); | ||
2149 | cur_state = ndlp->nlp_state; | 2161 | cur_state = ndlp->nlp_state; |
2150 | 2162 | ||
2151 | /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ | 2163 | /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ |
@@ -2162,15 +2174,24 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
2162 | rc = (func) (vport, ndlp, arg, evt); | 2174 | rc = (func) (vport, ndlp, arg, evt); |
2163 | 2175 | ||
2164 | /* DSM out state <rc> on NPort <nlp_DID> */ | 2176 | /* DSM out state <rc> on NPort <nlp_DID> */ |
2165 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | 2177 | if (got_ndlp) { |
2178 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | ||
2166 | "0212 DSM out state %d on NPort x%x Data: x%x\n", | 2179 | "0212 DSM out state %d on NPort x%x Data: x%x\n", |
2167 | rc, ndlp->nlp_DID, ndlp->nlp_flag); | 2180 | rc, ndlp->nlp_DID, ndlp->nlp_flag); |
2168 | 2181 | ||
2169 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, | 2182 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, |
2170 | "DSM out: ste:%d did:x%x flg:x%x", | 2183 | "DSM out: ste:%d did:x%x flg:x%x", |
2171 | rc, ndlp->nlp_DID, ndlp->nlp_flag); | 2184 | rc, ndlp->nlp_DID, ndlp->nlp_flag); |
2185 | /* Decrement the ndlp reference count held for this function */ | ||
2186 | lpfc_nlp_put(ndlp); | ||
2187 | } else { | ||
2188 | lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, | ||
2189 | "0212 DSM out state %d on NPort free\n", rc); | ||
2172 | 2190 | ||
2173 | lpfc_nlp_put(ndlp); | 2191 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, |
2192 | "DSM out: ste:%d did:x%x flg:x%x", | ||
2193 | rc, 0, 0); | ||
2194 | } | ||
2174 | 2195 | ||
2175 | return rc; | 2196 | return rc; |
2176 | } | 2197 | } |
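In the hunk above, lpfc_disc_state_machine() records whether lpfc_nlp_get() actually handed back the node; after the event handler runs it only dereferences ndlp (and drops the extra reference) when the get succeeded, otherwise it logs a generic exit trace. A sketch mirroring that structure, with illustrative helpers; as in the driver, the event handler itself must tolerate a node that is on its way out:

struct node {
	int id;
	/* ... */
};

extern struct node *node_get(struct node *n);	/* NULL if the node is being freed */
extern void node_put(struct node *n);
extern int run_event(struct node *n, int evt);
extern void trace(const char *fmt, ...);

static int state_machine(struct node *ndlp, int evt)
{
	int got_ref = (node_get(ndlp) != NULL);
	int rc = run_event(ndlp, evt);

	if (got_ref) {
		/* Safe to dereference ndlp here, then drop our reference. */
		trace("DSM out state %d on node x%x", rc, ndlp->id);
		node_put(ndlp);
	} else {
		/* The node may already be gone: log without touching it. */
		trace("DSM out state %d on freed node", rc);
	}
	return rc;
}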
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index fc5c3a42b05a..70255c11d3ad 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -1283,6 +1283,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) | |||
1283 | match = 0; | 1283 | match = 0; |
1284 | spin_lock_irq(shost->host_lock); | 1284 | spin_lock_irq(shost->host_lock); |
1285 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | 1285 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
1286 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
1287 | continue; | ||
1286 | if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && | 1288 | if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && |
1287 | i == ndlp->nlp_sid && | 1289 | i == ndlp->nlp_sid && |
1288 | ndlp->rport) { | 1290 | ndlp->rport) { |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index fdd01e384e36..fc0d9501aba6 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2007 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -203,8 +203,25 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) | |||
203 | case CMD_IOCB_RCV_SEQ64_CX: | 203 | case CMD_IOCB_RCV_SEQ64_CX: |
204 | case CMD_IOCB_RCV_ELS64_CX: | 204 | case CMD_IOCB_RCV_ELS64_CX: |
205 | case CMD_IOCB_RCV_CONT64_CX: | 205 | case CMD_IOCB_RCV_CONT64_CX: |
206 | case CMD_IOCB_RET_XRI64_CX: | ||
206 | type = LPFC_UNSOL_IOCB; | 207 | type = LPFC_UNSOL_IOCB; |
207 | break; | 208 | break; |
209 | case CMD_IOCB_XMIT_MSEQ64_CR: | ||
210 | case CMD_IOCB_XMIT_MSEQ64_CX: | ||
211 | case CMD_IOCB_RCV_SEQ_LIST64_CX: | ||
212 | case CMD_IOCB_RCV_ELS_LIST64_CX: | ||
213 | case CMD_IOCB_CLOSE_EXTENDED_CN: | ||
214 | case CMD_IOCB_ABORT_EXTENDED_CN: | ||
215 | case CMD_IOCB_RET_HBQE64_CN: | ||
216 | case CMD_IOCB_FCP_IBIDIR64_CR: | ||
217 | case CMD_IOCB_FCP_IBIDIR64_CX: | ||
218 | case CMD_IOCB_FCP_ITASKMGT64_CX: | ||
219 | case CMD_IOCB_LOGENTRY_CN: | ||
220 | case CMD_IOCB_LOGENTRY_ASYNC_CN: | ||
221 | printk("%s - Unhandled SLI-3 Command x%x\n", | ||
222 | __FUNCTION__, iocb_cmnd); | ||
223 | type = LPFC_UNKNOWN_IOCB; | ||
224 | break; | ||
208 | default: | 225 | default: |
209 | type = LPFC_UNKNOWN_IOCB; | 226 | type = LPFC_UNKNOWN_IOCB; |
210 | break; | 227 | break; |
@@ -529,10 +546,13 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) | |||
529 | { | 546 | { |
530 | struct lpfc_dmabuf *dmabuf, *next_dmabuf; | 547 | struct lpfc_dmabuf *dmabuf, *next_dmabuf; |
531 | struct hbq_dmabuf *hbq_buf; | 548 | struct hbq_dmabuf *hbq_buf; |
549 | unsigned long flags; | ||
532 | int i, hbq_count; | 550 | int i, hbq_count; |
551 | uint32_t hbqno; | ||
533 | 552 | ||
534 | hbq_count = lpfc_sli_hbq_count(); | 553 | hbq_count = lpfc_sli_hbq_count(); |
535 | /* Return all memory used by all HBQs */ | 554 | /* Return all memory used by all HBQs */ |
555 | spin_lock_irqsave(&phba->hbalock, flags); | ||
536 | for (i = 0; i < hbq_count; ++i) { | 556 | for (i = 0; i < hbq_count; ++i) { |
537 | list_for_each_entry_safe(dmabuf, next_dmabuf, | 557 | list_for_each_entry_safe(dmabuf, next_dmabuf, |
538 | &phba->hbqs[i].hbq_buffer_list, list) { | 558 | &phba->hbqs[i].hbq_buffer_list, list) { |
@@ -542,6 +562,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) | |||
542 | } | 562 | } |
543 | phba->hbqs[i].buffer_count = 0; | 563 | phba->hbqs[i].buffer_count = 0; |
544 | } | 564 | } |
565 | /* Return all HBQ buffers that are in flight */ | ||
566 | list_for_each_entry_safe(dmabuf, next_dmabuf, | ||
567 | &phba->hbqbuf_in_list, list) { | ||
568 | hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); | ||
569 | list_del(&hbq_buf->dbuf.list); | ||
570 | if (hbq_buf->tag == -1) { | ||
571 | (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) | ||
572 | (phba, hbq_buf); | ||
573 | } else { | ||
574 | hbqno = hbq_buf->tag >> 16; | ||
575 | if (hbqno >= LPFC_MAX_HBQS) | ||
576 | (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) | ||
577 | (phba, hbq_buf); | ||
578 | else | ||
579 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, | ||
580 | hbq_buf); | ||
581 | } | ||
582 | } | ||
583 | |||
584 | /* Mark the HBQs not in use */ | ||
585 | phba->hbq_in_use = 0; | ||
586 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
545 | } | 587 | } |
546 | 588 | ||
547 | static struct lpfc_hbq_entry * | 589 | static struct lpfc_hbq_entry * |
@@ -603,30 +645,40 @@ static int | |||
603 | lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) | 645 | lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) |
604 | { | 646 | { |
605 | uint32_t i, start, end; | 647 | uint32_t i, start, end; |
648 | unsigned long flags; | ||
606 | struct hbq_dmabuf *hbq_buffer; | 649 | struct hbq_dmabuf *hbq_buffer; |
607 | 650 | ||
608 | if (!phba->hbqs[hbqno].hbq_alloc_buffer) { | 651 | if (!phba->hbqs[hbqno].hbq_alloc_buffer) |
609 | return 0; | 652 | return 0; |
610 | } | ||
611 | 653 | ||
612 | start = phba->hbqs[hbqno].buffer_count; | 654 | start = phba->hbqs[hbqno].buffer_count; |
613 | end = count + start; | 655 | end = count + start; |
614 | if (end > lpfc_hbq_defs[hbqno]->entry_count) { | 656 | if (end > lpfc_hbq_defs[hbqno]->entry_count) |
615 | end = lpfc_hbq_defs[hbqno]->entry_count; | 657 | end = lpfc_hbq_defs[hbqno]->entry_count; |
616 | } | 658 | |
659 | /* Check whether HBQ is still in use */ | ||
660 | spin_lock_irqsave(&phba->hbalock, flags); | ||
661 | if (!phba->hbq_in_use) | ||
662 | goto out; | ||
617 | 663 | ||
618 | /* Populate HBQ entries */ | 664 | /* Populate HBQ entries */ |
619 | for (i = start; i < end; i++) { | 665 | for (i = start; i < end; i++) { |
620 | hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); | 666 | hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); |
621 | if (!hbq_buffer) | 667 | if (!hbq_buffer) |
622 | return 1; | 668 | goto err; |
623 | hbq_buffer->tag = (i | (hbqno << 16)); | 669 | hbq_buffer->tag = (i | (hbqno << 16)); |
624 | if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) | 670 | if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) |
625 | phba->hbqs[hbqno].buffer_count++; | 671 | phba->hbqs[hbqno].buffer_count++; |
626 | else | 672 | else |
627 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); | 673 | (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); |
628 | } | 674 | } |
675 | |||
676 | out: | ||
677 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
629 | return 0; | 678 | return 0; |
679 | err: | ||
680 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
681 | return 1; | ||
630 | } | 682 | } |
631 | 683 | ||
632 | int | 684 | int |
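lpfc_sli_hbqbuf_fill_hbqs() now holds hbalock for the whole fill, re-checks hbq_in_use before posting anything, and funnels both exits through out:/err: labels so the lock is released exactly once on every path. A compact sketch of that lock-then-goto-unwind shape with illustrative names; note the allocator has to be safe to call with the spinlock held:

#include <linux/kernel.h>
#include <linux/spinlock.h>

struct ring {
	spinlock_t lock;
	int in_use;
	int count, limit;
};

extern void *alloc_buf(void);			/* must be atomic-safe */
extern void free_buf(void *b);
extern int post_buf(struct ring *r, void *b);	/* nonzero if accepted */

static int fill_ring(struct ring *r, int want)
{
	unsigned long flags;
	int i, end;

	spin_lock_irqsave(&r->lock, flags);
	if (!r->in_use)
		goto out;			/* ring torn down meanwhile */

	end = min(r->count + want, r->limit);
	for (i = r->count; i < end; i++) {
		void *buf = alloc_buf();

		if (!buf)
			goto err;		/* unlock and report failure */
		if (post_buf(r, buf))
			r->count++;
		else
			free_buf(buf);
	}
out:
	spin_unlock_irqrestore(&r->lock, flags);
	return 0;
err:
	spin_unlock_irqrestore(&r->lock, flags);
	return 1;
}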
@@ -910,16 +962,29 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) | |||
910 | uint32_t hbqno; | 962 | uint32_t hbqno; |
911 | void *virt; /* virtual address ptr */ | 963 | void *virt; /* virtual address ptr */ |
912 | dma_addr_t phys; /* mapped address */ | 964 | dma_addr_t phys; /* mapped address */ |
965 | unsigned long flags; | ||
966 | |||
967 | /* Check whether HBQ is still in use */ | ||
968 | spin_lock_irqsave(&phba->hbalock, flags); | ||
969 | if (!phba->hbq_in_use) { | ||
970 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
971 | return NULL; | ||
972 | } | ||
913 | 973 | ||
914 | hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); | 974 | hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); |
915 | if (hbq_entry == NULL) | 975 | if (hbq_entry == NULL) { |
976 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
916 | return NULL; | 977 | return NULL; |
978 | } | ||
917 | list_del(&hbq_entry->dbuf.list); | 979 | list_del(&hbq_entry->dbuf.list); |
918 | 980 | ||
919 | hbqno = tag >> 16; | 981 | hbqno = tag >> 16; |
920 | new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); | 982 | new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); |
921 | if (new_hbq_entry == NULL) | 983 | if (new_hbq_entry == NULL) { |
984 | list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list); | ||
985 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
922 | return &hbq_entry->dbuf; | 986 | return &hbq_entry->dbuf; |
987 | } | ||
923 | new_hbq_entry->tag = -1; | 988 | new_hbq_entry->tag = -1; |
924 | phys = new_hbq_entry->dbuf.phys; | 989 | phys = new_hbq_entry->dbuf.phys; |
925 | virt = new_hbq_entry->dbuf.virt; | 990 | virt = new_hbq_entry->dbuf.virt; |
@@ -928,6 +993,9 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) | |||
928 | hbq_entry->dbuf.phys = phys; | 993 | hbq_entry->dbuf.phys = phys; |
929 | hbq_entry->dbuf.virt = virt; | 994 | hbq_entry->dbuf.virt = virt; |
930 | lpfc_sli_free_hbq(phba, hbq_entry); | 995 | lpfc_sli_free_hbq(phba, hbq_entry); |
996 | list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list); | ||
997 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
998 | |||
931 | return &new_hbq_entry->dbuf; | 999 | return &new_hbq_entry->dbuf; |
932 | } | 1000 | } |
933 | 1001 | ||
@@ -951,6 +1019,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
951 | uint32_t Rctl, Type; | 1019 | uint32_t Rctl, Type; |
952 | uint32_t match, i; | 1020 | uint32_t match, i; |
953 | struct lpfc_iocbq *iocbq; | 1021 | struct lpfc_iocbq *iocbq; |
1022 | struct lpfc_dmabuf *dmzbuf; | ||
954 | 1023 | ||
955 | match = 0; | 1024 | match = 0; |
956 | irsp = &(saveq->iocb); | 1025 | irsp = &(saveq->iocb); |
@@ -972,6 +1041,29 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
972 | return 1; | 1041 | return 1; |
973 | } | 1042 | } |
974 | 1043 | ||
1044 | if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && | ||
1045 | (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { | ||
1046 | if (irsp->ulpBdeCount > 0) { | ||
1047 | dmzbuf = lpfc_sli_get_buff(phba, pring, | ||
1048 | irsp->un.ulpWord[3]); | ||
1049 | lpfc_in_buf_free(phba, dmzbuf); | ||
1050 | } | ||
1051 | |||
1052 | if (irsp->ulpBdeCount > 1) { | ||
1053 | dmzbuf = lpfc_sli_get_buff(phba, pring, | ||
1054 | irsp->unsli3.sli3Words[3]); | ||
1055 | lpfc_in_buf_free(phba, dmzbuf); | ||
1056 | } | ||
1057 | |||
1058 | if (irsp->ulpBdeCount > 2) { | ||
1059 | dmzbuf = lpfc_sli_get_buff(phba, pring, | ||
1060 | irsp->unsli3.sli3Words[7]); | ||
1061 | lpfc_in_buf_free(phba, dmzbuf); | ||
1062 | } | ||
1063 | |||
1064 | return 1; | ||
1065 | } | ||
1066 | |||
975 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { | 1067 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { |
976 | if (irsp->ulpBdeCount != 0) { | 1068 | if (irsp->ulpBdeCount != 0) { |
977 | saveq->context2 = lpfc_sli_get_buff(phba, pring, | 1069 | saveq->context2 = lpfc_sli_get_buff(phba, pring, |
@@ -2293,6 +2385,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba) | |||
2293 | 2385 | ||
2294 | /* Initialize the struct lpfc_sli_hbq structure for each hbq */ | 2386 | /* Initialize the struct lpfc_sli_hbq structure for each hbq */ |
2295 | phba->link_state = LPFC_INIT_MBX_CMDS; | 2387 | phba->link_state = LPFC_INIT_MBX_CMDS; |
2388 | phba->hbq_in_use = 1; | ||
2296 | 2389 | ||
2297 | hbq_entry_index = 0; | 2390 | hbq_entry_index = 0; |
2298 | for (hbqno = 0; hbqno < hbq_count; ++hbqno) { | 2391 | for (hbqno = 0; hbqno < hbq_count; ++hbqno) { |
@@ -2404,9 +2497,7 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode) | |||
2404 | if ((pmb->mb.un.varCfgPort.sli_mode == 3) && | 2497 | if ((pmb->mb.un.varCfgPort.sli_mode == 3) && |
2405 | (!pmb->mb.un.varCfgPort.cMA)) { | 2498 | (!pmb->mb.un.varCfgPort.cMA)) { |
2406 | rc = -ENXIO; | 2499 | rc = -ENXIO; |
2407 | goto do_prep_failed; | ||
2408 | } | 2500 | } |
2409 | return rc; | ||
2410 | 2501 | ||
2411 | do_prep_failed: | 2502 | do_prep_failed: |
2412 | mempool_free(pmb, phba->mbox_mem_pool); | 2503 | mempool_free(pmb, phba->mbox_mem_pool); |
@@ -2625,14 +2716,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | |||
2625 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 2716 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
2626 | 2717 | ||
2627 | /* Mbox command <mbxCommand> cannot issue */ | 2718 | /* Mbox command <mbxCommand> cannot issue */ |
2628 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) | 2719 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); |
2629 | return MBX_NOT_FINISHED; | 2720 | return MBX_NOT_FINISHED; |
2630 | } | 2721 | } |
2631 | 2722 | ||
2632 | if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && | 2723 | if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && |
2633 | !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { | 2724 | !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { |
2634 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 2725 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
2635 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) | 2726 | LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); |
2636 | return MBX_NOT_FINISHED; | 2727 | return MBX_NOT_FINISHED; |
2637 | } | 2728 | } |
2638 | 2729 | ||
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 4b633d39a82a..ca540d1d041e 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.2.4" | 21 | #define LPFC_DRIVER_VERSION "8.2.5" |
22 | 22 | ||
23 | #define LPFC_DRIVER_NAME "lpfc" | 23 | #define LPFC_DRIVER_NAME "lpfc" |
24 | 24 | ||
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 9fad7663c117..86d05beb00b8 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2004-2006 Emulex. All rights reserved. * | 4 | * Copyright (C) 2004-2008 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * | 7 | * Portions Copyright (C) 2004-2005 Christoph Hellwig * |
@@ -327,7 +327,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) | |||
327 | * up and ready to FDISC. | 327 | * up and ready to FDISC. |
328 | */ | 328 | */ |
329 | ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); | 329 | ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); |
330 | if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { | 330 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) && |
331 | ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { | ||
331 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { | 332 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { |
332 | lpfc_set_disctmo(vport); | 333 | lpfc_set_disctmo(vport); |
333 | lpfc_initial_fdisc(vport); | 334 | lpfc_initial_fdisc(vport); |
@@ -358,7 +359,8 @@ disable_vport(struct fc_vport *fc_vport) | |||
358 | long timeout; | 359 | long timeout; |
359 | 360 | ||
360 | ndlp = lpfc_findnode_did(vport, Fabric_DID); | 361 | ndlp = lpfc_findnode_did(vport, Fabric_DID); |
361 | if (ndlp && phba->link_state >= LPFC_LINK_UP) { | 362 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) |
363 | && phba->link_state >= LPFC_LINK_UP) { | ||
362 | vport->unreg_vpi_cmpl = VPORT_INVAL; | 364 | vport->unreg_vpi_cmpl = VPORT_INVAL; |
363 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); | 365 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); |
364 | if (!lpfc_issue_els_npiv_logo(vport, ndlp)) | 366 | if (!lpfc_issue_els_npiv_logo(vport, ndlp)) |
@@ -372,6 +374,8 @@ disable_vport(struct fc_vport *fc_vport) | |||
372 | * calling lpfc_cleanup_rpis(vport, 1) | 374 | * calling lpfc_cleanup_rpis(vport, 1) |
373 | */ | 375 | */ |
374 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { | 376 | list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { |
377 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
378 | continue; | ||
375 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) | 379 | if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) |
376 | continue; | 380 | continue; |
377 | lpfc_disc_state_machine(vport, ndlp, NULL, | 381 | lpfc_disc_state_machine(vport, ndlp, NULL, |
@@ -414,7 +418,8 @@ enable_vport(struct fc_vport *fc_vport) | |||
414 | * up and ready to FDISC. | 418 | * up and ready to FDISC. |
415 | */ | 419 | */ |
416 | ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); | 420 | ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); |
417 | if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { | 421 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) |
422 | && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { | ||
418 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { | 423 | if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { |
419 | lpfc_set_disctmo(vport); | 424 | lpfc_set_disctmo(vport); |
420 | lpfc_initial_fdisc(vport); | 425 | lpfc_initial_fdisc(vport); |
@@ -498,7 +503,41 @@ lpfc_vport_delete(struct fc_vport *fc_vport) | |||
498 | scsi_remove_host(lpfc_shost_from_vport(vport)); | 503 | scsi_remove_host(lpfc_shost_from_vport(vport)); |
499 | 504 | ||
500 | ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); | 505 | ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); |
501 | if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && | 506 | |
507 | /* In case of driver unload, we shall not perform fabric logo as the | ||
508 | * worker thread already stopped at this stage and, in this case, we | ||
509 | * can safely skip the fabric logo. | ||
510 | */ | ||
511 | if (phba->pport->load_flag & FC_UNLOADING) { | ||
512 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) && | ||
513 | ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && | ||
514 | phba->link_state >= LPFC_LINK_UP) { | ||
515 | /* First look for the Fabric ndlp */ | ||
516 | ndlp = lpfc_findnode_did(vport, Fabric_DID); | ||
517 | if (!ndlp) | ||
518 | goto skip_logo; | ||
519 | else if (!NLP_CHK_NODE_ACT(ndlp)) { | ||
520 | ndlp = lpfc_enable_node(vport, ndlp, | ||
521 | NLP_STE_UNUSED_NODE); | ||
522 | if (!ndlp) | ||
523 | goto skip_logo; | ||
524 | } | ||
525 | /* Remove ndlp from vport node list */ | ||
526 | lpfc_dequeue_node(vport, ndlp); | ||
527 | |||
528 | /* Indicate free memory when release */ | ||
529 | spin_lock_irq(&phba->ndlp_lock); | ||
530 | NLP_SET_FREE_REQ(ndlp); | ||
531 | spin_unlock_irq(&phba->ndlp_lock); | ||
532 | /* Kick off release ndlp when it can be safely done */ | ||
533 | lpfc_nlp_put(ndlp); | ||
534 | } | ||
535 | goto skip_logo; | ||
536 | } | ||
537 | |||
538 | /* Otherwise, we will perform fabric logo as needed */ | ||
539 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) && | ||
540 | ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && | ||
502 | phba->link_state >= LPFC_LINK_UP) { | 541 | phba->link_state >= LPFC_LINK_UP) { |
503 | if (vport->cfg_enable_da_id) { | 542 | if (vport->cfg_enable_da_id) { |
504 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); | 543 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); |
@@ -519,8 +558,27 @@ lpfc_vport_delete(struct fc_vport *fc_vport) | |||
519 | if (!ndlp) | 558 | if (!ndlp) |
520 | goto skip_logo; | 559 | goto skip_logo; |
521 | lpfc_nlp_init(vport, ndlp, Fabric_DID); | 560 | lpfc_nlp_init(vport, ndlp, Fabric_DID); |
561 | /* Indicate free memory when release */ | ||
562 | NLP_SET_FREE_REQ(ndlp); | ||
522 | } else { | 563 | } else { |
564 | if (!NLP_CHK_NODE_ACT(ndlp)) | ||
565 | ndlp = lpfc_enable_node(vport, ndlp, | ||
566 | NLP_STE_UNUSED_NODE); | ||
567 | if (!ndlp) | ||
568 | goto skip_logo; | ||
569 | |||
570 | /* Remove ndlp from vport node list */ | ||
523 | lpfc_dequeue_node(vport, ndlp); | 571 | lpfc_dequeue_node(vport, ndlp); |
572 | spin_lock_irq(&phba->ndlp_lock); | ||
573 | if (!NLP_CHK_FREE_REQ(ndlp)) | ||
574 | /* Indicate free memory when release */ | ||
575 | NLP_SET_FREE_REQ(ndlp); | ||
576 | else { | ||
577 | /* Skip this if ndlp is already in free mode */ | ||
578 | spin_unlock_irq(&phba->ndlp_lock); | ||
579 | goto skip_logo; | ||
580 | } | ||
581 | spin_unlock_irq(&phba->ndlp_lock); | ||
524 | } | 582 | } |
525 | vport->unreg_vpi_cmpl = VPORT_INVAL; | 583 | vport->unreg_vpi_cmpl = VPORT_INVAL; |
526 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); | 584 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); |
@@ -534,9 +592,9 @@ skip_logo: | |||
534 | lpfc_sli_host_down(vport); | 592 | lpfc_sli_host_down(vport); |
535 | 593 | ||
536 | lpfc_stop_vport_timers(vport); | 594 | lpfc_stop_vport_timers(vport); |
537 | lpfc_unreg_all_rpis(vport); | ||
538 | 595 | ||
539 | if (!(phba->pport->load_flag & FC_UNLOADING)) { | 596 | if (!(phba->pport->load_flag & FC_UNLOADING)) { |
597 | lpfc_unreg_all_rpis(vport); | ||
540 | lpfc_unreg_default_rpis(vport); | 598 | lpfc_unreg_default_rpis(vport); |
541 | /* | 599 | /* |
542 | * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) | 600 | * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) |
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 4d59ae8491a4..b135a1ed4b2c 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c | |||
@@ -151,19 +151,19 @@ mega_setup_mailbox(adapter_t *adapter) | |||
151 | */ | 151 | */ |
152 | if( adapter->flag & BOARD_IOMAP ) { | 152 | if( adapter->flag & BOARD_IOMAP ) { |
153 | 153 | ||
154 | outb_p(adapter->mbox_dma & 0xFF, | 154 | outb(adapter->mbox_dma & 0xFF, |
155 | adapter->host->io_port + MBOX_PORT0); | 155 | adapter->host->io_port + MBOX_PORT0); |
156 | 156 | ||
157 | outb_p((adapter->mbox_dma >> 8) & 0xFF, | 157 | outb((adapter->mbox_dma >> 8) & 0xFF, |
158 | adapter->host->io_port + MBOX_PORT1); | 158 | adapter->host->io_port + MBOX_PORT1); |
159 | 159 | ||
160 | outb_p((adapter->mbox_dma >> 16) & 0xFF, | 160 | outb((adapter->mbox_dma >> 16) & 0xFF, |
161 | adapter->host->io_port + MBOX_PORT2); | 161 | adapter->host->io_port + MBOX_PORT2); |
162 | 162 | ||
163 | outb_p((adapter->mbox_dma >> 24) & 0xFF, | 163 | outb((adapter->mbox_dma >> 24) & 0xFF, |
164 | adapter->host->io_port + MBOX_PORT3); | 164 | adapter->host->io_port + MBOX_PORT3); |
165 | 165 | ||
166 | outb_p(ENABLE_MBOX_BYTE, | 166 | outb(ENABLE_MBOX_BYTE, |
167 | adapter->host->io_port + ENABLE_MBOX_REGION); | 167 | adapter->host->io_port + ENABLE_MBOX_REGION); |
168 | 168 | ||
169 | irq_ack(adapter); | 169 | irq_ack(adapter); |
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index b6587a6d8486..0ad215e27b83 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c | |||
@@ -59,7 +59,6 @@ EXPORT_SYMBOL(mraid_mm_register_adp); | |||
59 | EXPORT_SYMBOL(mraid_mm_unregister_adp); | 59 | EXPORT_SYMBOL(mraid_mm_unregister_adp); |
60 | EXPORT_SYMBOL(mraid_mm_adapter_app_handle); | 60 | EXPORT_SYMBOL(mraid_mm_adapter_app_handle); |
61 | 61 | ||
62 | static int majorno; | ||
63 | static uint32_t drvr_ver = 0x02200207; | 62 | static uint32_t drvr_ver = 0x02200207; |
64 | 63 | ||
65 | static int adapters_count_g; | 64 | static int adapters_count_g; |
@@ -76,6 +75,12 @@ static const struct file_operations lsi_fops = { | |||
76 | .owner = THIS_MODULE, | 75 | .owner = THIS_MODULE, |
77 | }; | 76 | }; |
78 | 77 | ||
78 | static struct miscdevice megaraid_mm_dev = { | ||
79 | .minor = MISC_DYNAMIC_MINOR, | ||
80 | .name = "megadev0", | ||
81 | .fops = &lsi_fops, | ||
82 | }; | ||
83 | |||
79 | /** | 84 | /** |
80 | * mraid_mm_open - open routine for char node interface | 85 | * mraid_mm_open - open routine for char node interface |
81 | * @inode : unused | 86 | * @inode : unused |
@@ -1184,15 +1189,16 @@ mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp) | |||
1184 | static int __init | 1189 | static int __init |
1185 | mraid_mm_init(void) | 1190 | mraid_mm_init(void) |
1186 | { | 1191 | { |
1192 | int err; | ||
1193 | |||
1187 | // Announce the driver version | 1194 | // Announce the driver version |
1188 | con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n", | 1195 | con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n", |
1189 | LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION)); | 1196 | LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION)); |
1190 | 1197 | ||
1191 | majorno = register_chrdev(0, "megadev", &lsi_fops); | 1198 | err = misc_register(&megaraid_mm_dev); |
1192 | 1199 | if (err < 0) { | |
1193 | if (majorno < 0) { | 1200 | con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n")); |
1194 | con_log(CL_ANN, ("megaraid cmm: cannot get major\n")); | 1201 | return err; |
1195 | return majorno; | ||
1196 | } | 1202 | } |
1197 | 1203 | ||
1198 | init_waitqueue_head(&wait_q); | 1204 | init_waitqueue_head(&wait_q); |
@@ -1230,7 +1236,7 @@ mraid_mm_exit(void) | |||
1230 | { | 1236 | { |
1231 | con_log(CL_DLEVEL1 , ("exiting common mod\n")); | 1237 | con_log(CL_DLEVEL1 , ("exiting common mod\n")); |
1232 | 1238 | ||
1233 | unregister_chrdev(majorno, "megadev"); | 1239 | misc_deregister(&megaraid_mm_dev); |
1234 | } | 1240 | } |
1235 | 1241 | ||
1236 | module_init(mraid_mm_init); | 1242 | module_init(mraid_mm_init); |
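The megaraid_mm conversion above registers a misc device with a dynamic minor instead of claiming a whole character major through register_chrdev(), which lets udev create the megadev0 node from the reported minor. A minimal standalone sketch of the same registration pattern; the device name here is illustrative:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice demo_dev = {
	.minor = MISC_DYNAMIC_MINOR,	/* let the misc core pick a minor */
	.name  = "demodev0",		/* illustrative node name */
	.fops  = &demo_fops,
};

static int __init demo_init(void)
{
	return misc_register(&demo_dev);
}

static void __exit demo_exit(void)
{
	misc_deregister(&demo_dev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");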
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h index c8762b2b8ed1..55b425c0a654 100644 --- a/drivers/scsi/megaraid/megaraid_mm.h +++ b/drivers/scsi/megaraid/megaraid_mm.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/moduleparam.h> | 22 | #include <linux/moduleparam.h> |
23 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
24 | #include <linux/list.h> | 24 | #include <linux/list.h> |
25 | #include <linux/miscdevice.h> | ||
25 | 26 | ||
26 | #include "mbox_defs.h" | 27 | #include "mbox_defs.h" |
27 | #include "megaraid_ioctl.h" | 28 | #include "megaraid_ioctl.h" |
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index 651d09b08f2a..fd63b06d9ef1 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c | |||
@@ -1759,6 +1759,7 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg) | |||
1759 | 1759 | ||
1760 | switch (mesg.event) { | 1760 | switch (mesg.event) { |
1761 | case PM_EVENT_SUSPEND: | 1761 | case PM_EVENT_SUSPEND: |
1762 | case PM_EVENT_HIBERNATE: | ||
1762 | case PM_EVENT_FREEZE: | 1763 | case PM_EVENT_FREEZE: |
1763 | break; | 1764 | break; |
1764 | default: | 1765 | default: |
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c new file mode 100644 index 000000000000..d4a6ac3c9c47 --- /dev/null +++ b/drivers/scsi/mvsas.c | |||
@@ -0,0 +1,2970 @@ | |||
1 | /* | ||
2 | mvsas.c - Marvell 88SE6440 SAS/SATA support | ||
3 | |||
4 | Copyright 2007 Red Hat, Inc. | ||
5 | Copyright 2008 Marvell. <kewei@marvell.com> | ||
6 | |||
7 | This program is free software; you can redistribute it and/or | ||
8 | modify it under the terms of the GNU General Public License as | ||
9 | published by the Free Software Foundation; either version 2, | ||
10 | or (at your option) any later version. | ||
11 | |||
12 | This program is distributed in the hope that it will be useful, | ||
13 | but WITHOUT ANY WARRANTY; without even the implied warranty | ||
14 | of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | See the GNU General Public License for more details. | ||
16 | |||
17 | You should have received a copy of the GNU General Public | ||
18 | License along with this program; see the file COPYING. If not, | ||
19 | write to the Free Software Foundation, 675 Mass Ave, Cambridge, | ||
20 | MA 02139, USA. | ||
21 | |||
22 | --------------------------------------------------------------- | ||
23 | |||
24 | Random notes: | ||
25 | * hardware supports controlling the endian-ness of data | ||
26 | structures. this permits elimination of all the le32_to_cpu() | ||
27 | and cpu_to_le32() conversions. | ||
28 | |||
29 | */ | ||
30 | |||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/pci.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/spinlock.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include <linux/dma-mapping.h> | ||
38 | #include <linux/ctype.h> | ||
39 | #include <scsi/libsas.h> | ||
40 | #include <asm/io.h> | ||
41 | |||
42 | #define DRV_NAME "mvsas" | ||
43 | #define DRV_VERSION "0.5" | ||
44 | #define _MV_DUMP 0 | ||
45 | #define MVS_DISABLE_NVRAM | ||
46 | #define MVS_DISABLE_MSI | ||
47 | |||
48 | #define mr32(reg) readl(regs + MVS_##reg) | ||
49 | #define mw32(reg,val) writel((val), regs + MVS_##reg) | ||
50 | #define mw32_f(reg,val) do { \ | ||
51 | writel((val), regs + MVS_##reg); \ | ||
52 | readl(regs + MVS_##reg); \ | ||
53 | } while (0) | ||
54 | |||
55 | #define MVS_ID_NOT_MAPPED 0xff | ||
56 | #define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) | ||
57 | |||
58 | /* offset for D2H FIS in the Received FIS List Structure */ | ||
59 | #define SATA_RECEIVED_D2H_FIS(reg_set) \ | ||
60 | ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40) | ||
61 | #define SATA_RECEIVED_PIO_FIS(reg_set) \ | ||
62 | ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20) | ||
63 | #define UNASSOC_D2H_FIS(id) \ | ||
64 | ((void *) mvi->rx_fis + 0x100 * id) | ||
65 | |||
66 | #define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \ | ||
67 | for ((__mc) = (__lseq_mask), (__lseq) = 0; \ | ||
68 | (__mc) != 0 && __rest; \ | ||
69 | (++__lseq), (__mc) >>= 1) | ||
70 | |||
71 | /* driver compile-time configuration */ | ||
72 | enum driver_configuration { | ||
73 | MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ | ||
74 | MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ | ||
75 | /* software requires power-of-2 | ||
76 | ring size */ | ||
77 | |||
78 | MVS_SLOTS = 512, /* command slots */ | ||
79 | MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */ | ||
80 | MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ | ||
81 | MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ | ||
82 | MVS_OAF_SZ = 64, /* Open address frame buffer size */ | ||
83 | |||
84 | MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */ | ||
85 | |||
86 | MVS_QUEUE_SIZE = 30, /* Support Queue depth */ | ||
87 | }; | ||
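/*
 * Each command slot owns an MVS_SLOT_BUF_SZ-byte DMA buffer which the
 * mvs_task_prep_*() helpers below carve into up to four regions:
 * command table (MVS_SSP_CMD_SZ/MVS_ATA_CMD_SZ), open address frame
 * (MVS_OAF_SZ), PRD table, and a trailing status buffer for error and
 * response information.
 */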
88 | |||
89 | /* unchangeable hardware details */ | ||
90 | enum hardware_details { | ||
91 | MVS_MAX_PHYS = 8, /* max. possible phys */ | ||
92 | MVS_MAX_PORTS = 8, /* max. possible ports */ | ||
93 | MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100), | ||
94 | }; | ||
95 | |||
96 | /* peripheral registers (BAR2) */ | ||
97 | enum peripheral_registers { | ||
98 | SPI_CTL = 0x10, /* EEPROM control */ | ||
99 | SPI_CMD = 0x14, /* EEPROM command */ | ||
100 | SPI_DATA = 0x18, /* EEPROM data */ | ||
101 | }; | ||
102 | |||
103 | enum peripheral_register_bits { | ||
104 | TWSI_RDY = (1U << 7), /* EEPROM interface ready */ | ||
105 | TWSI_RD = (1U << 4), /* EEPROM read access */ | ||
106 | |||
107 | SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */ | ||
108 | }; | ||
109 | |||
110 | /* enhanced mode registers (BAR4) */ | ||
111 | enum hw_registers { | ||
112 | MVS_GBL_CTL = 0x04, /* global control */ | ||
113 | MVS_GBL_INT_STAT = 0x08, /* global irq status */ | ||
114 | MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ | ||
115 | MVS_GBL_PORT_TYPE = 0xa0, /* port type */ | ||
116 | |||
117 | MVS_CTL = 0x100, /* SAS/SATA port configuration */ | ||
118 | MVS_PCS = 0x104, /* SAS/SATA port control/status */ | ||
119 | MVS_CMD_LIST_LO = 0x108, /* cmd list addr */ | ||
120 | MVS_CMD_LIST_HI = 0x10C, | ||
121 | MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */ | ||
122 | MVS_RX_FIS_HI = 0x114, | ||
123 | |||
124 | MVS_TX_CFG = 0x120, /* TX configuration */ | ||
125 | MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ | ||
126 | MVS_TX_HI = 0x128, | ||
127 | |||
128 | MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */ | ||
129 | MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */ | ||
130 | MVS_RX_CFG = 0x134, /* RX configuration */ | ||
131 | MVS_RX_LO = 0x138, /* RX (completion) ring addr */ | ||
132 | MVS_RX_HI = 0x13C, | ||
133 | MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */ | ||
134 | |||
135 | MVS_INT_COAL = 0x148, /* Int coalescing config */ | ||
136 | MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ | ||
137 | MVS_INT_STAT = 0x150, /* Central int status */ | ||
138 | MVS_INT_MASK = 0x154, /* Central int enable */ | ||
139 | MVS_INT_STAT_SRS = 0x158, /* SATA register set status */ | ||
140 | MVS_INT_MASK_SRS = 0x15C, | ||
141 | |||
142 | /* ports 1-3 follow after this */ | ||
143 | MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */ | ||
144 | MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */ | ||
145 | MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */ | ||
146 | MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */ | ||
147 | |||
148 | /* ports 1-3 follow after this */ | ||
149 | MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */ | ||
150 | MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */ | ||
151 | |||
152 | MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */ | ||
153 | MVS_CMD_DATA = 0x1BC, /* Command register port (data) */ | ||
154 | |||
155 | /* ports 1-3 follow after this */ | ||
156 | MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */ | ||
157 | MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */ | ||
158 | MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */ | ||
159 | MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */ | ||
160 | |||
161 | /* ports 1-3 follow after this */ | ||
162 | MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */ | ||
163 | MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */ | ||
164 | MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */ | ||
165 | MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */ | ||
166 | }; | ||
167 | |||
168 | enum hw_register_bits { | ||
169 | /* MVS_GBL_CTL */ | ||
170 | INT_EN = (1U << 1), /* Global int enable */ | ||
171 | HBA_RST = (1U << 0), /* HBA reset */ | ||
172 | |||
173 | /* MVS_GBL_INT_STAT */ | ||
174 | INT_XOR = (1U << 4), /* XOR engine event */ | ||
175 | INT_SAS_SATA = (1U << 0), /* SAS/SATA event */ | ||
176 | |||
177 | /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */ | ||
178 | SATA_TARGET = (1U << 16), /* port0 SATA target enable */ | ||
179 | MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */ | ||
180 | MODE_AUTO_DET_PORT6 = (1U << 14), | ||
181 | MODE_AUTO_DET_PORT5 = (1U << 13), | ||
182 | MODE_AUTO_DET_PORT4 = (1U << 12), | ||
183 | MODE_AUTO_DET_PORT3 = (1U << 11), | ||
184 | MODE_AUTO_DET_PORT2 = (1U << 10), | ||
185 | MODE_AUTO_DET_PORT1 = (1U << 9), | ||
186 | MODE_AUTO_DET_PORT0 = (1U << 8), | ||
187 | MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 | | ||
188 | MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 | | ||
189 | MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 | | ||
190 | MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7, | ||
191 | MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */ | ||
192 | MODE_SAS_PORT6_MASK = (1U << 6), | ||
193 | MODE_SAS_PORT5_MASK = (1U << 5), | ||
194 | MODE_SAS_PORT4_MASK = (1U << 4), | ||
195 | MODE_SAS_PORT3_MASK = (1U << 3), | ||
196 | MODE_SAS_PORT2_MASK = (1U << 2), | ||
197 | MODE_SAS_PORT1_MASK = (1U << 1), | ||
198 | MODE_SAS_PORT0_MASK = (1U << 0), | ||
199 | MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK | | ||
200 | MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK | | ||
201 | MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK | | ||
202 | MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK, | ||
203 | |||
204 | /* SAS_MODE value may be | ||
205 | * dictated (in hw) by values | ||
206 | * of SATA_TARGET & AUTO_DET | ||
207 | */ | ||
208 | |||
209 | /* MVS_TX_CFG */ | ||
210 | TX_EN = (1U << 16), /* Enable TX */ | ||
211 | TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */ | ||
212 | |||
213 | /* MVS_RX_CFG */ | ||
214 | RX_EN = (1U << 16), /* Enable RX */ | ||
215 | RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */ | ||
216 | |||
217 | /* MVS_INT_COAL */ | ||
218 | COAL_EN = (1U << 16), /* Enable int coalescing */ | ||
219 | |||
220 | /* MVS_INT_STAT, MVS_INT_MASK */ | ||
221 | CINT_I2C = (1U << 31), /* I2C event */ | ||
222 | CINT_SW0 = (1U << 30), /* software event 0 */ | ||
223 | CINT_SW1 = (1U << 29), /* software event 1 */ | ||
224 | CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */ | ||
225 | CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */ | ||
226 | CINT_MEM = (1U << 26), /* int mem parity err */ | ||
227 | CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */ | ||
228 | CINT_SRS = (1U << 3), /* SRS event */ | ||
229 | CINT_CI_STOP = (1U << 1), /* cmd issue stopped */ | ||
230 | CINT_DONE = (1U << 0), /* cmd completion */ | ||
231 | |||
232 | /* shl for ports 1-3 */ | ||
233 | CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */ | ||
234 | CINT_PORT = (1U << 8), /* port0 event */ | ||
235 | CINT_PORT_MASK_OFFSET = 8, | ||
236 | CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET), | ||
237 | |||
238 | /* TX (delivery) ring bits */ | ||
239 | TXQ_CMD_SHIFT = 29, | ||
240 | TXQ_CMD_SSP = 1, /* SSP protocol */ | ||
241 | TXQ_CMD_SMP = 2, /* SMP protocol */ | ||
242 | TXQ_CMD_STP = 3, /* STP/SATA protocol */ | ||
243 | TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ | ||
244 | TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ | ||
245 | TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ | ||
246 | TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */ | ||
247 | TXQ_SRS_SHIFT = 20, /* SATA register set */ | ||
248 | TXQ_SRS_MASK = 0x7f, | ||
249 | TXQ_PHY_SHIFT = 12, /* PHY bitmap */ | ||
250 | TXQ_PHY_MASK = 0xff, | ||
251 | TXQ_SLOT_MASK = 0xfff, /* slot number */ | ||
252 | |||
253 | /* RX (completion) ring bits */ | ||
254 | RXQ_GOOD = (1U << 23), /* Response good */ | ||
255 | RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */ | ||
256 | RXQ_CMD_RX = (1U << 20), /* target cmd received */ | ||
257 | RXQ_ATTN = (1U << 19), /* attention */ | ||
258 | RXQ_RSP = (1U << 18), /* response frame xfer'd */ | ||
259 | RXQ_ERR = (1U << 17), /* err info rec xfer'd */ | ||
260 | RXQ_DONE = (1U << 16), /* cmd complete */ | ||
261 | RXQ_SLOT_MASK = 0xfff, /* slot number */ | ||
262 | |||
263 | /* mvs_cmd_hdr bits */ | ||
264 | MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */ | ||
265 | MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */ | ||
266 | |||
267 | /* SSP initiator only */ | ||
268 | MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */ | ||
269 | |||
270 | /* SSP initiator or target */ | ||
271 | MCH_SSP_FR_TASK = 0x1, /* TASK frame */ | ||
272 | |||
273 | /* SSP target only */ | ||
274 | MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */ | ||
275 | MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */ | ||
276 | MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */ | ||
277 | MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */ | ||
278 | |||
279 | MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */ | ||
280 | MCH_FBURST = (1U << 11), /* first burst (SSP) */ | ||
281 | MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */ | ||
282 | MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */ | ||
283 | MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */ | ||
284 | MCH_RESET = (1U << 7), /* Reset (STP/SATA) */ | ||
285 | MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */ | ||
286 | MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */ | ||
287 | MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */ | ||
288 | MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/ | ||
289 | |||
290 | CCTL_RST = (1U << 5), /* port logic reset */ | ||
291 | |||
292 | /* 0(LSB first), 1(MSB first) */ | ||
293 | CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */ | ||
294 | CCTL_ENDIAN_RSP = (1U << 2), /* response frame */ | ||
295 | CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */ | ||
296 | CCTL_ENDIAN_CMD = (1U << 0), /* command table */ | ||
297 | |||
298 | /* MVS_Px_SER_CTLSTAT (per-phy control) */ | ||
299 | PHY_SSP_RST = (1U << 3), /* reset SSP link layer */ | ||
300 | PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */ | ||
301 | PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */ | ||
302 | PHY_RST = (1U << 0), /* phy reset */ | ||
303 | PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8), | ||
304 | PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12), | ||
305 | PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), | ||
306 | PHY_NEG_SPP_PHYS_LINK_RATE_MASK = | ||
307 | (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), | ||
308 | PHY_READY_MASK = (1U << 20), | ||
309 | |||
310 | /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */ | ||
311 | PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */ | ||
312 | PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */ | ||
313 | PHYEV_AN = (1U << 18), /* SATA async notification */ | ||
314 | PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */ | ||
315 | PHYEV_SIG_FIS = (1U << 16), /* signature FIS */ | ||
316 | PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */ | ||
317 | PHYEV_IU_BIG = (1U << 11), /* IU too long err */ | ||
318 | PHYEV_IU_SMALL = (1U << 10), /* IU too short err */ | ||
319 | PHYEV_UNK_TAG = (1U << 9), /* unknown tag */ | ||
320 | PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */ | ||
321 | PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */ | ||
322 | PHYEV_PORT_SEL = (1U << 6), /* port selector present */ | ||
323 | PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */ | ||
324 | PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */ | ||
325 | PHYEV_ID_FAIL = (1U << 3), /* identify failed */ | ||
326 | PHYEV_ID_DONE = (1U << 2), /* identify done */ | ||
327 | PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */ | ||
328 | PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */ | ||
329 | |||
330 | /* MVS_PCS */ | ||
331 | PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */ | ||
332 | PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */ | ||
333 | PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */ | ||
334 | PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ | ||
335 | PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ | ||
336 | PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ | ||
337 | PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ | ||
338 | PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ | ||
339 | PCS_CMD_RST = (1U << 1), /* reset cmd issue */ | ||
340 | PCS_CMD_EN = (1U << 0), /* enable cmd issue */ | ||
341 | |||
342 | /* Port n Attached Device Info */ | ||
343 | PORT_DEV_SSP_TRGT = (1U << 19), | ||
344 | PORT_DEV_SMP_TRGT = (1U << 18), | ||
345 | PORT_DEV_STP_TRGT = (1U << 17), | ||
346 | PORT_DEV_SSP_INIT = (1U << 11), | ||
347 | PORT_DEV_SMP_INIT = (1U << 10), | ||
348 | PORT_DEV_STP_INIT = (1U << 9), | ||
349 | PORT_PHY_ID_MASK = (0xFFU << 24), | ||
350 | PORT_DEV_TRGT_MASK = (0x7U << 17), | ||
351 | PORT_DEV_INIT_MASK = (0x7U << 9), | ||
352 | PORT_DEV_TYPE_MASK = (0x7U << 0), | ||
353 | |||
354 | /* Port n PHY Status */ | ||
355 | PHY_RDY = (1U << 2), | ||
356 | PHY_DW_SYNC = (1U << 1), | ||
357 | PHY_OOB_DTCTD = (1U << 0), | ||
358 | |||
359 | /* VSR */ | ||
360 | /* PHYMODE 6 (CDB) */ | ||
361 | PHY_MODE6_DTL_SPEED = (1U << 27), | ||
362 | }; | ||
363 | |||
364 | enum mvs_info_flags { | ||
365 | MVF_MSI = (1U << 0), /* MSI is enabled */ | ||
366 | MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ | ||
367 | }; | ||
368 | |||
369 | enum sas_cmd_port_registers { | ||
370 | CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */ | ||
371 | CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */ | ||
372 | CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */ | ||
373 | CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */ | ||
374 | CMD_OOB_SPACE = 0x110, /* OOB space control register */ | ||
375 | CMD_OOB_BURST = 0x114, /* OOB burst control register */ | ||
376 | CMD_PHY_TIMER = 0x118, /* PHY timer control register */ | ||
377 | CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */ | ||
378 | CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */ | ||
379 | CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */ | ||
380 | CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */ | ||
381 | CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */ | ||
382 | CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */ | ||
383 | CMD_ID_TEST = 0x134, /* ID test register */ | ||
384 | CMD_PL_TIMER = 0x138, /* PL timer register */ | ||
385 | CMD_WD_TIMER = 0x13c, /* WD timer register */ | ||
386 | CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */ | ||
387 | CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */ | ||
388 | CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */ | ||
389 | CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */ | ||
390 | CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */ | ||
391 | CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */ | ||
392 | CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */ | ||
393 | CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */ | ||
394 | CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */ | ||
395 | CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memory BIST Status */ | ||
396 | CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */ | ||
397 | CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */ | ||
398 | CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */ | ||
399 | CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */ | ||
400 | CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */ | ||
401 | CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */ | ||
402 | CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */ | ||
403 | CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */ | ||
404 | CMD_RESET_COUNT = 0x188, /* Reset Count */ | ||
405 | CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */ | ||
406 | CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */ | ||
407 | CMD_PHY_CTL = 0x194, /* PHY Control and Status */ | ||
408 | CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */ | ||
409 | CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */ | ||
410 | CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */ | ||
411 | CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */ | ||
412 | CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */ | ||
413 | CMD_HOST_CTL = 0x1AC, /* Host Control Status */ | ||
414 | CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */ | ||
415 | CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */ | ||
416 | CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */ | ||
417 | CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */ | ||
418 | CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */ | ||
419 | CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */ | ||
420 | }; | ||
421 | |||
422 | /* SAS/SATA configuration port registers, aka phy registers */ | ||
423 | enum sas_sata_config_port_regs { | ||
424 | PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */ | ||
425 | PHYR_ADDR_LO = 0x04, /* my SAS address (low) */ | ||
426 | PHYR_ADDR_HI = 0x08, /* my SAS address (high) */ | ||
427 | PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */ | ||
428 | PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */ | ||
429 | PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */ | ||
430 | PHYR_SATA_CTL = 0x18, /* SATA control */ | ||
431 | PHYR_PHY_STAT = 0x1C, /* PHY status */ | ||
432 | PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */ | ||
433 | PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */ | ||
434 | PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */ | ||
435 | PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */ | ||
436 | PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */ | ||
437 | PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */ | ||
438 | PHYR_WIDE_PORT = 0x38, /* wide port participating */ | ||
439 | PHYR_CURRENT0 = 0x80, /* current connection info 0 */ | ||
440 | PHYR_CURRENT1 = 0x84, /* current connection info 1 */ | ||
441 | PHYR_CURRENT2 = 0x88, /* current connection info 2 */ | ||
442 | }; | ||
443 | |||
444 | /* SAS/SATA Vendor Specific Port Registers */ | ||
445 | enum sas_sata_vsp_regs { | ||
446 | VSR_PHY_STAT = 0x00, /* Phy Status */ | ||
447 | VSR_PHY_MODE1 = 0x01, /* phy tx */ | ||
448 | VSR_PHY_MODE2 = 0x02, /* tx scc */ | ||
449 | VSR_PHY_MODE3 = 0x03, /* pll */ | ||
450 | VSR_PHY_MODE4 = 0x04, /* VCO */ | ||
451 | VSR_PHY_MODE5 = 0x05, /* Rx */ | ||
452 | VSR_PHY_MODE6 = 0x06, /* CDR */ | ||
453 | VSR_PHY_MODE7 = 0x07, /* Impedance */ | ||
454 | VSR_PHY_MODE8 = 0x08, /* Voltage */ | ||
455 | VSR_PHY_MODE9 = 0x09, /* Test */ | ||
456 | VSR_PHY_MODE10 = 0x0A, /* Power */ | ||
457 | VSR_PHY_MODE11 = 0x0B, /* Phy Mode */ | ||
458 | VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */ | ||
459 | VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */ | ||
460 | }; | ||
461 | |||
462 | enum pci_cfg_registers { | ||
463 | PCR_PHY_CTL = 0x40, | ||
464 | PCR_PHY_CTL2 = 0x90, | ||
465 | PCR_DEV_CTRL = 0xE8, | ||
466 | }; | ||
467 | |||
468 | enum pci_cfg_register_bits { | ||
469 | PCTL_PWR_ON = (0xFU << 24), | ||
470 | PCTL_OFF = (0xFU << 12), | ||
471 | PRD_REQ_SIZE = (0x4000), | ||
472 | PRD_REQ_MASK = (0x00007000), | ||
473 | }; | ||
474 | |||
475 | enum nvram_layout_offsets { | ||
476 | NVR_SIG = 0x00, /* 0xAA, 0x55 */ | ||
477 | NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */ | ||
478 | }; | ||
479 | |||
480 | enum chip_flavors { | ||
481 | chip_6320, | ||
482 | chip_6440, | ||
483 | chip_6480, | ||
484 | }; | ||
485 | |||
486 | enum port_type { | ||
487 | PORT_TYPE_SAS = (1L << 1), | ||
488 | PORT_TYPE_SATA = (1L << 0), | ||
489 | }; | ||
490 | |||
491 | /* Command Table Format */ | ||
492 | enum ct_format { | ||
493 | /* SSP */ | ||
494 | SSP_F_H = 0x00, | ||
495 | SSP_F_IU = 0x18, | ||
496 | SSP_F_MAX = 0x4D, | ||
497 | /* STP */ | ||
498 | STP_CMD_FIS = 0x00, | ||
499 | STP_ATAPI_CMD = 0x40, | ||
500 | STP_F_MAX = 0x10, | ||
501 | /* SMP */ | ||
502 | SMP_F_T = 0x00, | ||
503 | SMP_F_DEP = 0x01, | ||
504 | SMP_F_MAX = 0x101, | ||
505 | }; | ||
506 | |||
507 | enum status_buffer { | ||
508 | SB_EIR_OFF = 0x00, /* Error Information Record */ | ||
509 | SB_RFB_OFF = 0x08, /* Response Frame Buffer */ | ||
510 | SB_RFB_MAX = 0x400, /* RFB size*/ | ||
511 | }; | ||
512 | |||
513 | enum error_info_rec { | ||
514 | CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */ | ||
515 | }; | ||
516 | |||
517 | struct mvs_chip_info { | ||
518 | u32 n_phy; | ||
519 | u32 srs_sz; | ||
520 | u32 slot_width; | ||
521 | }; | ||
522 | |||
523 | struct mvs_err_info { | ||
524 | __le32 flags; | ||
525 | __le32 flags2; | ||
526 | }; | ||
527 | |||
528 | struct mvs_prd { | ||
529 | __le64 addr; /* 64-bit buffer address */ | ||
530 | __le32 reserved; | ||
531 | __le32 len; /* 16-bit length */ | ||
532 | }; | ||
533 | |||
534 | struct mvs_cmd_hdr { | ||
535 | __le32 flags; /* PRD tbl len; SAS, SATA ctl */ | ||
536 | __le32 lens; /* cmd, max resp frame len */ | ||
537 | __le32 tags; /* targ port xfer tag; tag */ | ||
538 | __le32 data_len; /* data xfer len */ | ||
539 | __le64 cmd_tbl; /* command table address */ | ||
540 | __le64 open_frame; /* open addr frame address */ | ||
541 | __le64 status_buf; /* status buffer address */ | ||
542 | __le64 prd_tbl; /* PRD tbl address */ | ||
543 | __le32 reserved[4]; | ||
544 | }; | ||
545 | |||
546 | struct mvs_slot_info { | ||
547 | struct sas_task *task; | ||
548 | u32 n_elem; | ||
549 | u32 tx; | ||
550 | |||
551 | /* DMA buffer for storing cmd tbl, open addr frame, status buffer, | ||
552 | * and PRD table | ||
553 | */ | ||
554 | void *buf; | ||
555 | dma_addr_t buf_dma; | ||
556 | #if _MV_DUMP | ||
557 | u32 cmd_size; | ||
558 | #endif | ||
559 | |||
560 | void *response; | ||
561 | }; | ||
562 | |||
563 | struct mvs_port { | ||
564 | struct asd_sas_port sas_port; | ||
565 | u8 port_attached; | ||
566 | u8 taskfileset; | ||
567 | u8 wide_port_phymap; | ||
568 | }; | ||
569 | |||
570 | struct mvs_phy { | ||
571 | struct mvs_port *port; | ||
572 | struct asd_sas_phy sas_phy; | ||
573 | struct sas_identify identify; | ||
574 | struct scsi_device *sdev; | ||
575 | u64 dev_sas_addr; | ||
576 | u64 att_dev_sas_addr; | ||
577 | u32 att_dev_info; | ||
578 | u32 dev_info; | ||
579 | u32 phy_type; | ||
580 | u32 phy_status; | ||
581 | u32 irq_status; | ||
582 | u32 frame_rcvd_size; | ||
583 | u8 frame_rcvd[32]; | ||
584 | u8 phy_attached; | ||
585 | }; | ||
586 | |||
587 | struct mvs_info { | ||
588 | unsigned long flags; | ||
589 | |||
590 | spinlock_t lock; /* host-wide lock */ | ||
591 | struct pci_dev *pdev; /* our device */ | ||
592 | void __iomem *regs; /* enhanced mode registers */ | ||
593 | void __iomem *peri_regs; /* peripheral registers */ | ||
594 | |||
595 | u8 sas_addr[SAS_ADDR_SIZE]; | ||
596 | struct sas_ha_struct sas; /* SCSI/SAS glue */ | ||
597 | struct Scsi_Host *shost; | ||
598 | |||
599 | __le32 *tx; /* TX (delivery) DMA ring */ | ||
600 | dma_addr_t tx_dma; | ||
601 | u32 tx_prod; /* cached next-producer idx */ | ||
602 | |||
603 | __le32 *rx; /* RX (completion) DMA ring */ | ||
604 | dma_addr_t rx_dma; | ||
605 | u32 rx_cons; /* RX consumer idx */ | ||
606 | |||
607 | __le32 *rx_fis; /* RX'd FIS area */ | ||
608 | dma_addr_t rx_fis_dma; | ||
609 | |||
610 | struct mvs_cmd_hdr *slot; /* DMA command header slots */ | ||
611 | dma_addr_t slot_dma; | ||
612 | |||
613 | const struct mvs_chip_info *chip; | ||
614 | |||
615 | unsigned long tags[MVS_SLOTS]; | ||
616 | struct mvs_slot_info slot_info[MVS_SLOTS]; | ||
617 | /* further per-slot information */ | ||
618 | struct mvs_phy phy[MVS_MAX_PHYS]; | ||
619 | struct mvs_port port[MVS_MAX_PHYS]; | ||
620 | |||
621 | u32 can_queue; /* per adapter */ | ||
622 | u32 tag_out; /*Get*/ | ||
623 | u32 tag_in; /*Give*/ | ||
624 | }; | ||
625 | |||
626 | struct mvs_queue_task { | ||
627 | struct list_head list; | ||
628 | |||
629 | void *uldd_task; | ||
630 | }; | ||
631 | |||
632 | static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, | ||
633 | void *funcdata); | ||
634 | static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port); | ||
635 | static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val); | ||
636 | static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port); | ||
637 | static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val); | ||
638 | static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val); | ||
639 | static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port); | ||
640 | |||
641 | static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i); | ||
642 | static void mvs_detect_porttype(struct mvs_info *mvi, int i); | ||
643 | static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); | ||
644 | |||
645 | static int mvs_scan_finished(struct Scsi_Host *, unsigned long); | ||
646 | static void mvs_scan_start(struct Scsi_Host *); | ||
647 | static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev); | ||
648 | |||
649 | static struct scsi_transport_template *mvs_stt; | ||
650 | |||
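/*
 * Per-chip parameters in struct mvs_chip_info field order
 * { n_phy, srs_sz, slot_width }: the 6440 entry, for example, means
 * 4 phys, 16 SATA register sets and 1 << 9 = 512 command slot entries.
 */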
651 | static const struct mvs_chip_info mvs_chips[] = { | ||
652 | [chip_6320] = { 2, 16, 9 }, | ||
653 | [chip_6440] = { 4, 16, 9 }, | ||
654 | [chip_6480] = { 8, 32, 10 }, | ||
655 | }; | ||
656 | |||
657 | static struct scsi_host_template mvs_sht = { | ||
658 | .module = THIS_MODULE, | ||
659 | .name = DRV_NAME, | ||
660 | .queuecommand = sas_queuecommand, | ||
661 | .target_alloc = sas_target_alloc, | ||
662 | .slave_configure = sas_slave_configure, | ||
663 | .slave_destroy = sas_slave_destroy, | ||
664 | .scan_finished = mvs_scan_finished, | ||
665 | .scan_start = mvs_scan_start, | ||
666 | .change_queue_depth = sas_change_queue_depth, | ||
667 | .change_queue_type = sas_change_queue_type, | ||
668 | .bios_param = sas_bios_param, | ||
669 | .can_queue = 1, | ||
670 | .cmd_per_lun = 1, | ||
671 | .this_id = -1, | ||
672 | .sg_tablesize = SG_ALL, | ||
673 | .max_sectors = SCSI_DEFAULT_MAX_SECTORS, | ||
674 | .use_clustering = ENABLE_CLUSTERING, | ||
675 | .eh_device_reset_handler = sas_eh_device_reset_handler, | ||
676 | .eh_bus_reset_handler = sas_eh_bus_reset_handler, | ||
677 | .slave_alloc = mvs_sas_slave_alloc, | ||
678 | .target_destroy = sas_target_destroy, | ||
679 | .ioctl = sas_ioctl, | ||
680 | }; | ||
681 | |||
682 | static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) | ||
683 | { | ||
684 | u32 i; | ||
685 | u32 run; | ||
686 | u32 offset; | ||
687 | |||
688 | offset = 0; | ||
689 | while (size) { | ||
690 | printk("%08X : ", baseaddr + offset); | ||
691 | if (size >= 16) | ||
692 | run = 16; | ||
693 | else | ||
694 | run = size; | ||
695 | size -= run; | ||
696 | for (i = 0; i < 16; i++) { | ||
697 | if (i < run) | ||
698 | printk("%02X ", (u32)data[i]); | ||
699 | else | ||
700 | printk(" "); | ||
701 | } | ||
702 | printk(": "); | ||
703 | for (i = 0; i < run; i++) | ||
704 | printk("%c", isalnum(data[i]) ? data[i] : '.'); | ||
705 | printk("\n"); | ||
706 | data = &data[16]; | ||
707 | offset += run; | ||
708 | } | ||
709 | printk("\n"); | ||
710 | } | ||
711 | |||
712 | static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, | ||
713 | enum sas_protocol proto) | ||
714 | { | ||
715 | #if _MV_DUMP | ||
716 | u32 offset; | ||
717 | struct pci_dev *pdev = mvi->pdev; | ||
718 | struct mvs_slot_info *slot = &mvi->slot_info[tag]; | ||
719 | |||
720 | offset = slot->cmd_size + MVS_OAF_SZ + | ||
721 | sizeof(struct mvs_prd) * slot->n_elem; | ||
722 | dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n", | ||
723 | tag); | ||
724 | mvs_hexdump(32, (u8 *) slot->response, | ||
725 | (u32) slot->buf_dma + offset); | ||
726 | #endif | ||
727 | } | ||
728 | |||
729 | static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag, | ||
730 | enum sas_protocol proto) | ||
731 | { | ||
732 | #if _MV_DUMP | ||
733 | u32 sz, w_ptr, r_ptr; | ||
734 | u64 addr; | ||
735 | void __iomem *regs = mvi->regs; | ||
736 | struct pci_dev *pdev = mvi->pdev; | ||
737 | struct mvs_slot_info *slot = &mvi->slot_info[tag]; | ||
738 | |||
739 | /*Delivery Queue */ | ||
740 | sz = mr32(TX_CFG) & TX_RING_SZ_MASK; | ||
741 | w_ptr = mr32(TX_PROD_IDX) & TX_RING_SZ_MASK; | ||
742 | r_ptr = mr32(TX_CONS_IDX) & TX_RING_SZ_MASK; | ||
743 | addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO); | ||
744 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
745 | "Delivery Queue Size=%04d , WRT_PTR=%04X , RD_PTR=%04X\n", | ||
746 | sz, w_ptr, r_ptr); | ||
747 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
748 | "Delivery Queue Base Address=0x%llX (PA)" | ||
749 | "(tx_dma=0x%llX), Entry=%04d\n", | ||
750 | addr, mvi->tx_dma, w_ptr); | ||
751 | mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]), | ||
752 | (u32) mvi->tx_dma + sizeof(u32) * w_ptr); | ||
753 | /*Command List */ | ||
754 | addr = mr32(CMD_LIST_HI) << 16 << 16 | mr32(CMD_LIST_LO); | ||
755 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
756 | "Command List Base Address=0x%llX (PA)" | ||
757 | "(slot_dma=0x%llX), Header=%03d\n", | ||
758 | addr, mvi->slot_dma, tag); | ||
759 | dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag); | ||
760 | /*mvs_cmd_hdr */ | ||
761 | mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]), | ||
762 | (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr)); | ||
763 | /*1.command table area */ | ||
764 | dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n"); | ||
765 | mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma); | ||
766 | /*2.open address frame area */ | ||
767 | dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n"); | ||
768 | mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size, | ||
769 | (u32) slot->buf_dma + slot->cmd_size); | ||
770 | /*3.status buffer */ | ||
771 | mvs_hba_sb_dump(mvi, tag, proto); | ||
772 | /*4.PRD table */ | ||
773 | dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n"); | ||
774 | mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem, | ||
775 | (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ, | ||
776 | (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ); | ||
777 | #endif | ||
778 | } | ||
779 | |||
780 | static void mvs_hba_cq_dump(struct mvs_info *mvi) | ||
781 | { | ||
782 | #if _MV_DUMP | ||
783 | u64 addr; | ||
784 | void __iomem *regs = mvi->regs; | ||
785 | struct pci_dev *pdev = mvi->pdev; | ||
786 | u32 entry = mvi->rx_cons + 1; | ||
787 | u32 rx_desc = le32_to_cpu(mvi->rx[entry]); | ||
788 | |||
789 | /*Completion Queue */ | ||
790 | addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO); | ||
791 | dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%08X\n", | ||
792 | (u32) mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task); | ||
793 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
794 | "Completion List Base Address=0x%llX (PA), " | ||
795 | "CQ_Entry=%04d, CQ_WP=0x%08X\n", | ||
796 | addr, entry - 1, mvi->rx[0]); | ||
797 | mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc), | ||
798 | mvi->rx_dma + sizeof(u32) * entry); | ||
799 | #endif | ||
800 | } | ||
801 | |||
802 | static void mvs_hba_interrupt_enable(struct mvs_info *mvi) | ||
803 | { | ||
804 | void __iomem *regs = mvi->regs; | ||
805 | u32 tmp; | ||
806 | |||
807 | tmp = mr32(GBL_CTL); | ||
808 | |||
809 | mw32(GBL_CTL, tmp | INT_EN); | ||
810 | } | ||
811 | |||
812 | static void mvs_hba_interrupt_disable(struct mvs_info *mvi) | ||
813 | { | ||
814 | void __iomem *regs = mvi->regs; | ||
815 | u32 tmp; | ||
816 | |||
817 | tmp = mr32(GBL_CTL); | ||
818 | |||
819 | mw32(GBL_CTL, tmp & ~INT_EN); | ||
820 | } | ||
821 | |||
822 | static int mvs_int_rx(struct mvs_info *mvi, bool self_clear); | ||
823 | |||
824 | /* move to PCI layer or libata core? */ | ||
825 | static int pci_go_64(struct pci_dev *pdev) | ||
826 | { | ||
827 | int rc; | ||
828 | |||
829 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { | ||
830 | rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | ||
831 | if (rc) { | ||
832 | rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | ||
833 | if (rc) { | ||
834 | dev_printk(KERN_ERR, &pdev->dev, | ||
835 | "64-bit DMA enable failed\n"); | ||
836 | return rc; | ||
837 | } | ||
838 | } | ||
839 | } else { | ||
840 | rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
841 | if (rc) { | ||
842 | dev_printk(KERN_ERR, &pdev->dev, | ||
843 | "32-bit DMA enable failed\n"); | ||
844 | return rc; | ||
845 | } | ||
846 | rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | ||
847 | if (rc) { | ||
848 | dev_printk(KERN_ERR, &pdev->dev, | ||
849 | "32-bit consistent DMA enable failed\n"); | ||
850 | return rc; | ||
851 | } | ||
852 | } | ||
853 | |||
854 | return rc; | ||
855 | } | ||
856 | |||
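/*
 * Slot tags are kept in a circular free list: tags[] holds the free
 * tags, tag_out is the "take" index used by mvs_tag_alloc() and tag_in
 * is the "give" index used by mvs_tag_clear().  Both indices wrap with
 * & (MVS_SLOTS - 1), so MVS_SLOTS must remain a power of two.
 */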
857 | static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) | ||
858 | { | ||
859 | mvi->tag_in = (mvi->tag_in + 1) & (MVS_SLOTS - 1); | ||
860 | mvi->tags[mvi->tag_in] = tag; | ||
861 | } | ||
862 | |||
863 | static void mvs_tag_free(struct mvs_info *mvi, u32 tag) | ||
864 | { | ||
865 | mvi->tag_out = (mvi->tag_out - 1) & (MVS_SLOTS - 1); | ||
866 | } | ||
867 | |||
868 | static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) | ||
869 | { | ||
870 | if (mvi->tag_out != mvi->tag_in) { | ||
871 | *tag_out = mvi->tags[mvi->tag_out]; | ||
872 | mvi->tag_out = (mvi->tag_out + 1) & (MVS_SLOTS - 1); | ||
873 | return 0; | ||
874 | } | ||
875 | return -EBUSY; | ||
876 | } | ||
877 | |||
878 | static void mvs_tag_init(struct mvs_info *mvi) | ||
879 | { | ||
880 | int i; | ||
881 | for (i = 0; i < MVS_SLOTS; ++i) | ||
882 | mvi->tags[i] = i; | ||
883 | mvi->tag_out = 0; | ||
884 | mvi->tag_in = MVS_SLOTS - 1; | ||
885 | } | ||
886 | |||
887 | #ifndef MVS_DISABLE_NVRAM | ||
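/*
 * Read one 32-bit word from the SPI/TWSI EEPROM: latch the word address
 * in SPI_CMD, start the transfer via SPI_CTL, poll for TWSI_RDY, then
 * fetch the result from SPI_DATA.  mvs_eep_read_buf() builds arbitrary
 * byte ranges on top of this by reading whole words and copying out the
 * relevant bytes.
 */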
888 | static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data) | ||
889 | { | ||
890 | int timeout = 1000; | ||
891 | |||
892 | if (addr & ~SPI_ADDR_MASK) | ||
893 | return -EINVAL; | ||
894 | |||
895 | writel(addr, regs + SPI_CMD); | ||
896 | writel(TWSI_RD, regs + SPI_CTL); | ||
897 | |||
898 | while (timeout-- > 0) { | ||
899 | if (readl(regs + SPI_CTL) & TWSI_RDY) { | ||
900 | *data = readl(regs + SPI_DATA); | ||
901 | return 0; | ||
902 | } | ||
903 | |||
904 | udelay(10); | ||
905 | } | ||
906 | |||
907 | return -EBUSY; | ||
908 | } | ||
909 | |||
910 | static int mvs_eep_read_buf(void __iomem *regs, u32 addr, | ||
911 | void *buf, u32 buflen) | ||
912 | { | ||
913 | u32 addr_end, tmp_addr, i, j; | ||
914 | u32 tmp = 0; | ||
915 | int rc; | ||
916 | u8 *tmp8, *buf8 = buf; | ||
917 | |||
918 | addr_end = addr + buflen; | ||
919 | tmp_addr = ALIGN(addr, 4); | ||
920 | if (addr > 0xff) | ||
921 | return -EINVAL; | ||
922 | |||
923 | j = addr & 0x3; | ||
924 | if (j) { | ||
925 | rc = mvs_eep_read(regs, tmp_addr, &tmp); | ||
926 | if (rc) | ||
927 | return rc; | ||
928 | |||
929 | tmp8 = (u8 *)&tmp; | ||
930 | for (i = j; i < 4; i++) | ||
931 | *buf8++ = tmp8[i]; | ||
932 | |||
933 | tmp_addr += 4; | ||
934 | } | ||
935 | |||
936 | for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) { | ||
937 | rc = mvs_eep_read(regs, tmp_addr, &tmp); | ||
938 | if (rc) | ||
939 | return rc; | ||
940 | |||
941 | memcpy(buf8, &tmp, 4); | ||
942 | buf8 += 4; | ||
943 | } | ||
944 | |||
945 | if (tmp_addr < addr_end) { | ||
946 | rc = mvs_eep_read(regs, tmp_addr, &tmp); | ||
947 | if (rc) | ||
948 | return rc; | ||
949 | |||
950 | tmp8 = (u8 *)&tmp; | ||
951 | j = addr_end - tmp_addr; | ||
952 | for (i = 0; i < j; i++) | ||
953 | *buf8++ = tmp8[i]; | ||
954 | |||
955 | tmp_addr += 4; | ||
956 | } | ||
957 | |||
958 | return 0; | ||
959 | } | ||
960 | #endif | ||
961 | |||
962 | static int mvs_nvram_read(struct mvs_info *mvi, u32 addr, | ||
963 | void *buf, u32 buflen) | ||
964 | { | ||
965 | #ifndef MVS_DISABLE_NVRAM | ||
966 | void __iomem *regs = mvi->regs; | ||
967 | int rc, i; | ||
968 | u32 sum; | ||
969 | u8 hdr[2], *tmp; | ||
970 | const char *msg; | ||
971 | |||
972 | rc = mvs_eep_read_buf(regs, addr, &hdr, 2); | ||
973 | if (rc) { | ||
974 | msg = "nvram hdr read failed"; | ||
975 | goto err_out; | ||
976 | } | ||
977 | rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen); | ||
978 | if (rc) { | ||
979 | msg = "nvram read failed"; | ||
980 | goto err_out; | ||
981 | } | ||
982 | |||
983 | if (hdr[0] != 0x5A) { | ||
984 | /* entry id */ | ||
985 | msg = "invalid nvram entry id"; | ||
986 | rc = -ENOENT; | ||
987 | goto err_out; | ||
988 | } | ||
989 | |||
990 | tmp = buf; | ||
991 | sum = ((u32)hdr[0]) + ((u32)hdr[1]); | ||
992 | for (i = 0; i < buflen; i++) | ||
993 | sum += ((u32)tmp[i]); | ||
994 | |||
995 | if (sum) { | ||
996 | msg = "nvram checksum failure"; | ||
997 | rc = -EILSEQ; | ||
998 | goto err_out; | ||
999 | } | ||
1000 | |||
1001 | return 0; | ||
1002 | |||
1003 | err_out: | ||
1004 | dev_printk(KERN_ERR, &mvi->pdev->dev, "%s\n", msg); | ||
1005 | return rc; | ||
1006 | #else | ||
1007 | /* FIXME , For SAS target mode */ | ||
1008 | memcpy(buf, "\x00\x00\xab\x11\x30\x04\x05\x50", 8); | ||
1009 | return 0; | ||
1010 | #endif | ||
1011 | } | ||
1012 | |||
1013 | static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) | ||
1014 | { | ||
1015 | struct mvs_phy *phy = &mvi->phy[i]; | ||
1016 | |||
1017 | if (!phy->phy_attached) | ||
1018 | return; | ||
1019 | |||
1020 | if (phy->phy_type & PORT_TYPE_SAS) { | ||
1021 | struct sas_identify_frame *id; | ||
1022 | |||
1023 | id = (struct sas_identify_frame *)phy->frame_rcvd; | ||
1024 | id->dev_type = phy->identify.device_type; | ||
1025 | id->initiator_bits = SAS_PROTOCOL_ALL; | ||
1026 | id->target_bits = phy->identify.target_port_protocols; | ||
1027 | } else if (phy->phy_type & PORT_TYPE_SATA) { | ||
1028 | /* TODO */ | ||
1029 | } | ||
1030 | mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size; | ||
1031 | mvi->sas.notify_port_event(mvi->sas.sas_phy[i], | ||
1032 | PORTE_BYTES_DMAED); | ||
1033 | } | ||
1034 | |||
1035 | static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) | ||
1036 | { | ||
1037 | /* give the phy enabling interrupt event time to come in (1s | ||
1038 | * is empirically about all it takes) */ | ||
1039 | if (time < HZ) | ||
1040 | return 0; | ||
1041 | /* Wait for discovery to finish */ | ||
1042 | scsi_flush_work(shost); | ||
1043 | return 1; | ||
1044 | } | ||
1045 | |||
1046 | static void mvs_scan_start(struct Scsi_Host *shost) | ||
1047 | { | ||
1048 | int i; | ||
1049 | struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha; | ||
1050 | |||
1051 | for (i = 0; i < mvi->chip->n_phy; ++i) { | ||
1052 | mvs_bytes_dmaed(mvi, i); | ||
1053 | } | ||
1054 | } | ||
1055 | |||
1056 | static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev) | ||
1057 | { | ||
1058 | int rc; | ||
1059 | |||
1060 | rc = sas_slave_alloc(scsi_dev); | ||
1061 | |||
1062 | return rc; | ||
1063 | } | ||
1064 | |||
1065 | static void mvs_int_port(struct mvs_info *mvi, int port_no, u32 events) | ||
1066 | { | ||
1067 | struct pci_dev *pdev = mvi->pdev; | ||
1068 | struct sas_ha_struct *sas_ha = &mvi->sas; | ||
1069 | struct mvs_phy *phy = &mvi->phy[port_no]; | ||
1070 | struct asd_sas_phy *sas_phy = &phy->sas_phy; | ||
1071 | |||
1072 | phy->irq_status = mvs_read_port_irq_stat(mvi, port_no); | ||
1073 | /* | ||
1074 | * events now holds the port events; | ||
1075 | * we need to check the per-port interrupt status. | ||
1076 | */ | ||
1077 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
1078 | "Port %d Event = %X\n", | ||
1079 | port_no, phy->irq_status); | ||
1080 | |||
1081 | if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) { | ||
1082 | if (!mvs_is_phy_ready(mvi, port_no)) { | ||
1083 | sas_phy_disconnected(sas_phy); | ||
1084 | sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); | ||
1085 | } else | ||
1086 | mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL); | ||
1087 | } | ||
1088 | if (!(phy->irq_status & PHYEV_DEC_ERR)) { | ||
1089 | if (phy->irq_status & PHYEV_COMWAKE) { | ||
1090 | u32 tmp = mvs_read_port_irq_mask(mvi, port_no); | ||
1091 | mvs_write_port_irq_mask(mvi, port_no, | ||
1092 | tmp | PHYEV_SIG_FIS); | ||
1093 | } | ||
1094 | if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { | ||
1095 | phy->phy_status = mvs_is_phy_ready(mvi, port_no); | ||
1096 | if (phy->phy_status) { | ||
1097 | mvs_detect_porttype(mvi, port_no); | ||
1098 | |||
1099 | if (phy->phy_type & PORT_TYPE_SATA) { | ||
1100 | u32 tmp = mvs_read_port_irq_mask(mvi, | ||
1101 | port_no); | ||
1102 | tmp &= ~PHYEV_SIG_FIS; | ||
1103 | mvs_write_port_irq_mask(mvi, | ||
1104 | port_no, tmp); | ||
1105 | } | ||
1106 | |||
1107 | mvs_update_phyinfo(mvi, port_no, 0); | ||
1108 | sas_ha->notify_phy_event(sas_phy, | ||
1109 | PHYE_OOB_DONE); | ||
1110 | mvs_bytes_dmaed(mvi, port_no); | ||
1111 | } else { | ||
1112 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
1113 | "plugin interrupt but phy is gone\n"); | ||
1114 | mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, | ||
1115 | NULL); | ||
1116 | } | ||
1117 | } else if (phy->irq_status & PHYEV_BROAD_CH) | ||
1118 | sas_ha->notify_port_event(sas_phy, | ||
1119 | PORTE_BROADCAST_RCVD); | ||
1120 | } | ||
1121 | mvs_write_port_irq_stat(mvi, port_no, phy->irq_status); | ||
1122 | } | ||
1123 | |||
1124 | static void mvs_int_sata(struct mvs_info *mvi) | ||
1125 | { | ||
1126 | /* FIXME */ | ||
1127 | } | ||
1128 | |||
1129 | static void mvs_slot_free(struct mvs_info *mvi, struct sas_task *task, | ||
1130 | struct mvs_slot_info *slot, u32 slot_idx) | ||
1131 | { | ||
1132 | if (!sas_protocol_ata(task->task_proto)) | ||
1133 | if (slot->n_elem) | ||
1134 | pci_unmap_sg(mvi->pdev, task->scatter, | ||
1135 | slot->n_elem, task->data_dir); | ||
1136 | |||
1137 | switch (task->task_proto) { | ||
1138 | case SAS_PROTOCOL_SMP: | ||
1139 | pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1, | ||
1140 | PCI_DMA_FROMDEVICE); | ||
1141 | pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1, | ||
1142 | PCI_DMA_TODEVICE); | ||
1143 | break; | ||
1144 | |||
1145 | case SAS_PROTOCOL_SATA: | ||
1146 | case SAS_PROTOCOL_STP: | ||
1147 | case SAS_PROTOCOL_SSP: | ||
1148 | default: | ||
1149 | /* do nothing */ | ||
1150 | break; | ||
1151 | } | ||
1152 | |||
1153 | slot->task = NULL; | ||
1154 | mvs_tag_clear(mvi, slot_idx); | ||
1155 | } | ||
1156 | |||
1157 | static void mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, | ||
1158 | u32 slot_idx) | ||
1159 | { | ||
1160 | struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; | ||
1161 | u64 err_dw0 = *(u32 *) slot->response; | ||
1162 | void __iomem *regs = mvi->regs; | ||
1163 | u32 tmp; | ||
1164 | |||
1165 | if (err_dw0 & CMD_ISS_STPD) | ||
1166 | if (sas_protocol_ata(task->task_proto)) { | ||
1167 | tmp = mr32(INT_STAT_SRS); | ||
1168 | mw32(INT_STAT_SRS, tmp & 0xFFFF); | ||
1169 | } | ||
1170 | |||
1171 | mvs_hba_sb_dump(mvi, slot_idx, task->task_proto); | ||
1172 | } | ||
1173 | |||
1174 | static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc) | ||
1175 | { | ||
1176 | u32 slot_idx = rx_desc & RXQ_SLOT_MASK; | ||
1177 | struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; | ||
1178 | struct sas_task *task = slot->task; | ||
1179 | struct task_status_struct *tstat = &task->task_status; | ||
1180 | struct mvs_port *port = &mvi->port[task->dev->port->id]; | ||
1181 | bool aborted; | ||
1182 | void *to; | ||
1183 | |||
1184 | spin_lock(&task->task_state_lock); | ||
1185 | aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; | ||
1186 | if (!aborted) { | ||
1187 | task->task_state_flags &= | ||
1188 | ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); | ||
1189 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
1190 | } | ||
1191 | spin_unlock(&task->task_state_lock); | ||
1192 | |||
1193 | if (aborted) | ||
1194 | return -1; | ||
1195 | |||
1196 | memset(tstat, 0, sizeof(*tstat)); | ||
1197 | tstat->resp = SAS_TASK_COMPLETE; | ||
1198 | |||
1199 | |||
1200 | if (unlikely(!port->port_attached)) { | ||
1201 | tstat->stat = SAS_PHY_DOWN; | ||
1202 | goto out; | ||
1203 | } | ||
1204 | |||
1205 | /* error info record present */ | ||
1206 | if ((rx_desc & RXQ_ERR) && (*(u64 *) slot->response)) { | ||
1207 | tstat->stat = SAM_CHECK_COND; | ||
1208 | mvs_slot_err(mvi, task, slot_idx); | ||
1209 | goto out; | ||
1210 | } | ||
1211 | |||
1212 | switch (task->task_proto) { | ||
1213 | case SAS_PROTOCOL_SSP: | ||
1214 | /* hw says status == 0, datapres == 0 */ | ||
1215 | if (rx_desc & RXQ_GOOD) { | ||
1216 | tstat->stat = SAM_GOOD; | ||
1217 | tstat->resp = SAS_TASK_COMPLETE; | ||
1218 | } | ||
1219 | /* response frame present */ | ||
1220 | else if (rx_desc & RXQ_RSP) { | ||
1221 | struct ssp_response_iu *iu = | ||
1222 | slot->response + sizeof(struct mvs_err_info); | ||
1223 | sas_ssp_task_response(&mvi->pdev->dev, task, iu); | ||
1224 | } | ||
1225 | |||
1226 | /* should never happen? */ | ||
1227 | else | ||
1228 | tstat->stat = SAM_CHECK_COND; | ||
1229 | break; | ||
1230 | |||
1231 | case SAS_PROTOCOL_SMP: { | ||
1232 | struct scatterlist *sg_resp = &task->smp_task.smp_resp; | ||
1233 | tstat->stat = SAM_GOOD; | ||
1234 | to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); | ||
1235 | memcpy(to + sg_resp->offset, | ||
1236 | slot->response + sizeof(struct mvs_err_info), | ||
1237 | sg_dma_len(sg_resp)); | ||
1238 | kunmap_atomic(to, KM_IRQ0); | ||
1239 | break; | ||
1240 | } | ||
1241 | |||
1242 | case SAS_PROTOCOL_SATA: | ||
1243 | case SAS_PROTOCOL_STP: | ||
1244 | case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { | ||
1245 | struct ata_task_resp *resp = | ||
1246 | (struct ata_task_resp *)tstat->buf; | ||
1247 | |||
1248 | if ((rx_desc & (RXQ_DONE | RXQ_ERR | RXQ_ATTN)) == | ||
1249 | RXQ_DONE) | ||
1250 | tstat->stat = SAM_GOOD; | ||
1251 | else | ||
1252 | tstat->stat = SAM_CHECK_COND; | ||
1253 | |||
1254 | resp->frame_len = sizeof(struct dev_to_host_fis); | ||
1255 | memcpy(&resp->ending_fis[0], | ||
1256 | SATA_RECEIVED_D2H_FIS(port->taskfileset), | ||
1257 | sizeof(struct dev_to_host_fis)); | ||
1258 | if (resp->ending_fis[2] & ATA_ERR) | ||
1259 | mvs_hexdump(16, resp->ending_fis, 0); | ||
1260 | break; | ||
1261 | } | ||
1262 | |||
1263 | default: | ||
1264 | tstat->stat = SAM_CHECK_COND; | ||
1265 | break; | ||
1266 | } | ||
1267 | |||
1268 | out: | ||
1269 | mvs_slot_free(mvi, task, slot, slot_idx); | ||
1270 | task->task_done(task); | ||
1271 | return tstat->stat; | ||
1272 | } | ||
1273 | |||
1274 | static void mvs_int_full(struct mvs_info *mvi) | ||
1275 | { | ||
1276 | void __iomem *regs = mvi->regs; | ||
1277 | u32 tmp, stat; | ||
1278 | int i; | ||
1279 | |||
1280 | stat = mr32(INT_STAT); | ||
1281 | |||
1282 | mvs_int_rx(mvi, false); | ||
1283 | |||
1284 | for (i = 0; i < MVS_MAX_PORTS; i++) { | ||
1285 | tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); | ||
1286 | if (tmp) | ||
1287 | mvs_int_port(mvi, i, tmp); | ||
1288 | } | ||
1289 | |||
1290 | if (stat & CINT_SRS) | ||
1291 | mvs_int_sata(mvi); | ||
1292 | |||
1293 | mw32(INT_STAT, stat); | ||
1294 | } | ||
1295 | |||
1296 | static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) | ||
1297 | { | ||
1298 | void __iomem *regs = mvi->regs; | ||
1299 | u32 rx_prod_idx, rx_desc; | ||
1300 | bool attn = false; | ||
1301 | struct pci_dev *pdev = mvi->pdev; | ||
1302 | |||
1303 | /* the first dword in the RX ring is special: it contains | ||
1304 | * a mirror of the hardware's RX producer index, so that | ||
1305 | * we don't have to stall the CPU reading that register. | ||
1306 | * The actual RX ring is offset by one dword, due to this. | ||
1307 | */ | ||
1308 | rx_prod_idx = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK; | ||
1309 | if (rx_prod_idx == 0xfff) { /* h/w hasn't touched RX ring yet */ | ||
1310 | mvi->rx_cons = 0xfff; | ||
1311 | return 0; | ||
1312 | } | ||
1313 | |||
1314 | /* The CMPL_Q (completion queue) entry may not have been posted yet; | ||
1315 | * re-read the register and try again. | ||
1316 | * Note: if interrupt coalescing is enabled, the register must be re-read every time. | ||
1317 | */ | ||
1318 | if (mvi->rx_cons == rx_prod_idx) | ||
1319 | return 0; | ||
1320 | |||
1321 | if (mvi->rx_cons == 0xfff) | ||
1322 | mvi->rx_cons = MVS_RX_RING_SZ - 1; | ||
1323 | |||
1324 | while (mvi->rx_cons != rx_prod_idx) { | ||
1325 | |||
1326 | /* increment our internal RX consumer pointer */ | ||
1327 | mvi->rx_cons = (mvi->rx_cons + 1) & (MVS_RX_RING_SZ - 1); | ||
1328 | |||
1329 | rx_desc = le32_to_cpu(mvi->rx[mvi->rx_cons + 1]); | ||
1330 | |||
1331 | mvs_hba_cq_dump(mvi); | ||
1332 | |||
1333 | if (unlikely(rx_desc & RXQ_DONE)) | ||
1334 | mvs_slot_complete(mvi, rx_desc); | ||
1335 | if (rx_desc & RXQ_ATTN) { | ||
1336 | attn = true; | ||
1337 | dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n", | ||
1338 | rx_desc); | ||
1339 | } else if (rx_desc & RXQ_ERR) { | ||
1340 | dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n", | ||
1341 | rx_desc); | ||
1342 | } | ||
1343 | } | ||
1344 | |||
1345 | if (attn && self_clear) | ||
1346 | mvs_int_full(mvi); | ||
1347 | |||
1348 | return 0; | ||
1349 | } | ||
1350 | |||
1351 | static irqreturn_t mvs_interrupt(int irq, void *opaque) | ||
1352 | { | ||
1353 | struct mvs_info *mvi = opaque; | ||
1354 | void __iomem *regs = mvi->regs; | ||
1355 | u32 stat; | ||
1356 | |||
1357 | stat = mr32(GBL_INT_STAT); | ||
1358 | |||
1359 | /* clear CMD_CMPLT ASAP */ | ||
1360 | mw32_f(INT_STAT, CINT_DONE); | ||
1361 | |||
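/* stat of all-ones usually means the device is gone (e.g. surprise
 * removal), all-zeroes means the interrupt is not ours; in either
 * case claim nothing and let other handlers on the line run.
 */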
1362 | if (stat == 0 || stat == 0xffffffff) | ||
1363 | return IRQ_NONE; | ||
1364 | |||
1365 | spin_lock(&mvi->lock); | ||
1366 | |||
1367 | mvs_int_full(mvi); | ||
1368 | |||
1369 | spin_unlock(&mvi->lock); | ||
1370 | |||
1371 | return IRQ_HANDLED; | ||
1372 | } | ||
1373 | |||
1374 | #ifndef MVS_DISABLE_MSI | ||
1375 | static irqreturn_t mvs_msi_interrupt(int irq, void *opaque) | ||
1376 | { | ||
1377 | struct mvs_info *mvi = opaque; | ||
1378 | |||
1379 | spin_lock(&mvi->lock); | ||
1380 | |||
1381 | mvs_int_rx(mvi, true); | ||
1382 | |||
1383 | spin_unlock(&mvi->lock); | ||
1384 | |||
1385 | return IRQ_HANDLED; | ||
1386 | } | ||
1387 | #endif | ||
1388 | |||
1389 | struct mvs_task_exec_info { | ||
1390 | struct sas_task *task; | ||
1391 | struct mvs_cmd_hdr *hdr; | ||
1392 | struct mvs_port *port; | ||
1393 | u32 tag; | ||
1394 | int n_elem; | ||
1395 | }; | ||
1396 | |||
1397 | static int mvs_task_prep_smp(struct mvs_info *mvi, | ||
1398 | struct mvs_task_exec_info *tei) | ||
1399 | { | ||
1400 | int elem, rc, i; | ||
1401 | struct sas_task *task = tei->task; | ||
1402 | struct mvs_cmd_hdr *hdr = tei->hdr; | ||
1403 | struct scatterlist *sg_req, *sg_resp; | ||
1404 | u32 req_len, resp_len, tag = tei->tag; | ||
1405 | void *buf_tmp; | ||
1406 | u8 *buf_oaf; | ||
1407 | dma_addr_t buf_tmp_dma; | ||
1408 | struct mvs_prd *buf_prd; | ||
1409 | struct scatterlist *sg; | ||
1410 | struct mvs_slot_info *slot = &mvi->slot_info[tag]; | ||
1411 | struct asd_sas_port *sas_port = task->dev->port; | ||
1412 | u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); | ||
1413 | #if _MV_DUMP | ||
1414 | u8 *buf_cmd; | ||
1415 | void *from; | ||
1416 | #endif | ||
1417 | /* | ||
1418 | * DMA-map SMP request, response buffers | ||
1419 | */ | ||
1420 | sg_req = &task->smp_task.smp_req; | ||
1421 | elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE); | ||
1422 | if (!elem) | ||
1423 | return -ENOMEM; | ||
1424 | req_len = sg_dma_len(sg_req); | ||
1425 | |||
1426 | sg_resp = &task->smp_task.smp_resp; | ||
1427 | elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE); | ||
1428 | if (!elem) { | ||
1429 | rc = -ENOMEM; | ||
1430 | goto err_out; | ||
1431 | } | ||
1432 | resp_len = sg_dma_len(sg_resp); | ||
1433 | |||
1434 | /* must be in dwords */ | ||
1435 | if ((req_len & 0x3) || (resp_len & 0x3)) { | ||
1436 | rc = -EINVAL; | ||
1437 | goto err_out_2; | ||
1438 | } | ||
1439 | |||
1440 | /* | ||
1441 | * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs | ||
1442 | */ | ||
1443 | |||
1444 | /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ | ||
1445 | buf_tmp = slot->buf; | ||
1446 | buf_tmp_dma = slot->buf_dma; | ||
1447 | |||
1448 | #if _MV_DUMP | ||
1449 | buf_cmd = buf_tmp; | ||
1450 | hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); | ||
1451 | buf_tmp += req_len; | ||
1452 | buf_tmp_dma += req_len; | ||
1453 | slot->cmd_size = req_len; | ||
1454 | #else | ||
1455 | hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); | ||
1456 | #endif | ||
1457 | |||
1458 | /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ | ||
1459 | buf_oaf = buf_tmp; | ||
1460 | hdr->open_frame = cpu_to_le64(buf_tmp_dma); | ||
1461 | |||
1462 | buf_tmp += MVS_OAF_SZ; | ||
1463 | buf_tmp_dma += MVS_OAF_SZ; | ||
1464 | |||
1465 | /* region 3: PRD table ********************************************* */ | ||
1466 | buf_prd = buf_tmp; | ||
1467 | if (tei->n_elem) | ||
1468 | hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); | ||
1469 | else | ||
1470 | hdr->prd_tbl = 0; | ||
1471 | |||
1472 | i = sizeof(struct mvs_prd) * tei->n_elem; | ||
1473 | buf_tmp += i; | ||
1474 | buf_tmp_dma += i; | ||
1475 | |||
1476 | /* region 4: status buffer (the larger the PRD table, the smaller this buffer) */ | ||
1477 | slot->response = buf_tmp; | ||
1478 | hdr->status_buf = cpu_to_le64(buf_tmp_dma); | ||
1479 | |||
1480 | /* | ||
1481 | * Fill in TX ring and command slot header | ||
1482 | */ | ||
1483 | slot->tx = mvi->tx_prod; | ||
1484 | mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | | ||
1485 | TXQ_MODE_I | tag | | ||
1486 | (sas_port->phy_mask << TXQ_PHY_SHIFT)); | ||
1487 | |||
1488 | hdr->flags |= flags; | ||
1489 | hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); | ||
1490 | hdr->tags = cpu_to_le32(tag); | ||
1491 | hdr->data_len = 0; | ||
1492 | |||
1493 | /* generate open address frame hdr (first 12 bytes) */ | ||
1494 | buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */ | ||
1495 | buf_oaf[1] = task->dev->linkrate & 0xf; | ||
1496 | *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ | ||
1497 | memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); | ||
1498 | |||
1499 | /* fill in PRD (scatter/gather) table, if any */ | ||
1500 | for_each_sg(task->scatter, sg, tei->n_elem, i) { | ||
1501 | buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); | ||
1502 | buf_prd->len = cpu_to_le32(sg_dma_len(sg)); | ||
1503 | buf_prd++; | ||
1504 | } | ||
1505 | |||
1506 | #if _MV_DUMP | ||
1507 | /* copy cmd table */ | ||
1508 | from = kmap_atomic(sg_page(sg_req), KM_IRQ0); | ||
1509 | memcpy(buf_cmd, from + sg_req->offset, req_len); | ||
1510 | kunmap_atomic(from, KM_IRQ0); | ||
1511 | #endif | ||
1512 | return 0; | ||
1513 | |||
1514 | err_out_2: | ||
1515 | pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1, | ||
1516 | PCI_DMA_FROMDEVICE); | ||
1517 | err_out: | ||
1518 | pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1, | ||
1519 | PCI_DMA_TODEVICE); | ||
1520 | return rc; | ||
1521 | } | ||
1522 | |||
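/*
 * Each port carrying SATA/STP traffic is assigned a hardware "SATA
 * register set" (port->taskfileset).  The enable bits for sets 0-15
 * live in MVS_PCS and those for higher sets in MVS_CTL;
 * mvs_assign_reg_set() scans for a free bit and claims it, and
 * mvs_free_reg_set() releases it again.
 */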
1523 | static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port) | ||
1524 | { | ||
1525 | void __iomem *regs = mvi->regs; | ||
1526 | u32 tmp, offs; | ||
1527 | u8 *tfs = &port->taskfileset; | ||
1528 | |||
1529 | if (*tfs == MVS_ID_NOT_MAPPED) | ||
1530 | return; | ||
1531 | |||
1532 | offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); | ||
1533 | if (*tfs < 16) { | ||
1534 | tmp = mr32(PCS); | ||
1535 | mw32(PCS, tmp & ~offs); | ||
1536 | } else { | ||
1537 | tmp = mr32(CTL); | ||
1538 | mw32(CTL, tmp & ~offs); | ||
1539 | } | ||
1540 | |||
1541 | tmp = mr32(INT_STAT_SRS) & (1U << *tfs); | ||
1542 | if (tmp) | ||
1543 | mw32(INT_STAT_SRS, tmp); | ||
1544 | |||
1545 | *tfs = MVS_ID_NOT_MAPPED; | ||
1546 | } | ||
1547 | |||
1548 | static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port) | ||
1549 | { | ||
1550 | int i; | ||
1551 | u32 tmp, offs; | ||
1552 | void __iomem *regs = mvi->regs; | ||
1553 | |||
1554 | if (port->taskfileset != MVS_ID_NOT_MAPPED) | ||
1555 | return 0; | ||
1556 | |||
1557 | tmp = mr32(PCS); | ||
1558 | |||
1559 | for (i = 0; i < mvi->chip->srs_sz; i++) { | ||
1560 | if (i == 16) | ||
1561 | tmp = mr32(CTL); | ||
1562 | offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); | ||
1563 | if (!(tmp & offs)) { | ||
1564 | port->taskfileset = i; | ||
1565 | |||
1566 | if (i < 16) | ||
1567 | mw32(PCS, tmp | offs); | ||
1568 | else | ||
1569 | mw32(CTL, tmp | offs); | ||
1570 | tmp = mr32(INT_STAT_SRS) & (1U << i); | ||
1571 | if (tmp) | ||
1572 | mw32(INT_STAT_SRS, tmp); | ||
1573 | return 0; | ||
1574 | } | ||
1575 | } | ||
1576 | return MVS_ID_NOT_MAPPED; | ||
1577 | } | ||
1578 | |||
1579 | static u32 mvs_get_ncq_tag(struct sas_task *task) | ||
1580 | { | ||
1581 | u32 tag = 0; | ||
1582 | struct ata_queued_cmd *qc = task->uldd_task; | ||
1583 | |||
1584 | if (qc) | ||
1585 | tag = qc->tag; | ||
1586 | |||
1587 | return tag; | ||
1588 | } | ||
1589 | |||
1590 | static int mvs_task_prep_ata(struct mvs_info *mvi, | ||
1591 | struct mvs_task_exec_info *tei) | ||
1592 | { | ||
1593 | struct sas_task *task = tei->task; | ||
1594 | struct domain_device *dev = task->dev; | ||
1595 | struct mvs_cmd_hdr *hdr = tei->hdr; | ||
1596 | struct asd_sas_port *sas_port = dev->port; | ||
1597 | struct mvs_slot_info *slot; | ||
1598 | struct scatterlist *sg; | ||
1599 | struct mvs_prd *buf_prd; | ||
1600 | struct mvs_port *port = tei->port; | ||
1601 | u32 tag = tei->tag; | ||
1602 | u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); | ||
1603 | void *buf_tmp; | ||
1604 | u8 *buf_cmd, *buf_oaf; | ||
1605 | dma_addr_t buf_tmp_dma; | ||
1606 | u32 i, req_len, resp_len; | ||
1607 | const u32 max_resp_len = SB_RFB_MAX; | ||
1608 | |||
1609 | if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED) | ||
1610 | return -EBUSY; | ||
1611 | |||
1612 | slot = &mvi->slot_info[tag]; | ||
1613 | slot->tx = mvi->tx_prod; | ||
1614 | mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | | ||
1615 | (TXQ_CMD_STP << TXQ_CMD_SHIFT) | | ||
1616 | (sas_port->phy_mask << TXQ_PHY_SHIFT) | | ||
1617 | (port->taskfileset << TXQ_SRS_SHIFT)); | ||
1618 | |||
1619 | if (task->ata_task.use_ncq) | ||
1620 | flags |= MCH_FPDMA; | ||
1621 | if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { | ||
1622 | if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI) | ||
1623 | flags |= MCH_ATAPI; | ||
1624 | } | ||
1625 | |||
1626 | /* FIXME: fill in port multiplier number */ | ||
1627 | |||
1628 | hdr->flags = cpu_to_le32(flags); | ||
1629 | |||
1630 | /* FIXME: the low order 5 bits for the TAG if NCQ is enabled */ | ||
1631 | if (task->ata_task.use_ncq) { | ||
1632 | hdr->tags = cpu_to_le32(mvs_get_ncq_tag(task)); | ||
1633 | /* fill in the task file */ | ||
1634 | task->ata_task.fis.sector_count = hdr->tags << 3; | ||
1635 | } else | ||
1636 | hdr->tags = cpu_to_le32(tag); | ||
1637 | hdr->data_len = cpu_to_le32(task->total_xfer_len); | ||
1638 | |||
1639 | /* | ||
1640 | * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs | ||
1641 | */ | ||
1642 | |||
1643 | /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */ | ||
1644 | buf_cmd = buf_tmp = slot->buf; | ||
1645 | buf_tmp_dma = slot->buf_dma; | ||
1646 | |||
1647 | hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); | ||
1648 | |||
1649 | buf_tmp += MVS_ATA_CMD_SZ; | ||
1650 | buf_tmp_dma += MVS_ATA_CMD_SZ; | ||
1651 | #if _MV_DUMP | ||
1652 | slot->cmd_size = MVS_ATA_CMD_SZ; | ||
1653 | #endif | ||
1654 | |||
1655 | /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ | ||
1656 | /* used for STP. unused for SATA? */ | ||
1657 | buf_oaf = buf_tmp; | ||
1658 | hdr->open_frame = cpu_to_le64(buf_tmp_dma); | ||
1659 | |||
1660 | buf_tmp += MVS_OAF_SZ; | ||
1661 | buf_tmp_dma += MVS_OAF_SZ; | ||
1662 | |||
1663 | /* region 3: PRD table ********************************************* */ | ||
1664 | buf_prd = buf_tmp; | ||
1665 | if (tei->n_elem) | ||
1666 | hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); | ||
1667 | else | ||
1668 | hdr->prd_tbl = 0; | ||
1669 | |||
1670 | i = sizeof(struct mvs_prd) * tei->n_elem; | ||
1671 | buf_tmp += i; | ||
1672 | buf_tmp_dma += i; | ||
1673 | |||
1674 | /* region 4: status buffer (the larger the PRD, the smaller this buf) ** */ | ||
1675 | /* FIXME: probably unused, for SATA. kept here just in case | ||
1676 | * we get a STP/SATA error information record | ||
1677 | */ | ||
1678 | slot->response = buf_tmp; | ||
1679 | hdr->status_buf = cpu_to_le64(buf_tmp_dma); | ||
1680 | |||
1681 | req_len = sizeof(struct host_to_dev_fis); | ||
1682 | resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - | ||
1683 | sizeof(struct mvs_err_info) - i; | ||
1684 | |||
1685 | /* request, response lengths */ | ||
1686 | resp_len = min(resp_len, max_resp_len); | ||
1687 | hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); | ||
1688 | |||
1689 | task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ | ||
1690 | /* fill in command FIS and ATAPI CDB */ | ||
1691 | memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); | ||
1692 | if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) | ||
1693 | memcpy(buf_cmd + STP_ATAPI_CMD, | ||
1694 | task->ata_task.atapi_packet, 16); | ||
1695 | |||
1696 | /* generate open address frame hdr (first 12 bytes) */ | ||
1697 | buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */ | ||
1698 | buf_oaf[1] = task->dev->linkrate & 0xf; | ||
1699 | *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); | ||
1700 | memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); | ||
1701 | |||
1702 | /* fill in PRD (scatter/gather) table, if any */ | ||
1703 | for_each_sg(task->scatter, sg, tei->n_elem, i) { | ||
1704 | buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); | ||
1705 | buf_prd->len = cpu_to_le32(sg_dma_len(sg)); | ||
1706 | buf_prd++; | ||
1707 | } | ||
1708 | |||
1709 | return 0; | ||
1710 | } | ||
1711 | |||
1712 | static int mvs_task_prep_ssp(struct mvs_info *mvi, | ||
1713 | struct mvs_task_exec_info *tei) | ||
1714 | { | ||
1715 | struct sas_task *task = tei->task; | ||
1716 | struct mvs_cmd_hdr *hdr = tei->hdr; | ||
1717 | struct mvs_port *port = tei->port; | ||
1718 | struct mvs_slot_info *slot; | ||
1719 | struct scatterlist *sg; | ||
1720 | struct mvs_prd *buf_prd; | ||
1721 | struct ssp_frame_hdr *ssp_hdr; | ||
1722 | void *buf_tmp; | ||
1723 | u8 *buf_cmd, *buf_oaf, fburst = 0; | ||
1724 | dma_addr_t buf_tmp_dma; | ||
1725 | u32 flags; | ||
1726 | u32 resp_len, req_len, i, tag = tei->tag; | ||
1727 | const u32 max_resp_len = SB_RFB_MAX; | ||
1728 | |||
1729 | slot = &mvi->slot_info[tag]; | ||
1730 | |||
1731 | slot->tx = mvi->tx_prod; | ||
1732 | mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | | ||
1733 | (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | | ||
1734 | (port->wide_port_phymap << TXQ_PHY_SHIFT)); | ||
1735 | |||
1736 | flags = MCH_RETRY; | ||
1737 | if (task->ssp_task.enable_first_burst) { | ||
1738 | flags |= MCH_FBURST; | ||
1739 | fburst = (1 << 7); | ||
1740 | } | ||
1741 | hdr->flags = cpu_to_le32(flags | | ||
1742 | (tei->n_elem << MCH_PRD_LEN_SHIFT) | | ||
1743 | (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT)); | ||
1744 | |||
1745 | hdr->tags = cpu_to_le32(tag); | ||
1746 | hdr->data_len = cpu_to_le32(task->total_xfer_len); | ||
1747 | |||
1748 | /* | ||
1749 | * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs | ||
1750 | */ | ||
1751 | |||
1752 | /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ | ||
1753 | buf_cmd = buf_tmp = slot->buf; | ||
1754 | buf_tmp_dma = slot->buf_dma; | ||
1755 | |||
1756 | hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); | ||
1757 | |||
1758 | buf_tmp += MVS_SSP_CMD_SZ; | ||
1759 | buf_tmp_dma += MVS_SSP_CMD_SZ; | ||
1760 | #if _MV_DUMP | ||
1761 | slot->cmd_size = MVS_SSP_CMD_SZ; | ||
1762 | #endif | ||
1763 | |||
1764 | /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ | ||
1765 | buf_oaf = buf_tmp; | ||
1766 | hdr->open_frame = cpu_to_le64(buf_tmp_dma); | ||
1767 | |||
1768 | buf_tmp += MVS_OAF_SZ; | ||
1769 | buf_tmp_dma += MVS_OAF_SZ; | ||
1770 | |||
1771 | /* region 3: PRD table ********************************************* */ | ||
1772 | buf_prd = buf_tmp; | ||
1773 | if (tei->n_elem) | ||
1774 | hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); | ||
1775 | else | ||
1776 | hdr->prd_tbl = 0; | ||
1777 | |||
1778 | i = sizeof(struct mvs_prd) * tei->n_elem; | ||
1779 | buf_tmp += i; | ||
1780 | buf_tmp_dma += i; | ||
1781 | |||
1782 | /* region 4: status buffer (the larger the PRD, the smaller this buf) ** */ | ||
1783 | slot->response = buf_tmp; | ||
1784 | hdr->status_buf = cpu_to_le64(buf_tmp_dma); | ||
1785 | |||
1786 | resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - | ||
1787 | sizeof(struct mvs_err_info) - i; | ||
1788 | resp_len = min(resp_len, max_resp_len); | ||
1789 | |||
1790 | req_len = sizeof(struct ssp_frame_hdr) + 28; | ||
1791 | |||
1792 | /* request, response lengths */ | ||
1793 | hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); | ||
1794 | |||
1795 | /* generate open address frame hdr (first 12 bytes) */ | ||
1796 | buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */ | ||
1797 | buf_oaf[1] = task->dev->linkrate & 0xf; | ||
1798 | *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); | ||
1799 | memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); | ||
1800 | |||
1801 | /* fill in SSP frame header (Command Table.SSP frame header) */ | ||
1802 | ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; | ||
1803 | ssp_hdr->frame_type = SSP_COMMAND; | ||
1804 | memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr, | ||
1805 | HASHED_SAS_ADDR_SIZE); | ||
1806 | memcpy(ssp_hdr->hashed_src_addr, | ||
1807 | task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); | ||
1808 | ssp_hdr->tag = cpu_to_be16(tag); | ||
1809 | |||
1810 | /* fill in command frame IU */ | ||
1811 | buf_cmd += sizeof(*ssp_hdr); | ||
1812 | memcpy(buf_cmd, &task->ssp_task.LUN, 8); | ||
1813 | buf_cmd[9] = fburst | task->ssp_task.task_attr | | ||
1814 | (task->ssp_task.task_prio << 3); | ||
1815 | memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); | ||
1816 | |||
1817 | /* fill in PRD (scatter/gather) table, if any */ | ||
1818 | for_each_sg(task->scatter, sg, tei->n_elem, i) { | ||
1819 | buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); | ||
1820 | buf_prd->len = cpu_to_le32(sg_dma_len(sg)); | ||
1821 | buf_prd++; | ||
1822 | } | ||
1823 | |||
1824 | return 0; | ||
1825 | } | ||
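Both prep routines carve one MVS_SLOT_BUF_SZ DMA buffer into the same four regions: command table, open address frame, PRD entries, and whatever space remains for the response/status area, clamped to the receive-frame maximum. A minimal sketch of that arithmetic, following the SSP variant, is below; every size here is a made-up stand-in for the real MVS_* constants.

#include <stdio.h>

/* Illustrative sizes only -- stand-ins for the real MVS_* constants. */
#define SLOT_BUF_SZ   8192
#define CMD_SZ        256     /* MVS_SSP_CMD_SZ stand-in */
#define OAF_SZ        64      /* MVS_OAF_SZ stand-in */
#define PRD_ENTRY_SZ  16      /* sizeof(struct mvs_prd) stand-in */
#define ERR_INFO_SZ   64      /* sizeof(struct mvs_err_info) stand-in */
#define RESP_MAX      1024    /* SB_RFB_MAX stand-in */

int main(void)
{
	unsigned int n_elem = 8;      /* scatter/gather entries for this task */
	unsigned int off = 0;

	unsigned int cmd_off = off;   off += CMD_SZ;                  /* region 1 */
	unsigned int oaf_off = off;   off += OAF_SZ;                  /* region 2 */
	unsigned int prd_off = off;   off += PRD_ENTRY_SZ * n_elem;   /* region 3 */
	unsigned int resp_off = off;                                  /* region 4 */

	/* Whatever remains (minus room for an error record) is the response
	 * buffer, clamped to the receive-frame maximum as in the driver. */
	unsigned int resp_len = SLOT_BUF_SZ - CMD_SZ - OAF_SZ - ERR_INFO_SZ
				- PRD_ENTRY_SZ * n_elem;
	if (resp_len > RESP_MAX)
		resp_len = RESP_MAX;

	printf("cmd@%u oaf@%u prd@%u resp@%u resp_len=%u\n",
	       cmd_off, oaf_off, prd_off, resp_off, resp_len);
	return 0;
}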
1826 | |||
1827 | static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) | ||
1828 | { | ||
1829 | struct domain_device *dev = task->dev; | ||
1830 | struct mvs_info *mvi = dev->port->ha->lldd_ha; | ||
1831 | struct pci_dev *pdev = mvi->pdev; | ||
1832 | void __iomem *regs = mvi->regs; | ||
1833 | struct mvs_task_exec_info tei; | ||
1834 | struct sas_task *t = task; | ||
1835 | u32 tag = 0xdeadbeef, rc, n_elem = 0; | ||
1836 | unsigned long flags; | ||
1837 | u32 n = num, pass = 0; | ||
1838 | |||
1839 | spin_lock_irqsave(&mvi->lock, flags); | ||
1840 | |||
1841 | do { | ||
1842 | tei.port = &mvi->port[dev->port->id]; | ||
1843 | |||
1844 | if (!tei.port->port_attached) { | ||
1845 | struct task_status_struct *ts = &t->task_status; | ||
1846 | ts->stat = SAS_PHY_DOWN; | ||
1847 | t->task_done(t); | ||
1848 | rc = 0; | ||
1849 | goto exec_exit; | ||
1850 | } | ||
1851 | if (!sas_protocol_ata(t->task_proto)) { | ||
1852 | if (t->num_scatter) { | ||
1853 | n_elem = pci_map_sg(mvi->pdev, t->scatter, | ||
1854 | t->num_scatter, | ||
1855 | t->data_dir); | ||
1856 | if (!n_elem) { | ||
1857 | rc = -ENOMEM; | ||
1858 | goto err_out; | ||
1859 | } | ||
1860 | } | ||
1861 | } else { | ||
1862 | n_elem = t->num_scatter; | ||
1863 | } | ||
1864 | |||
1865 | rc = mvs_tag_alloc(mvi, &tag); | ||
1866 | if (rc) | ||
1867 | goto err_out; | ||
1868 | |||
1869 | mvi->slot_info[tag].task = t; | ||
1870 | mvi->slot_info[tag].n_elem = n_elem; | ||
1871 | memset(mvi->slot_info[tag].buf, 0, MVS_SLOT_BUF_SZ); | ||
1872 | tei.task = t; | ||
1873 | tei.hdr = &mvi->slot[tag]; | ||
1874 | tei.tag = tag; | ||
1875 | tei.n_elem = n_elem; | ||
1876 | |||
1877 | switch (t->task_proto) { | ||
1878 | case SAS_PROTOCOL_SMP: | ||
1879 | rc = mvs_task_prep_smp(mvi, &tei); | ||
1880 | break; | ||
1881 | case SAS_PROTOCOL_SSP: | ||
1882 | rc = mvs_task_prep_ssp(mvi, &tei); | ||
1883 | break; | ||
1884 | case SAS_PROTOCOL_SATA: | ||
1885 | case SAS_PROTOCOL_STP: | ||
1886 | case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: | ||
1887 | rc = mvs_task_prep_ata(mvi, &tei); | ||
1888 | break; | ||
1889 | default: | ||
1890 | dev_printk(KERN_ERR, &pdev->dev, | ||
1891 | "unknown sas_task proto: 0x%x\n", | ||
1892 | t->task_proto); | ||
1893 | rc = -EINVAL; | ||
1894 | break; | ||
1895 | } | ||
1896 | |||
1897 | if (rc) | ||
1898 | goto err_out_tag; | ||
1899 | |||
1900 | /* TODO: select normal or high priority */ | ||
1901 | |||
1902 | spin_lock(&t->task_state_lock); | ||
1903 | t->task_state_flags |= SAS_TASK_AT_INITIATOR; | ||
1904 | spin_unlock(&t->task_state_lock); | ||
1905 | |||
1906 | if (n == 1) { | ||
1907 | spin_unlock_irqrestore(&mvi->lock, flags); | ||
1908 | mw32(TX_PROD_IDX, mvi->tx_prod); | ||
1909 | } | ||
1910 | mvs_hba_memory_dump(mvi, tag, t->task_proto); | ||
1911 | |||
1912 | ++pass; | ||
1913 | mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); | ||
1914 | |||
1915 | if (n == 1) | ||
1916 | break; | ||
1917 | |||
1918 | t = list_entry(t->list.next, struct sas_task, list); | ||
1919 | } while (--n); | ||
1920 | |||
1921 | return 0; | ||
1922 | |||
1923 | err_out_tag: | ||
1924 | mvs_tag_free(mvi, tag); | ||
1925 | err_out: | ||
1926 | dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc); | ||
1927 | if (!sas_protocol_ata(t->task_proto)) | ||
1928 | if (n_elem) | ||
1929 | pci_unmap_sg(mvi->pdev, t->scatter, n_elem, | ||
1930 | t->data_dir); | ||
1931 | exec_exit: | ||
1932 | if (pass) | ||
1933 | mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); | ||
1934 | spin_unlock_irqrestore(&mvi->lock, flags); | ||
1935 | return rc; | ||
1936 | } | ||
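The queue bookkeeping in mvs_task_exec() relies on MVS_CHIP_SLOT_SZ being a power of two: the producer index advances with a mask rather than a modulo, and the hardware doorbell (TX_PROD_IDX) is written once for the whole batch. A small standalone sketch of that wraparound follows, with an assumed ring size rather than the driver's constant.

#include <stdio.h>

#define RING_SZ 1024U   /* stand-in for MVS_CHIP_SLOT_SZ; must be a power of two */

int main(void)
{
	unsigned int prod = RING_SZ - 2;
	unsigned int i;

	/* "& (RING_SZ - 1)" wraps 1023 back to 0 without a divide, which is
	 * why the ring size has to be a power of two. */
	for (i = 0; i < 4; i++) {
		printf("tx_prod = %u\n", prod);
		prod = (prod + 1) & (RING_SZ - 1);
	}

	/* The single doorbell write at the end of the batch publishes the
	 * last filled entry, i.e. (prod - 1) masked back into the ring. */
	printf("doorbell publishes entry %u\n", (prod - 1) & (RING_SZ - 1));
	return 0;
}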
1937 | |||
1938 | static int mvs_task_abort(struct sas_task *task) | ||
1939 | { | ||
1940 | int rc = 1; | ||
1941 | unsigned long flags; | ||
1942 | struct mvs_info *mvi = task->dev->port->ha->lldd_ha; | ||
1943 | struct pci_dev *pdev = mvi->pdev; | ||
1944 | |||
1945 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
1946 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { | ||
1947 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1948 | return TMF_RESP_FUNC_COMPLETE; | ||
1949 | } | ||
1950 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1951 | |||
1952 | /*FIXME*/ | ||
1953 | rc = TMF_RESP_FUNC_COMPLETE; | ||
1954 | |||
1955 | switch (task->task_proto) { | ||
1956 | case SAS_PROTOCOL_SMP: | ||
1957 | dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! "); | ||
1958 | break; | ||
1959 | case SAS_PROTOCOL_SSP: | ||
1960 | dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! "); | ||
1961 | break; | ||
1962 | case SAS_PROTOCOL_SATA: | ||
1963 | case SAS_PROTOCOL_STP: | ||
1964 | case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{ | ||
1965 | dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! " | ||
1966 | "Dump D2H FIS: \n"); | ||
1967 | mvs_hexdump(sizeof(struct host_to_dev_fis), | ||
1968 | (void *)&task->ata_task.fis, 0); | ||
1969 | dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd:\n"); | ||
1970 | mvs_hexdump(16, task->ata_task.atapi_packet, 0); | ||
1971 | break; | ||
1972 | } | ||
1973 | default: | ||
1974 | break; | ||
1975 | } | ||
1976 | |||
1977 | return rc; | ||
1978 | } | ||
1979 | |||
1980 | static void mvs_free(struct mvs_info *mvi) | ||
1981 | { | ||
1982 | int i; | ||
1983 | |||
1984 | if (!mvi) | ||
1985 | return; | ||
1986 | |||
1987 | for (i = 0; i < MVS_SLOTS; i++) { | ||
1988 | struct mvs_slot_info *slot = &mvi->slot_info[i]; | ||
1989 | |||
1990 | if (slot->buf) | ||
1991 | dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ, | ||
1992 | slot->buf, slot->buf_dma); | ||
1993 | } | ||
1994 | |||
1995 | if (mvi->tx) | ||
1996 | dma_free_coherent(&mvi->pdev->dev, | ||
1997 | sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, | ||
1998 | mvi->tx, mvi->tx_dma); | ||
1999 | if (mvi->rx_fis) | ||
2000 | dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ, | ||
2001 | mvi->rx_fis, mvi->rx_fis_dma); | ||
2002 | if (mvi->rx) | ||
2003 | dma_free_coherent(&mvi->pdev->dev, | ||
2004 | sizeof(*mvi->rx) * MVS_RX_RING_SZ, | ||
2005 | mvi->rx, mvi->rx_dma); | ||
2006 | if (mvi->slot) | ||
2007 | dma_free_coherent(&mvi->pdev->dev, | ||
2008 | sizeof(*mvi->slot) * MVS_SLOTS, | ||
2009 | mvi->slot, mvi->slot_dma); | ||
2010 | #ifdef MVS_ENABLE_PERI | ||
2011 | if (mvi->peri_regs) | ||
2012 | iounmap(mvi->peri_regs); | ||
2013 | #endif | ||
2014 | if (mvi->regs) | ||
2015 | iounmap(mvi->regs); | ||
2016 | if (mvi->shost) | ||
2017 | scsi_host_put(mvi->shost); | ||
2018 | kfree(mvi->sas.sas_port); | ||
2019 | kfree(mvi->sas.sas_phy); | ||
2020 | kfree(mvi); | ||
2021 | } | ||
2022 | |||
2023 | /* FIXME: locking? */ | ||
2024 | static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, | ||
2025 | void *funcdata) | ||
2026 | { | ||
2027 | struct mvs_info *mvi = sas_phy->ha->lldd_ha; | ||
2028 | int rc = 0, phy_id = sas_phy->id; | ||
2029 | u32 tmp; | ||
2030 | |||
2031 | tmp = mvs_read_phy_ctl(mvi, phy_id); | ||
2032 | |||
2033 | switch (func) { | ||
2034 | case PHY_FUNC_SET_LINK_RATE:{ | ||
2035 | struct sas_phy_linkrates *rates = funcdata; | ||
2036 | u32 lrmin = 0, lrmax = 0; | ||
2037 | |||
2038 | lrmin = (rates->minimum_linkrate << 8); | ||
2039 | lrmax = (rates->maximum_linkrate << 12); | ||
2040 | |||
2041 | if (lrmin) { | ||
2042 | tmp &= ~(0xf << 8); | ||
2043 | tmp |= lrmin; | ||
2044 | } | ||
2045 | if (lrmax) { | ||
2046 | tmp &= ~(0xf << 12); | ||
2047 | tmp |= lrmax; | ||
2048 | } | ||
2049 | mvs_write_phy_ctl(mvi, phy_id, tmp); | ||
2050 | break; | ||
2051 | } | ||
2052 | |||
2053 | case PHY_FUNC_HARD_RESET: | ||
2054 | if (tmp & PHY_RST_HARD) | ||
2055 | break; | ||
2056 | mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD); | ||
2057 | break; | ||
2058 | |||
2059 | case PHY_FUNC_LINK_RESET: | ||
2060 | mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST); | ||
2061 | break; | ||
2062 | |||
2063 | case PHY_FUNC_DISABLE: | ||
2064 | case PHY_FUNC_RELEASE_SPINUP_HOLD: | ||
2065 | default: | ||
2066 | rc = -EOPNOTSUPP; | ||
2067 | } | ||
2068 | |||
2069 | return rc; | ||
2070 | } | ||
2071 | |||
2072 | static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) | ||
2073 | { | ||
2074 | struct mvs_phy *phy = &mvi->phy[phy_id]; | ||
2075 | struct asd_sas_phy *sas_phy = &phy->sas_phy; | ||
2076 | |||
2077 | sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0; | ||
2078 | sas_phy->class = SAS; | ||
2079 | sas_phy->iproto = SAS_PROTOCOL_ALL; | ||
2080 | sas_phy->tproto = 0; | ||
2081 | sas_phy->type = PHY_TYPE_PHYSICAL; | ||
2082 | sas_phy->role = PHY_ROLE_INITIATOR; | ||
2083 | sas_phy->oob_mode = OOB_NOT_CONNECTED; | ||
2084 | sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; | ||
2085 | |||
2086 | sas_phy->id = phy_id; | ||
2087 | sas_phy->sas_addr = &mvi->sas_addr[0]; | ||
2088 | sas_phy->frame_rcvd = &phy->frame_rcvd[0]; | ||
2089 | sas_phy->ha = &mvi->sas; | ||
2090 | sas_phy->lldd_phy = phy; | ||
2091 | } | ||
2092 | |||
2093 | static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev, | ||
2094 | const struct pci_device_id *ent) | ||
2095 | { | ||
2096 | struct mvs_info *mvi; | ||
2097 | unsigned long res_start, res_len, res_flag; | ||
2098 | struct asd_sas_phy **arr_phy; | ||
2099 | struct asd_sas_port **arr_port; | ||
2100 | const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data]; | ||
2101 | int i; | ||
2102 | |||
2103 | /* | ||
2104 | * alloc and init our per-HBA mvs_info struct | ||
2105 | */ | ||
2106 | |||
2107 | mvi = kzalloc(sizeof(*mvi), GFP_KERNEL); | ||
2108 | if (!mvi) | ||
2109 | return NULL; | ||
2110 | |||
2111 | spin_lock_init(&mvi->lock); | ||
2112 | mvi->pdev = pdev; | ||
2113 | mvi->chip = chip; | ||
2114 | |||
2115 | if (pdev->device == 0x6440 && pdev->revision == 0) | ||
2116 | mvi->flags |= MVF_PHY_PWR_FIX; | ||
2117 | |||
2118 | /* | ||
2119 | * alloc and init SCSI, SAS glue | ||
2120 | */ | ||
2121 | |||
2122 | mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); | ||
2123 | if (!mvi->shost) | ||
2124 | goto err_out; | ||
2125 | |||
2126 | arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); | ||
2127 | arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); | ||
2128 | if (!arr_phy || !arr_port) | ||
2129 | goto err_out; | ||
2130 | |||
2131 | for (i = 0; i < MVS_MAX_PHYS; i++) { | ||
2132 | mvs_phy_init(mvi, i); | ||
2133 | arr_phy[i] = &mvi->phy[i].sas_phy; | ||
2134 | arr_port[i] = &mvi->port[i].sas_port; | ||
2135 | } | ||
2136 | |||
2137 | SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas; | ||
2138 | mvi->shost->transportt = mvs_stt; | ||
2139 | mvi->shost->max_id = 21; | ||
2140 | mvi->shost->max_lun = ~0; | ||
2141 | mvi->shost->max_channel = 0; | ||
2142 | mvi->shost->max_cmd_len = 16; | ||
2143 | |||
2144 | mvi->sas.sas_ha_name = DRV_NAME; | ||
2145 | mvi->sas.dev = &pdev->dev; | ||
2146 | mvi->sas.lldd_module = THIS_MODULE; | ||
2147 | mvi->sas.sas_addr = &mvi->sas_addr[0]; | ||
2148 | mvi->sas.sas_phy = arr_phy; | ||
2149 | mvi->sas.sas_port = arr_port; | ||
2150 | mvi->sas.num_phys = chip->n_phy; | ||
2151 | mvi->sas.lldd_max_execute_num = MVS_CHIP_SLOT_SZ - 1; | ||
2152 | mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE; | ||
2153 | mvi->can_queue = (MVS_CHIP_SLOT_SZ >> 1) - 1; | ||
2154 | mvi->sas.lldd_ha = mvi; | ||
2155 | mvi->sas.core.shost = mvi->shost; | ||
2156 | |||
2157 | mvs_tag_init(mvi); | ||
2158 | |||
2159 | /* | ||
2160 | * ioremap main and peripheral registers | ||
2161 | */ | ||
2162 | |||
2163 | #ifdef MVS_ENABLE_PERI | ||
2164 | res_start = pci_resource_start(pdev, 2); | ||
2165 | res_len = pci_resource_len(pdev, 2); | ||
2166 | if (!res_start || !res_len) | ||
2167 | goto err_out; | ||
2168 | |||
2169 | mvi->peri_regs = ioremap_nocache(res_start, res_len); | ||
2170 | if (!mvi->peri_regs) | ||
2171 | goto err_out; | ||
2172 | #endif | ||
2173 | |||
2174 | res_start = pci_resource_start(pdev, 4); | ||
2175 | res_len = pci_resource_len(pdev, 4); | ||
2176 | if (!res_start || !res_len) | ||
2177 | goto err_out; | ||
2178 | |||
2179 | res_flag = pci_resource_flags(pdev, 4); | ||
2180 | if (res_flag & IORESOURCE_CACHEABLE) | ||
2181 | mvi->regs = ioremap(res_start, res_len); | ||
2182 | else | ||
2183 | mvi->regs = ioremap_nocache(res_start, res_len); | ||
2184 | |||
2185 | if (!mvi->regs) | ||
2186 | goto err_out; | ||
2187 | |||
2188 | /* | ||
2189 | * alloc and init our DMA areas | ||
2190 | */ | ||
2191 | |||
2192 | mvi->tx = dma_alloc_coherent(&pdev->dev, | ||
2193 | sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, | ||
2194 | &mvi->tx_dma, GFP_KERNEL); | ||
2195 | if (!mvi->tx) | ||
2196 | goto err_out; | ||
2197 | memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ); | ||
2198 | |||
2199 | mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ, | ||
2200 | &mvi->rx_fis_dma, GFP_KERNEL); | ||
2201 | if (!mvi->rx_fis) | ||
2202 | goto err_out; | ||
2203 | memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); | ||
2204 | |||
2205 | mvi->rx = dma_alloc_coherent(&pdev->dev, | ||
2206 | sizeof(*mvi->rx) * MVS_RX_RING_SZ, | ||
2207 | &mvi->rx_dma, GFP_KERNEL); | ||
2208 | if (!mvi->rx) | ||
2209 | goto err_out; | ||
2210 | memset(mvi->rx, 0, sizeof(*mvi->rx) * MVS_RX_RING_SZ); | ||
2211 | |||
2212 | mvi->rx[0] = cpu_to_le32(0xfff); | ||
2213 | mvi->rx_cons = 0xfff; | ||
2214 | |||
2215 | mvi->slot = dma_alloc_coherent(&pdev->dev, | ||
2216 | sizeof(*mvi->slot) * MVS_SLOTS, | ||
2217 | &mvi->slot_dma, GFP_KERNEL); | ||
2218 | if (!mvi->slot) | ||
2219 | goto err_out; | ||
2220 | memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS); | ||
2221 | |||
2222 | for (i = 0; i < MVS_SLOTS; i++) { | ||
2223 | struct mvs_slot_info *slot = &mvi->slot_info[i]; | ||
2224 | |||
2225 | slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ, | ||
2226 | &slot->buf_dma, GFP_KERNEL); | ||
2227 | if (!slot->buf) | ||
2228 | goto err_out; | ||
2229 | memset(slot->buf, 0, MVS_SLOT_BUF_SZ); | ||
2230 | } | ||
2231 | |||
2232 | /* finally, read NVRAM to get our SAS address */ | ||
2233 | if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8)) | ||
2234 | goto err_out; | ||
2235 | return mvi; | ||
2236 | |||
2237 | err_out: | ||
2238 | mvs_free(mvi); | ||
2239 | return NULL; | ||
2240 | } | ||
2241 | |||
2242 | static u32 mvs_cr32(void __iomem *regs, u32 addr) | ||
2243 | { | ||
2244 | mw32(CMD_ADDR, addr); | ||
2245 | return mr32(CMD_DATA); | ||
2246 | } | ||
2247 | |||
2248 | static void mvs_cw32(void __iomem *regs, u32 addr, u32 val) | ||
2249 | { | ||
2250 | mw32(CMD_ADDR, addr); | ||
2251 | mw32(CMD_DATA, val); | ||
2252 | } | ||
2253 | |||
2254 | static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port) | ||
2255 | { | ||
2256 | void __iomem *regs = mvi->regs; | ||
2257 | return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4): | ||
2258 | mr32(P4_SER_CTLSTAT + (port - 4) * 4); | ||
2259 | } | ||
2260 | |||
2261 | static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val) | ||
2262 | { | ||
2263 | void __iomem *regs = mvi->regs; | ||
2264 | if (port < 4) | ||
2265 | mw32(P0_SER_CTLSTAT + port * 4, val); | ||
2266 | else | ||
2267 | mw32(P4_SER_CTLSTAT + (port - 4) * 4, val); | ||
2268 | } | ||
2269 | |||
2270 | static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port) | ||
2271 | { | ||
2272 | void __iomem *regs = mvi->regs + off; | ||
2273 | void __iomem *regs2 = mvi->regs + off2; | ||
2274 | return (port < 4)?readl(regs + port * 8): | ||
2275 | readl(regs2 + (port - 4) * 8); | ||
2276 | } | ||
2277 | |||
2278 | static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2, | ||
2279 | u32 port, u32 val) | ||
2280 | { | ||
2281 | void __iomem *regs = mvi->regs + off; | ||
2282 | void __iomem *regs2 = mvi->regs + off2; | ||
2283 | if (port < 4) | ||
2284 | writel(val, regs + port * 8); | ||
2285 | else | ||
2286 | writel(val, regs2 + (port - 4) * 8); | ||
2287 | } | ||
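The per-port accessors above split the eight possible ports across two register banks: ports 0-3 are addressed from one base at an 8-byte stride, ports 4-7 from a second base. The sketch below reproduces that offset computation with made-up bank offsets; BANK0_OFF and BANK1_OFF are not the driver's MVS_P0_ / MVS_P4_ values.

#include <stdio.h>

/* Hypothetical bank offsets; the real MVS_P0_ / MVS_P4_ values differ. */
#define BANK0_OFF   0x100   /* registers for ports 0-3 */
#define BANK1_OFF   0x200   /* registers for ports 4-7 */
#define PORT_STRIDE 8

static unsigned int port_reg_off(unsigned int port)
{
	return (port < 4) ? BANK0_OFF + port * PORT_STRIDE
			  : BANK1_OFF + (port - 4) * PORT_STRIDE;
}

int main(void)
{
	unsigned int p;

	for (p = 0; p < 8; p++)
		printf("port %u -> offset 0x%03x\n", p, port_reg_off(p));
	return 0;
}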
2288 | |||
2289 | static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port) | ||
2290 | { | ||
2291 | return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port); | ||
2292 | } | ||
2293 | |||
2294 | static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val) | ||
2295 | { | ||
2296 | mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val); | ||
2297 | } | ||
2298 | |||
2299 | static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr) | ||
2300 | { | ||
2301 | mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr); | ||
2302 | } | ||
2303 | |||
2304 | static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) | ||
2305 | { | ||
2306 | return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port); | ||
2307 | } | ||
2308 | |||
2309 | static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val) | ||
2310 | { | ||
2311 | mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val); | ||
2312 | } | ||
2313 | |||
2314 | static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr) | ||
2315 | { | ||
2316 | mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr); | ||
2317 | } | ||
2318 | |||
2319 | static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) | ||
2320 | { | ||
2321 | return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port); | ||
2322 | } | ||
2323 | |||
2324 | static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val) | ||
2325 | { | ||
2326 | mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val); | ||
2327 | } | ||
2328 | |||
2329 | static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port) | ||
2330 | { | ||
2331 | return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port); | ||
2332 | } | ||
2333 | |||
2334 | static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val) | ||
2335 | { | ||
2336 | mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val); | ||
2337 | } | ||
2338 | |||
2339 | static void __devinit mvs_phy_hacks(struct mvs_info *mvi) | ||
2340 | { | ||
2341 | void __iomem *regs = mvi->regs; | ||
2342 | u32 tmp; | ||
2343 | |||
2344 | /* workaround for SATA R-ERR, to ignore phy glitch */ | ||
2345 | tmp = mvs_cr32(regs, CMD_PHY_TIMER); | ||
2346 | tmp &= ~(1 << 9); | ||
2347 | tmp |= (1 << 10); | ||
2348 | mvs_cw32(regs, CMD_PHY_TIMER, tmp); | ||
2349 | |||
2350 | /* enable retry 127 times */ | ||
2351 | mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f); | ||
2352 | |||
2353 | /* extend open frame timeout to max */ | ||
2354 | tmp = mvs_cr32(regs, CMD_SAS_CTL0); | ||
2355 | tmp &= ~0xffff; | ||
2356 | tmp |= 0x3fff; | ||
2357 | mvs_cw32(regs, CMD_SAS_CTL0, tmp); | ||
2358 | |||
2359 | /* workaround for WDTIMEOUT, set to 550 ms */ | ||
2360 | mvs_cw32(regs, CMD_WD_TIMER, 0xffffff); | ||
2361 | |||
2362 | /* do not halt on other-port operations during a wide-port link change */ | ||
2363 | mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d); | ||
2364 | |||
2365 | /* workaround for Seagate disks not found during the OOB sequence: | ||
2366 | * receive COMINIT before sending out COMWAKE */ | ||
2367 | tmp = mvs_cr32(regs, CMD_PHY_MODE_21); | ||
2368 | tmp &= 0x0000ffff; | ||
2369 | tmp |= 0x00fa0000; | ||
2370 | mvs_cw32(regs, CMD_PHY_MODE_21, tmp); | ||
2371 | |||
2372 | tmp = mvs_cr32(regs, CMD_PHY_TIMER); | ||
2373 | tmp &= 0x1fffffff; | ||
2374 | tmp |= (2U << 29); /* 8 ms retry */ | ||
2375 | mvs_cw32(regs, CMD_PHY_TIMER, tmp); | ||
2376 | |||
2377 | /* TEST - for phy decoding error, adjust voltage levels */ | ||
2378 | mw32(P0_VSR_ADDR + 0, 0x8); | ||
2379 | mw32(P0_VSR_DATA + 0, 0x2F0); | ||
2380 | |||
2381 | mw32(P0_VSR_ADDR + 8, 0x8); | ||
2382 | mw32(P0_VSR_DATA + 8, 0x2F0); | ||
2383 | |||
2384 | mw32(P0_VSR_ADDR + 16, 0x8); | ||
2385 | mw32(P0_VSR_DATA + 16, 0x2F0); | ||
2386 | |||
2387 | mw32(P0_VSR_ADDR + 24, 0x8); | ||
2388 | mw32(P0_VSR_DATA + 24, 0x2F0); | ||
2389 | |||
2390 | } | ||
2391 | |||
2392 | static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId) | ||
2393 | { | ||
2394 | void __iomem *regs = mvi->regs; | ||
2395 | u32 tmp; | ||
2396 | |||
2397 | tmp = mr32(PCS); | ||
2398 | if (mvi->chip->n_phy <= 4) | ||
2399 | tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT); | ||
2400 | else | ||
2401 | tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2); | ||
2402 | mw32(PCS, tmp); | ||
2403 | } | ||
2404 | |||
2405 | static void mvs_detect_porttype(struct mvs_info *mvi, int i) | ||
2406 | { | ||
2407 | void __iomem *regs = mvi->regs; | ||
2408 | u32 reg; | ||
2409 | struct mvs_phy *phy = &mvi->phy[i]; | ||
2410 | |||
2411 | /* TODO check & save device type */ | ||
2412 | reg = mr32(GBL_PORT_TYPE); | ||
2413 | |||
2414 | if (reg & MODE_SAS_SATA & (1 << i)) | ||
2415 | phy->phy_type |= PORT_TYPE_SAS; | ||
2416 | else | ||
2417 | phy->phy_type |= PORT_TYPE_SATA; | ||
2418 | } | ||
2419 | |||
2420 | static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) | ||
2421 | { | ||
2422 | u32 *s = (u32 *) buf; | ||
2423 | |||
2424 | if (!s) | ||
2425 | return NULL; | ||
2426 | |||
2427 | mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); | ||
2428 | s[3] = mvs_read_port_cfg_data(mvi, i); | ||
2429 | |||
2430 | mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); | ||
2431 | s[2] = mvs_read_port_cfg_data(mvi, i); | ||
2432 | |||
2433 | mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); | ||
2434 | s[1] = mvs_read_port_cfg_data(mvi, i); | ||
2435 | |||
2436 | mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); | ||
2437 | s[0] = mvs_read_port_cfg_data(mvi, i); | ||
2438 | |||
2439 | return (void *)s; | ||
2440 | } | ||
2441 | |||
2442 | static u32 mvs_is_sig_fis_received(u32 irq_status) | ||
2443 | { | ||
2444 | return irq_status & PHYEV_SIG_FIS; | ||
2445 | } | ||
2446 | |||
2447 | static void mvs_update_wideport(struct mvs_info *mvi, int i) | ||
2448 | { | ||
2449 | struct mvs_phy *phy = &mvi->phy[i]; | ||
2450 | struct mvs_port *port = phy->port; | ||
2451 | int j, no; | ||
2452 | |||
2453 | for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy) | ||
2454 | if (no & 1) { | ||
2455 | mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); | ||
2456 | mvs_write_port_cfg_data(mvi, no, | ||
2457 | port->wide_port_phymap); | ||
2458 | } else { | ||
2459 | mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); | ||
2460 | mvs_write_port_cfg_data(mvi, no, 0); | ||
2461 | } | ||
2462 | } | ||
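mvs_update_wideport() walks the wide-port phy bitmap and writes either the full map or zero into each phy's PHYR_WIDE_PORT register, depending on whether the phy is a member. Assuming for_each_phy() iterates by shifting the map right one phy at a time and testing the low bit, the loop behaves roughly like the standalone sketch below; the macro's exact arguments differ.

#include <stdio.h>

int main(void)
{
	unsigned int phymap = 0x0b;   /* phys 0, 1 and 3 form the wide port */
	unsigned int n_phy = 4;
	unsigned int map = phymap;
	unsigned int phy;

	/* Shift a copy of the map right once per phy and test the low bit,
	 * roughly what the driver's for_each_phy loop is assumed to do. */
	for (phy = 0; phy < n_phy; phy++, map >>= 1) {
		if (map & 1)
			printf("phy %u: write wide-port map 0x%02x\n", phy, phymap);
		else
			printf("phy %u: write 0 (not a member)\n", phy);
	}
	return 0;
}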
2463 | |||
2464 | static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) | ||
2465 | { | ||
2466 | u32 tmp; | ||
2467 | struct mvs_phy *phy = &mvi->phy[i]; | ||
2468 | struct mvs_port *port; | ||
2469 | |||
2470 | tmp = mvs_read_phy_ctl(mvi, i); | ||
2471 | |||
2472 | if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { | ||
2473 | if (!phy->port) | ||
2474 | phy->phy_attached = 1; | ||
2475 | return tmp; | ||
2476 | } | ||
2477 | |||
2478 | port = phy->port; | ||
2479 | if (port) { | ||
2480 | if (phy->phy_type & PORT_TYPE_SAS) { | ||
2481 | port->wide_port_phymap &= ~(1U << i); | ||
2482 | if (!port->wide_port_phymap) | ||
2483 | port->port_attached = 0; | ||
2484 | mvs_update_wideport(mvi, i); | ||
2485 | } else if (phy->phy_type & PORT_TYPE_SATA) | ||
2486 | port->port_attached = 0; | ||
2487 | mvs_free_reg_set(mvi, phy->port); | ||
2488 | phy->port = NULL; | ||
2489 | phy->phy_attached = 0; | ||
2490 | phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); | ||
2491 | } | ||
2492 | return 0; | ||
2493 | } | ||
2494 | |||
2495 | static void mvs_update_phyinfo(struct mvs_info *mvi, int i, | ||
2496 | int get_st) | ||
2497 | { | ||
2498 | struct mvs_phy *phy = &mvi->phy[i]; | ||
2499 | struct pci_dev *pdev = mvi->pdev; | ||
2500 | u32 tmp, j; | ||
2501 | u64 tmp64; | ||
2502 | |||
2503 | mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); | ||
2504 | phy->dev_info = mvs_read_port_cfg_data(mvi, i); | ||
2505 | |||
2506 | mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); | ||
2507 | phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32; | ||
2508 | |||
2509 | mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); | ||
2510 | phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); | ||
2511 | |||
2512 | if (get_st) { | ||
2513 | phy->irq_status = mvs_read_port_irq_stat(mvi, i); | ||
2514 | phy->phy_status = mvs_is_phy_ready(mvi, i); | ||
2515 | } | ||
2516 | |||
2517 | if (phy->phy_status) { | ||
2518 | u32 phy_st; | ||
2519 | struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; | ||
2520 | |||
2521 | mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); | ||
2522 | phy_st = mvs_read_port_cfg_data(mvi, i); | ||
2523 | |||
2524 | sas_phy->linkrate = | ||
2525 | (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> | ||
2526 | PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; | ||
2527 | |||
2528 | /* Updated attached_sas_addr */ | ||
2529 | mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); | ||
2530 | phy->att_dev_sas_addr = | ||
2531 | (u64) mvs_read_port_cfg_data(mvi, i) << 32; | ||
2532 | |||
2533 | mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); | ||
2534 | phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); | ||
2535 | |||
2536 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
2537 | "phy[%d] Get Attached Address 0x%llX ," | ||
2538 | " SAS Address 0x%llX\n", | ||
2539 | i, phy->att_dev_sas_addr, phy->dev_sas_addr); | ||
2540 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
2541 | "Rate = %x , type = %d\n", | ||
2542 | sas_phy->linkrate, phy->phy_type); | ||
2543 | |||
2544 | #if 1 | ||
2545 | /* | ||
2546 | * If the device is capable of supporting a wide port | ||
2547 | * on its phys, it may configure the phys as a wide port. | ||
2548 | */ | ||
2549 | if (phy->phy_type & PORT_TYPE_SAS) | ||
2550 | for (j = 0; j < mvi->chip->n_phy && j != i; ++j) { | ||
2551 | if ((mvi->phy[j].phy_attached) && | ||
2552 | (mvi->phy[j].phy_type & PORT_TYPE_SAS)) | ||
2553 | if (phy->att_dev_sas_addr == | ||
2554 | mvi->phy[j].att_dev_sas_addr - 1) { | ||
2555 | phy->att_dev_sas_addr = | ||
2556 | mvi->phy[j].att_dev_sas_addr; | ||
2557 | break; | ||
2558 | } | ||
2559 | } | ||
2560 | |||
2561 | #endif | ||
2562 | |||
2563 | tmp64 = cpu_to_be64(phy->att_dev_sas_addr); | ||
2564 | memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE); | ||
2565 | |||
2566 | if (phy->phy_type & PORT_TYPE_SAS) { | ||
2567 | mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); | ||
2568 | phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); | ||
2569 | phy->identify.device_type = | ||
2570 | phy->att_dev_info & PORT_DEV_TYPE_MASK; | ||
2571 | |||
2572 | if (phy->identify.device_type == SAS_END_DEV) | ||
2573 | phy->identify.target_port_protocols = | ||
2574 | SAS_PROTOCOL_SSP; | ||
2575 | else if (phy->identify.device_type != NO_DEVICE) | ||
2576 | phy->identify.target_port_protocols = | ||
2577 | SAS_PROTOCOL_SMP; | ||
2578 | if (phy_st & PHY_OOB_DTCTD) | ||
2579 | sas_phy->oob_mode = SAS_OOB_MODE; | ||
2580 | phy->frame_rcvd_size = | ||
2581 | sizeof(struct sas_identify_frame); | ||
2582 | } else if (phy->phy_type & PORT_TYPE_SATA) { | ||
2583 | phy->identify.target_port_protocols = SAS_PROTOCOL_STP; | ||
2584 | if (mvs_is_sig_fis_received(phy->irq_status)) { | ||
2585 | if (phy_st & PHY_OOB_DTCTD) | ||
2586 | sas_phy->oob_mode = SATA_OOB_MODE; | ||
2587 | phy->frame_rcvd_size = | ||
2588 | sizeof(struct dev_to_host_fis); | ||
2589 | mvs_get_d2h_reg(mvi, i, | ||
2590 | (void *)sas_phy->frame_rcvd); | ||
2591 | } else { | ||
2592 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
2593 | "No sig fis\n"); | ||
2594 | } | ||
2595 | } | ||
2596 | /* workaround for HW phy decoding error on 1.5g disk drive */ | ||
2597 | mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); | ||
2598 | tmp = mvs_read_port_vsr_data(mvi, i); | ||
2599 | if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> | ||
2600 | PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == | ||
2601 | SAS_LINK_RATE_1_5_GBPS) | ||
2602 | tmp &= ~PHY_MODE6_DTL_SPEED; | ||
2603 | else | ||
2604 | tmp |= PHY_MODE6_DTL_SPEED; | ||
2605 | mvs_write_port_vsr_data(mvi, i, tmp); | ||
2606 | |||
2607 | } | ||
2608 | if (get_st) | ||
2609 | mvs_write_port_irq_stat(mvi, i, phy->irq_status); | ||
2610 | } | ||
2611 | |||
2612 | static void mvs_port_formed(struct asd_sas_phy *sas_phy) | ||
2613 | { | ||
2614 | struct sas_ha_struct *sas_ha = sas_phy->ha; | ||
2615 | struct mvs_info *mvi = sas_ha->lldd_ha; | ||
2616 | struct asd_sas_port *sas_port = sas_phy->port; | ||
2617 | struct mvs_phy *phy = sas_phy->lldd_phy; | ||
2618 | struct mvs_port *port = &mvi->port[sas_port->id]; | ||
2619 | unsigned long flags; | ||
2620 | |||
2621 | spin_lock_irqsave(&mvi->lock, flags); | ||
2622 | port->port_attached = 1; | ||
2623 | phy->port = port; | ||
2624 | port->taskfileset = MVS_ID_NOT_MAPPED; | ||
2625 | if (phy->phy_type & PORT_TYPE_SAS) { | ||
2626 | port->wide_port_phymap = sas_port->phy_mask; | ||
2627 | mvs_update_wideport(mvi, sas_phy->id); | ||
2628 | } | ||
2629 | spin_unlock_irqrestore(&mvi->lock, flags); | ||
2630 | } | ||
2631 | |||
2632 | static int __devinit mvs_hw_init(struct mvs_info *mvi) | ||
2633 | { | ||
2634 | void __iomem *regs = mvi->regs; | ||
2635 | int i; | ||
2636 | u32 tmp, cctl; | ||
2637 | |||
2638 | /* make sure interrupts are masked immediately (paranoia) */ | ||
2639 | mw32(GBL_CTL, 0); | ||
2640 | tmp = mr32(GBL_CTL); | ||
2641 | |||
2642 | /* Reset Controller */ | ||
2643 | if (!(tmp & HBA_RST)) { | ||
2644 | if (mvi->flags & MVF_PHY_PWR_FIX) { | ||
2645 | pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); | ||
2646 | tmp &= ~PCTL_PWR_ON; | ||
2647 | tmp |= PCTL_OFF; | ||
2648 | pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); | ||
2649 | |||
2650 | pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); | ||
2651 | tmp &= ~PCTL_PWR_ON; | ||
2652 | tmp |= PCTL_OFF; | ||
2653 | pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); | ||
2654 | } | ||
2655 | |||
2656 | /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */ | ||
2657 | mw32_f(GBL_CTL, HBA_RST); | ||
2658 | } | ||
2659 | |||
2660 | /* wait for reset to finish; timeout is just a guess */ | ||
2661 | i = 1000; | ||
2662 | while (i-- > 0) { | ||
2663 | msleep(10); | ||
2664 | |||
2665 | if (!(mr32(GBL_CTL) & HBA_RST)) | ||
2666 | break; | ||
2667 | } | ||
2668 | if (mr32(GBL_CTL) & HBA_RST) { | ||
2669 | dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n"); | ||
2670 | return -EBUSY; | ||
2671 | } | ||
2672 | |||
2673 | /* Init Chip */ | ||
2674 | /* make sure RST is set; HBA_RST /should/ have done that for us */ | ||
2675 | cctl = mr32(CTL); | ||
2676 | if (cctl & CCTL_RST) | ||
2677 | cctl &= ~CCTL_RST; | ||
2678 | else | ||
2679 | mw32_f(CTL, cctl | CCTL_RST); | ||
2680 | |||
2681 | /* write to device control _AND_ device status register? - A.C. */ | ||
2682 | pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp); | ||
2683 | tmp &= ~PRD_REQ_MASK; | ||
2684 | tmp |= PRD_REQ_SIZE; | ||
2685 | pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp); | ||
2686 | |||
2687 | pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); | ||
2688 | tmp |= PCTL_PWR_ON; | ||
2689 | tmp &= ~PCTL_OFF; | ||
2690 | pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); | ||
2691 | |||
2692 | pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); | ||
2693 | tmp |= PCTL_PWR_ON; | ||
2694 | tmp &= ~PCTL_OFF; | ||
2695 | pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); | ||
2696 | |||
2697 | mw32_f(CTL, cctl); | ||
2698 | |||
2699 | /* reset control */ | ||
2700 | mw32(PCS, 0); /*MVS_PCS */ | ||
2701 | |||
2702 | mvs_phy_hacks(mvi); | ||
2703 | |||
2704 | mw32(CMD_LIST_LO, mvi->slot_dma); | ||
2705 | mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); | ||
2706 | |||
2707 | mw32(RX_FIS_LO, mvi->rx_fis_dma); | ||
2708 | mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); | ||
2709 | |||
2710 | mw32(TX_CFG, MVS_CHIP_SLOT_SZ); | ||
2711 | mw32(TX_LO, mvi->tx_dma); | ||
2712 | mw32(TX_HI, (mvi->tx_dma >> 16) >> 16); | ||
2713 | |||
2714 | mw32(RX_CFG, MVS_RX_RING_SZ); | ||
2715 | mw32(RX_LO, mvi->rx_dma); | ||
2716 | mw32(RX_HI, (mvi->rx_dma >> 16) >> 16); | ||
2717 | |||
2718 | /* enable auto port detection */ | ||
2719 | mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN); | ||
2720 | msleep(100); | ||
2721 | /* init and reset phys */ | ||
2722 | for (i = 0; i < mvi->chip->n_phy; i++) { | ||
2723 | /* FIXME: is this the correct dword order? */ | ||
2724 | u32 lo = *((u32 *)&mvi->sas_addr[0]); | ||
2725 | u32 hi = *((u32 *)&mvi->sas_addr[4]); | ||
2726 | |||
2727 | mvs_detect_porttype(mvi, i); | ||
2728 | |||
2729 | /* set phy local SAS address */ | ||
2730 | mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); | ||
2731 | mvs_write_port_cfg_data(mvi, i, lo); | ||
2732 | mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); | ||
2733 | mvs_write_port_cfg_data(mvi, i, hi); | ||
2734 | |||
2735 | /* reset phy */ | ||
2736 | tmp = mvs_read_phy_ctl(mvi, i); | ||
2737 | tmp |= PHY_RST; | ||
2738 | mvs_write_phy_ctl(mvi, i, tmp); | ||
2739 | } | ||
2740 | |||
2741 | msleep(100); | ||
2742 | |||
2743 | for (i = 0; i < mvi->chip->n_phy; i++) { | ||
2744 | /* clear phy int status */ | ||
2745 | tmp = mvs_read_port_irq_stat(mvi, i); | ||
2746 | tmp &= ~PHYEV_SIG_FIS; | ||
2747 | mvs_write_port_irq_stat(mvi, i, tmp); | ||
2748 | |||
2749 | /* set phy int mask */ | ||
2750 | tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS | | ||
2751 | PHYEV_ID_DONE | PHYEV_DEC_ERR; | ||
2752 | mvs_write_port_irq_mask(mvi, i, tmp); | ||
2753 | |||
2754 | msleep(100); | ||
2755 | mvs_update_phyinfo(mvi, i, 1); | ||
2756 | mvs_enable_xmt(mvi, i); | ||
2757 | } | ||
2758 | |||
2759 | /* FIXME: update wide port bitmaps */ | ||
2760 | |||
2761 | /* little endian for open address and command table, etc. */ | ||
2762 | /* A.C. | ||
2763 | * it seems that (from the spec) turning on big-endian won't | ||
2764 | * do us any good on big-endian machines; needs further confirmation | ||
2765 | */ | ||
2766 | cctl = mr32(CTL); | ||
2767 | cctl |= CCTL_ENDIAN_CMD; | ||
2768 | cctl |= CCTL_ENDIAN_DATA; | ||
2769 | cctl &= ~CCTL_ENDIAN_OPEN; | ||
2770 | cctl |= CCTL_ENDIAN_RSP; | ||
2771 | mw32_f(CTL, cctl); | ||
2772 | |||
2773 | /* reset CMD queue */ | ||
2774 | tmp = mr32(PCS); | ||
2775 | tmp |= PCS_CMD_RST; | ||
2776 | mw32(PCS, tmp); | ||
2777 | /* interrupt coalescing may cause a missed HW interrupt in some cases, | ||
2778 | * and the max count is 0x1ff while our max slot is 0x200, | ||
2779 | * which would make the count 0. | ||
2780 | */ | ||
2781 | tmp = 0; | ||
2782 | mw32(INT_COAL, tmp); | ||
2783 | |||
2784 | tmp = 0x100; | ||
2785 | mw32(INT_COAL_TMOUT, tmp); | ||
2786 | |||
2787 | /* ladies and gentlemen, start your engines */ | ||
2788 | mw32(TX_CFG, 0); | ||
2789 | mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); | ||
2790 | mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN); | ||
2791 | /* enable CMD/CMPL_Q/RESP mode */ | ||
2792 | mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN); | ||
2793 | |||
2794 | /* re-enable interrupts globally */ | ||
2795 | mvs_hba_interrupt_enable(mvi); | ||
2796 | |||
2797 | /* enable completion queue interrupt */ | ||
2798 | tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM); | ||
2799 | mw32(INT_MASK, tmp); | ||
2800 | |||
2801 | return 0; | ||
2802 | } | ||
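mvs_hw_init() programs each 64-bit DMA base address as two 32-bit halves, using the "(x >> 16) >> 16" idiom for the high word so the shift stays well defined even when dma_addr_t is only a 32-bit type. A tiny sketch of that split, with a made-up example address, is below.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	/* Made-up example bus address. */
	uint64_t dma = 0x00000001234ff000ULL;

	/* Low half is a plain truncation; the high half uses two 16-bit
	 * shifts so the expression stays defined even when dma_addr_t is
	 * only 32 bits wide (a single ">> 32" would then be undefined). */
	uint32_t lo = (uint32_t)dma;
	uint32_t hi = (uint32_t)((dma >> 16) >> 16);

	printf("LO = 0x%08" PRIx32 "  HI = 0x%08" PRIx32 "\n", lo, hi);
	return 0;
}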
2803 | |||
2804 | static void __devinit mvs_print_info(struct mvs_info *mvi) | ||
2805 | { | ||
2806 | struct pci_dev *pdev = mvi->pdev; | ||
2807 | static int printed_version; | ||
2808 | |||
2809 | if (!printed_version++) | ||
2810 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); | ||
2811 | |||
2812 | dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n", | ||
2813 | mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr)); | ||
2814 | } | ||
2815 | |||
2816 | static int __devinit mvs_pci_init(struct pci_dev *pdev, | ||
2817 | const struct pci_device_id *ent) | ||
2818 | { | ||
2819 | int rc; | ||
2820 | struct mvs_info *mvi; | ||
2821 | irq_handler_t irq_handler = mvs_interrupt; | ||
2822 | |||
2823 | rc = pci_enable_device(pdev); | ||
2824 | if (rc) | ||
2825 | return rc; | ||
2826 | |||
2827 | pci_set_master(pdev); | ||
2828 | |||
2829 | rc = pci_request_regions(pdev, DRV_NAME); | ||
2830 | if (rc) | ||
2831 | goto err_out_disable; | ||
2832 | |||
2833 | rc = pci_go_64(pdev); | ||
2834 | if (rc) | ||
2835 | goto err_out_regions; | ||
2836 | |||
2837 | mvi = mvs_alloc(pdev, ent); | ||
2838 | if (!mvi) { | ||
2839 | rc = -ENOMEM; | ||
2840 | goto err_out_regions; | ||
2841 | } | ||
2842 | |||
2843 | rc = mvs_hw_init(mvi); | ||
2844 | if (rc) | ||
2845 | goto err_out_mvi; | ||
2846 | |||
2847 | #ifndef MVS_DISABLE_MSI | ||
2848 | if (!pci_enable_msi(pdev)) { | ||
2849 | u32 tmp; | ||
2850 | void __iomem *regs = mvi->regs; | ||
2851 | mvi->flags |= MVF_MSI; | ||
2852 | irq_handler = mvs_msi_interrupt; | ||
2853 | tmp = mr32(PCS); | ||
2854 | mw32(PCS, tmp | PCS_SELF_CLEAR); | ||
2855 | } | ||
2856 | #endif | ||
2857 | |||
2858 | rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi); | ||
2859 | if (rc) | ||
2860 | goto err_out_msi; | ||
2861 | |||
2862 | rc = scsi_add_host(mvi->shost, &pdev->dev); | ||
2863 | if (rc) | ||
2864 | goto err_out_irq; | ||
2865 | |||
2866 | rc = sas_register_ha(&mvi->sas); | ||
2867 | if (rc) | ||
2868 | goto err_out_shost; | ||
2869 | |||
2870 | pci_set_drvdata(pdev, mvi); | ||
2871 | |||
2872 | mvs_print_info(mvi); | ||
2873 | |||
2874 | scsi_scan_host(mvi->shost); | ||
2875 | |||
2876 | return 0; | ||
2877 | |||
2878 | err_out_shost: | ||
2879 | scsi_remove_host(mvi->shost); | ||
2880 | err_out_irq: | ||
2881 | free_irq(pdev->irq, mvi); | ||
2882 | err_out_msi: | ||
2883 | if (mvi->flags & MVF_MSI) | ||
2884 | pci_disable_msi(pdev); | ||
2885 | err_out_mvi: | ||
2886 | mvs_free(mvi); | ||
2887 | err_out_regions: | ||
2888 | pci_release_regions(pdev); | ||
2889 | err_out_disable: | ||
2890 | pci_disable_device(pdev); | ||
2891 | return rc; | ||
2892 | } | ||
2893 | |||
2894 | static void __devexit mvs_pci_remove(struct pci_dev *pdev) | ||
2895 | { | ||
2896 | struct mvs_info *mvi = pci_get_drvdata(pdev); | ||
2897 | |||
2898 | pci_set_drvdata(pdev, NULL); | ||
2899 | |||
2900 | if (mvi) { | ||
2901 | sas_unregister_ha(&mvi->sas); | ||
2902 | mvs_hba_interrupt_disable(mvi); | ||
2903 | sas_remove_host(mvi->shost); | ||
2904 | scsi_remove_host(mvi->shost); | ||
2905 | |||
2906 | free_irq(pdev->irq, mvi); | ||
2907 | if (mvi->flags & MVF_MSI) | ||
2908 | pci_disable_msi(pdev); | ||
2909 | mvs_free(mvi); | ||
2910 | pci_release_regions(pdev); | ||
2911 | } | ||
2912 | pci_disable_device(pdev); | ||
2913 | } | ||
2914 | |||
2915 | static struct sas_domain_function_template mvs_transport_ops = { | ||
2916 | .lldd_execute_task = mvs_task_exec, | ||
2917 | .lldd_control_phy = mvs_phy_control, | ||
2918 | .lldd_abort_task = mvs_task_abort, | ||
2919 | .lldd_port_formed = mvs_port_formed | ||
2920 | }; | ||
2921 | |||
2922 | static struct pci_device_id __devinitdata mvs_pci_table[] = { | ||
2923 | { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 }, | ||
2924 | { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 }, | ||
2925 | { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 }, | ||
2926 | { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 }, | ||
2927 | |||
2928 | { } /* terminate list */ | ||
2929 | }; | ||
2930 | |||
2931 | static struct pci_driver mvs_pci_driver = { | ||
2932 | .name = DRV_NAME, | ||
2933 | .id_table = mvs_pci_table, | ||
2934 | .probe = mvs_pci_init, | ||
2935 | .remove = __devexit_p(mvs_pci_remove), | ||
2936 | }; | ||
2937 | |||
2938 | static int __init mvs_init(void) | ||
2939 | { | ||
2940 | int rc; | ||
2941 | |||
2942 | mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); | ||
2943 | if (!mvs_stt) | ||
2944 | return -ENOMEM; | ||
2945 | |||
2946 | rc = pci_register_driver(&mvs_pci_driver); | ||
2947 | if (rc) | ||
2948 | goto err_out; | ||
2949 | |||
2950 | return 0; | ||
2951 | |||
2952 | err_out: | ||
2953 | sas_release_transport(mvs_stt); | ||
2954 | return rc; | ||
2955 | } | ||
2956 | |||
2957 | static void __exit mvs_exit(void) | ||
2958 | { | ||
2959 | pci_unregister_driver(&mvs_pci_driver); | ||
2960 | sas_release_transport(mvs_stt); | ||
2961 | } | ||
2962 | |||
2963 | module_init(mvs_init); | ||
2964 | module_exit(mvs_exit); | ||
2965 | |||
2966 | MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>"); | ||
2967 | MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); | ||
2968 | MODULE_VERSION(DRV_VERSION); | ||
2969 | MODULE_LICENSE("GPL"); | ||
2970 | MODULE_DEVICE_TABLE(pci, mvs_pci_table); | ||
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 1479c60441c8..2cd899bfe84b 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c | |||
@@ -23,7 +23,7 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused) | |||
23 | mutex_lock(&ha->fce_mutex); | 23 | mutex_lock(&ha->fce_mutex); |
24 | 24 | ||
25 | seq_printf(s, "FCE Trace Buffer\n"); | 25 | seq_printf(s, "FCE Trace Buffer\n"); |
26 | seq_printf(s, "In Pointer = %llx\n\n", ha->fce_wr); | 26 | seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr); |
27 | seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); | 27 | seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); |
28 | seq_printf(s, "FCE Enable Registers\n"); | 28 | seq_printf(s, "FCE Enable Registers\n"); |
29 | seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", | 29 | seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", |
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 0f029d0d7315..fc84db4069f4 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c | |||
@@ -100,8 +100,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
100 | 100 | ||
101 | if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) { | 101 | if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) { |
102 | scsi_set_resid(cmd, residual); | 102 | scsi_set_resid(cmd, residual); |
103 | if (!scsi_status && ((scsi_bufflen(cmd) - residual) < | 103 | if ((scsi_bufflen(cmd) - residual) < cmd->underflow) { |
104 | cmd->underflow)) { | ||
105 | 104 | ||
106 | cmd->result = DID_ERROR << 16; | 105 | cmd->result = DID_ERROR << 16; |
107 | 106 | ||
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 65455ab1f3b9..4a1cf6377f6c 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
@@ -651,7 +651,7 @@ static int qlogicpti_verify_tmon(struct qlogicpti *qpti) | |||
651 | 651 | ||
652 | static irqreturn_t qpti_intr(int irq, void *dev_id); | 652 | static irqreturn_t qpti_intr(int irq, void *dev_id); |
653 | 653 | ||
654 | static void __init qpti_chain_add(struct qlogicpti *qpti) | 654 | static void __devinit qpti_chain_add(struct qlogicpti *qpti) |
655 | { | 655 | { |
656 | spin_lock_irq(&qptichain_lock); | 656 | spin_lock_irq(&qptichain_lock); |
657 | if (qptichain != NULL) { | 657 | if (qptichain != NULL) { |
@@ -667,7 +667,7 @@ static void __init qpti_chain_add(struct qlogicpti *qpti) | |||
667 | spin_unlock_irq(&qptichain_lock); | 667 | spin_unlock_irq(&qptichain_lock); |
668 | } | 668 | } |
669 | 669 | ||
670 | static void __init qpti_chain_del(struct qlogicpti *qpti) | 670 | static void __devexit qpti_chain_del(struct qlogicpti *qpti) |
671 | { | 671 | { |
672 | spin_lock_irq(&qptichain_lock); | 672 | spin_lock_irq(&qptichain_lock); |
673 | if (qptichain == qpti) { | 673 | if (qptichain == qpti) { |
@@ -682,7 +682,7 @@ static void __init qpti_chain_del(struct qlogicpti *qpti) | |||
682 | spin_unlock_irq(&qptichain_lock); | 682 | spin_unlock_irq(&qptichain_lock); |
683 | } | 683 | } |
684 | 684 | ||
685 | static int __init qpti_map_regs(struct qlogicpti *qpti) | 685 | static int __devinit qpti_map_regs(struct qlogicpti *qpti) |
686 | { | 686 | { |
687 | struct sbus_dev *sdev = qpti->sdev; | 687 | struct sbus_dev *sdev = qpti->sdev; |
688 | 688 | ||
@@ -705,7 +705,7 @@ static int __init qpti_map_regs(struct qlogicpti *qpti) | |||
705 | return 0; | 705 | return 0; |
706 | } | 706 | } |
707 | 707 | ||
708 | static int __init qpti_register_irq(struct qlogicpti *qpti) | 708 | static int __devinit qpti_register_irq(struct qlogicpti *qpti) |
709 | { | 709 | { |
710 | struct sbus_dev *sdev = qpti->sdev; | 710 | struct sbus_dev *sdev = qpti->sdev; |
711 | 711 | ||
@@ -730,7 +730,7 @@ fail: | |||
730 | return -1; | 730 | return -1; |
731 | } | 731 | } |
732 | 732 | ||
733 | static void __init qpti_get_scsi_id(struct qlogicpti *qpti) | 733 | static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti) |
734 | { | 734 | { |
735 | qpti->scsi_id = prom_getintdefault(qpti->prom_node, | 735 | qpti->scsi_id = prom_getintdefault(qpti->prom_node, |
736 | "initiator-id", | 736 | "initiator-id", |
@@ -783,7 +783,7 @@ static void qpti_get_clock(struct qlogicpti *qpti) | |||
783 | /* The request and response queues must each be aligned | 783 | /* The request and response queues must each be aligned |
784 | * on a page boundary. | 784 | * on a page boundary. |
785 | */ | 785 | */ |
786 | static int __init qpti_map_queues(struct qlogicpti *qpti) | 786 | static int __devinit qpti_map_queues(struct qlogicpti *qpti) |
787 | { | 787 | { |
788 | struct sbus_dev *sdev = qpti->sdev; | 788 | struct sbus_dev *sdev = qpti->sdev; |
789 | 789 | ||
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 1541c174937a..d1777a9a9625 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -222,7 +222,7 @@ static struct scsi_host_template sdebug_driver_template = { | |||
222 | .cmd_per_lun = 16, | 222 | .cmd_per_lun = 16, |
223 | .max_sectors = 0xffff, | 223 | .max_sectors = 0xffff, |
224 | .unchecked_isa_dma = 0, | 224 | .unchecked_isa_dma = 0, |
225 | .use_clustering = ENABLE_CLUSTERING, | 225 | .use_clustering = DISABLE_CLUSTERING, |
226 | .module = THIS_MODULE, | 226 | .module = THIS_MODULE, |
227 | }; | 227 | }; |
228 | 228 | ||
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 135c1d054701..ba21d97d1855 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1014,10 +1014,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, | |||
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | req->buffer = NULL; | 1016 | req->buffer = NULL; |
1017 | if (blk_pc_request(req)) | ||
1018 | sdb->length = req->data_len; | ||
1019 | else | ||
1020 | sdb->length = req->nr_sectors << 9; | ||
1021 | 1017 | ||
1022 | /* | 1018 | /* |
1023 | * Next, walk the list, and fill in the addresses and sizes of | 1019 | * Next, walk the list, and fill in the addresses and sizes of |
@@ -1026,6 +1022,10 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, | |||
1026 | count = blk_rq_map_sg(req->q, req, sdb->table.sgl); | 1022 | count = blk_rq_map_sg(req->q, req, sdb->table.sgl); |
1027 | BUG_ON(count > sdb->table.nents); | 1023 | BUG_ON(count > sdb->table.nents); |
1028 | sdb->table.nents = count; | 1024 | sdb->table.nents = count; |
1025 | if (blk_pc_request(req)) | ||
1026 | sdb->length = req->data_len; | ||
1027 | else | ||
1028 | sdb->length = req->nr_sectors << 9; | ||
1029 | return BLKPREP_OK; | 1029 | return BLKPREP_OK; |
1030 | } | 1030 | } |
1031 | 1031 | ||
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index fac7534f3ec4..9981682d5302 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -231,7 +231,7 @@ static struct { | |||
231 | { ISCSI_SESSION_FREE, "FREE" }, | 231 | { ISCSI_SESSION_FREE, "FREE" }, |
232 | }; | 232 | }; |
233 | 233 | ||
234 | const char *iscsi_session_state_name(int state) | 234 | static const char *iscsi_session_state_name(int state) |
235 | { | 235 | { |
236 | int i; | 236 | int i; |
237 | char *name = NULL; | 237 | char *name = NULL; |
@@ -373,7 +373,7 @@ static void session_recovery_timedout(struct work_struct *work) | |||
373 | scsi_target_unblock(&session->dev); | 373 | scsi_target_unblock(&session->dev); |
374 | } | 374 | } |
375 | 375 | ||
376 | void __iscsi_unblock_session(struct iscsi_cls_session *session) | 376 | static void __iscsi_unblock_session(struct iscsi_cls_session *session) |
377 | { | 377 | { |
378 | if (!cancel_delayed_work(&session->recovery_work)) | 378 | if (!cancel_delayed_work(&session->recovery_work)) |
379 | flush_workqueue(iscsi_eh_timer_workq); | 379 | flush_workqueue(iscsi_eh_timer_workq); |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 37df8bbe7f46..7aee64dbfbeb 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -1835,8 +1835,7 @@ static int sd_suspend(struct device *dev, pm_message_t mesg) | |||
1835 | goto done; | 1835 | goto done; |
1836 | } | 1836 | } |
1837 | 1837 | ||
1838 | if (mesg.event == PM_EVENT_SUSPEND && | 1838 | if ((mesg.event & PM_EVENT_SLEEP) && sdkp->device->manage_start_stop) { |
1839 | sdkp->device->manage_start_stop) { | ||
1840 | sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); | 1839 | sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); |
1841 | ret = sd_start_stop_device(sdkp, 0); | 1840 | ret = sd_start_stop_device(sdkp, 0); |
1842 | } | 1841 | } |
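The sd.c change above widens the suspend test from an exact PM_EVENT_SUSPEND comparison to a mask test, so the disk is also stopped on other sleep-type events. A small userspace sketch of that flag-mask idiom; the event values below are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

/* Illustrative event bits (stand-ins, not the kernel's values). */
#define EV_FREEZE     0x1
#define EV_SUSPEND    0x2
#define EV_HIBERNATE  0x4
#define EV_SLEEP      (EV_SUSPEND | EV_HIBERNATE)  /* "any sleep" mask */

static int should_stop_disk(int event, int manage_start_stop)
{
        /* Mask test: matches suspend *and* hibernate, unlike the old
         * exact comparison against the suspend event alone. */
        return (event & EV_SLEEP) && manage_start_stop;
}

int main(void)
{
        printf("suspend:   %d\n", should_stop_disk(EV_SUSPEND, 1));   /* 1 */
        printf("hibernate: %d\n", should_stop_disk(EV_HIBERNATE, 1)); /* 1 */
        printf("freeze:    %d\n", should_stop_disk(EV_FREEZE, 1));    /* 0 */
        return 0;
}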
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 2a6e4f472eaa..a6d96694d0a5 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c | |||
@@ -33,9 +33,9 @@ | |||
33 | #include <scsi/scsi_host.h> | 33 | #include <scsi/scsi_host.h> |
34 | 34 | ||
35 | struct ses_device { | 35 | struct ses_device { |
36 | char *page1; | 36 | unsigned char *page1; |
37 | char *page2; | 37 | unsigned char *page2; |
38 | char *page10; | 38 | unsigned char *page10; |
39 | short page1_len; | 39 | short page1_len; |
40 | short page2_len; | 40 | short page2_len; |
41 | short page10_len; | 41 | short page10_len; |
@@ -67,7 +67,7 @@ static int ses_probe(struct device *dev) | |||
67 | static int ses_recv_diag(struct scsi_device *sdev, int page_code, | 67 | static int ses_recv_diag(struct scsi_device *sdev, int page_code, |
68 | void *buf, int bufflen) | 68 | void *buf, int bufflen) |
69 | { | 69 | { |
70 | char cmd[] = { | 70 | unsigned char cmd[] = { |
71 | RECEIVE_DIAGNOSTIC, | 71 | RECEIVE_DIAGNOSTIC, |
72 | 1, /* Set PCV bit */ | 72 | 1, /* Set PCV bit */ |
73 | page_code, | 73 | page_code, |
@@ -85,7 +85,7 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code, | |||
85 | { | 85 | { |
86 | u32 result; | 86 | u32 result; |
87 | 87 | ||
88 | char cmd[] = { | 88 | unsigned char cmd[] = { |
89 | SEND_DIAGNOSTIC, | 89 | SEND_DIAGNOSTIC, |
90 | 0x10, /* Set PF bit */ | 90 | 0x10, /* Set PF bit */ |
91 | 0, | 91 | 0, |
@@ -104,13 +104,13 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code, | |||
104 | 104 | ||
105 | static int ses_set_page2_descriptor(struct enclosure_device *edev, | 105 | static int ses_set_page2_descriptor(struct enclosure_device *edev, |
106 | struct enclosure_component *ecomp, | 106 | struct enclosure_component *ecomp, |
107 | char *desc) | 107 | unsigned char *desc) |
108 | { | 108 | { |
109 | int i, j, count = 0, descriptor = ecomp->number; | 109 | int i, j, count = 0, descriptor = ecomp->number; |
110 | struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); | 110 | struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); |
111 | struct ses_device *ses_dev = edev->scratch; | 111 | struct ses_device *ses_dev = edev->scratch; |
112 | char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; | 112 | unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; |
113 | char *desc_ptr = ses_dev->page2 + 8; | 113 | unsigned char *desc_ptr = ses_dev->page2 + 8; |
114 | 114 | ||
115 | /* Clear everything */ | 115 | /* Clear everything */ |
116 | memset(desc_ptr, 0, ses_dev->page2_len - 8); | 116 | memset(desc_ptr, 0, ses_dev->page2_len - 8); |
@@ -133,14 +133,14 @@ static int ses_set_page2_descriptor(struct enclosure_device *edev, | |||
133 | return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); | 133 | return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); |
134 | } | 134 | } |
135 | 135 | ||
136 | static char *ses_get_page2_descriptor(struct enclosure_device *edev, | 136 | static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev, |
137 | struct enclosure_component *ecomp) | 137 | struct enclosure_component *ecomp) |
138 | { | 138 | { |
139 | int i, j, count = 0, descriptor = ecomp->number; | 139 | int i, j, count = 0, descriptor = ecomp->number; |
140 | struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); | 140 | struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); |
141 | struct ses_device *ses_dev = edev->scratch; | 141 | struct ses_device *ses_dev = edev->scratch; |
142 | char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; | 142 | unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; |
143 | char *desc_ptr = ses_dev->page2 + 8; | 143 | unsigned char *desc_ptr = ses_dev->page2 + 8; |
144 | 144 | ||
145 | ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); | 145 | ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); |
146 | 146 | ||
@@ -160,17 +160,18 @@ static char *ses_get_page2_descriptor(struct enclosure_device *edev, | |||
160 | static void ses_get_fault(struct enclosure_device *edev, | 160 | static void ses_get_fault(struct enclosure_device *edev, |
161 | struct enclosure_component *ecomp) | 161 | struct enclosure_component *ecomp) |
162 | { | 162 | { |
163 | char *desc; | 163 | unsigned char *desc; |
164 | 164 | ||
165 | desc = ses_get_page2_descriptor(edev, ecomp); | 165 | desc = ses_get_page2_descriptor(edev, ecomp); |
166 | ecomp->fault = (desc[3] & 0x60) >> 4; | 166 | if (desc) |
167 | ecomp->fault = (desc[3] & 0x60) >> 4; | ||
167 | } | 168 | } |
168 | 169 | ||
169 | static int ses_set_fault(struct enclosure_device *edev, | 170 | static int ses_set_fault(struct enclosure_device *edev, |
170 | struct enclosure_component *ecomp, | 171 | struct enclosure_component *ecomp, |
171 | enum enclosure_component_setting val) | 172 | enum enclosure_component_setting val) |
172 | { | 173 | { |
173 | char desc[4] = {0 }; | 174 | unsigned char desc[4] = {0 }; |
174 | 175 | ||
175 | switch (val) { | 176 | switch (val) { |
176 | case ENCLOSURE_SETTING_DISABLED: | 177 | case ENCLOSURE_SETTING_DISABLED: |
@@ -190,26 +191,28 @@ static int ses_set_fault(struct enclosure_device *edev, | |||
190 | static void ses_get_status(struct enclosure_device *edev, | 191 | static void ses_get_status(struct enclosure_device *edev, |
191 | struct enclosure_component *ecomp) | 192 | struct enclosure_component *ecomp) |
192 | { | 193 | { |
193 | char *desc; | 194 | unsigned char *desc; |
194 | 195 | ||
195 | desc = ses_get_page2_descriptor(edev, ecomp); | 196 | desc = ses_get_page2_descriptor(edev, ecomp); |
196 | ecomp->status = (desc[0] & 0x0f); | 197 | if (desc) |
198 | ecomp->status = (desc[0] & 0x0f); | ||
197 | } | 199 | } |
198 | 200 | ||
199 | static void ses_get_locate(struct enclosure_device *edev, | 201 | static void ses_get_locate(struct enclosure_device *edev, |
200 | struct enclosure_component *ecomp) | 202 | struct enclosure_component *ecomp) |
201 | { | 203 | { |
202 | char *desc; | 204 | unsigned char *desc; |
203 | 205 | ||
204 | desc = ses_get_page2_descriptor(edev, ecomp); | 206 | desc = ses_get_page2_descriptor(edev, ecomp); |
205 | ecomp->locate = (desc[2] & 0x02) ? 1 : 0; | 207 | if (desc) |
208 | ecomp->locate = (desc[2] & 0x02) ? 1 : 0; | ||
206 | } | 209 | } |
207 | 210 | ||
208 | static int ses_set_locate(struct enclosure_device *edev, | 211 | static int ses_set_locate(struct enclosure_device *edev, |
209 | struct enclosure_component *ecomp, | 212 | struct enclosure_component *ecomp, |
210 | enum enclosure_component_setting val) | 213 | enum enclosure_component_setting val) |
211 | { | 214 | { |
212 | char desc[4] = {0 }; | 215 | unsigned char desc[4] = {0 }; |
213 | 216 | ||
214 | switch (val) { | 217 | switch (val) { |
215 | case ENCLOSURE_SETTING_DISABLED: | 218 | case ENCLOSURE_SETTING_DISABLED: |
@@ -229,7 +232,7 @@ static int ses_set_active(struct enclosure_device *edev, | |||
229 | struct enclosure_component *ecomp, | 232 | struct enclosure_component *ecomp, |
230 | enum enclosure_component_setting val) | 233 | enum enclosure_component_setting val) |
231 | { | 234 | { |
232 | char desc[4] = {0 }; | 235 | unsigned char desc[4] = {0 }; |
233 | 236 | ||
234 | switch (val) { | 237 | switch (val) { |
235 | case ENCLOSURE_SETTING_DISABLED: | 238 | case ENCLOSURE_SETTING_DISABLED: |
@@ -409,18 +412,18 @@ static int ses_intf_add(struct class_device *cdev, | |||
409 | { | 412 | { |
410 | struct scsi_device *sdev = to_scsi_device(cdev->dev); | 413 | struct scsi_device *sdev = to_scsi_device(cdev->dev); |
411 | struct scsi_device *tmp_sdev; | 414 | struct scsi_device *tmp_sdev; |
412 | unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr, | 415 | unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr = NULL, |
413 | *addl_desc_ptr; | 416 | *addl_desc_ptr = NULL; |
414 | struct ses_device *ses_dev; | 417 | struct ses_device *ses_dev; |
415 | u32 result; | 418 | u32 result; |
416 | int i, j, types, len, components = 0; | 419 | int i, j, types, len, page7_len = 0, components = 0; |
417 | int err = -ENOMEM; | 420 | int err = -ENOMEM; |
418 | struct enclosure_device *edev; | 421 | struct enclosure_device *edev; |
419 | struct ses_component *scomp; | 422 | struct ses_component *scomp = NULL; |
420 | 423 | ||
421 | if (!scsi_device_enclosure(sdev)) { | 424 | if (!scsi_device_enclosure(sdev)) { |
422 | /* not an enclosure, but might be in one */ | 425 | /* not an enclosure, but might be in one */ |
423 | edev = enclosure_find(&sdev->host->shost_gendev); | 426 | edev = enclosure_find(&sdev->host->shost_gendev); |
424 | if (edev) { | 427 | if (edev) { |
425 | ses_match_to_enclosure(edev, sdev); | 428 | ses_match_to_enclosure(edev, sdev); |
426 | class_device_put(&edev->cdev); | 429 | class_device_put(&edev->cdev); |
@@ -447,7 +450,7 @@ static int ses_intf_add(struct class_device *cdev, | |||
447 | * traversal routines more complex */ | 450 | * traversal routines more complex */ |
448 | sdev_printk(KERN_ERR, sdev, | 451 | sdev_printk(KERN_ERR, sdev, |
449 | "FIXME driver has no support for subenclosures (%d)\n", | 452 | "FIXME driver has no support for subenclosures (%d)\n", |
450 | buf[1]); | 453 | hdr_buf[1]); |
451 | goto err_free; | 454 | goto err_free; |
452 | } | 455 | } |
453 | 456 | ||
@@ -456,23 +459,22 @@ static int ses_intf_add(struct class_device *cdev, | |||
456 | if (!buf) | 459 | if (!buf) |
457 | goto err_free; | 460 | goto err_free; |
458 | 461 | ||
459 | ses_dev->page1 = buf; | ||
460 | ses_dev->page1_len = len; | ||
461 | |||
462 | result = ses_recv_diag(sdev, 1, buf, len); | 462 | result = ses_recv_diag(sdev, 1, buf, len); |
463 | if (result) | 463 | if (result) |
464 | goto recv_failed; | 464 | goto recv_failed; |
465 | 465 | ||
466 | types = buf[10]; | 466 | types = buf[10]; |
467 | len = buf[11]; | ||
468 | 467 | ||
469 | type_ptr = buf + 12 + len; | 468 | type_ptr = buf + 12 + buf[11]; |
470 | 469 | ||
471 | for (i = 0; i < types; i++, type_ptr += 4) { | 470 | for (i = 0; i < types; i++, type_ptr += 4) { |
472 | if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || | 471 | if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || |
473 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) | 472 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) |
474 | components += type_ptr[1]; | 473 | components += type_ptr[1]; |
475 | } | 474 | } |
475 | ses_dev->page1 = buf; | ||
476 | ses_dev->page1_len = len; | ||
477 | buf = NULL; | ||
476 | 478 | ||
477 | result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE); | 479 | result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE); |
478 | if (result) | 480 | if (result) |
@@ -489,28 +491,29 @@ static int ses_intf_add(struct class_device *cdev, | |||
489 | goto recv_failed; | 491 | goto recv_failed; |
490 | ses_dev->page2 = buf; | 492 | ses_dev->page2 = buf; |
491 | ses_dev->page2_len = len; | 493 | ses_dev->page2_len = len; |
494 | buf = NULL; | ||
492 | 495 | ||
493 | /* The additional information page --- allows us | 496 | /* The additional information page --- allows us |
494 | * to match up the devices */ | 497 | * to match up the devices */ |
495 | result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE); | 498 | result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE); |
496 | if (result) | 499 | if (!result) { |
497 | goto no_page10; | 500 | |
498 | 501 | len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; | |
499 | len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; | 502 | buf = kzalloc(len, GFP_KERNEL); |
500 | buf = kzalloc(len, GFP_KERNEL); | 503 | if (!buf) |
501 | if (!buf) | 504 | goto err_free; |
502 | goto err_free; | 505 | |
503 | 506 | result = ses_recv_diag(sdev, 10, buf, len); | |
504 | result = ses_recv_diag(sdev, 10, buf, len); | 507 | if (result) |
505 | if (result) | 508 | goto recv_failed; |
506 | goto recv_failed; | 509 | ses_dev->page10 = buf; |
507 | ses_dev->page10 = buf; | 510 | ses_dev->page10_len = len; |
508 | ses_dev->page10_len = len; | 511 | buf = NULL; |
512 | } | ||
509 | 513 | ||
510 | no_page10: | 514 | scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL); |
511 | scomp = kmalloc(sizeof(struct ses_component) * components, GFP_KERNEL); | ||
512 | if (!scomp) | 515 | if (!scomp) |
513 | goto err_free; | 516 | goto err_free; |
514 | 517 | ||
515 | edev = enclosure_register(cdev->dev, sdev->sdev_gendev.bus_id, | 518 | edev = enclosure_register(cdev->dev, sdev->sdev_gendev.bus_id, |
516 | components, &ses_enclosure_callbacks); | 519 | components, &ses_enclosure_callbacks); |
@@ -521,17 +524,18 @@ static int ses_intf_add(struct class_device *cdev, | |||
521 | 524 | ||
522 | edev->scratch = ses_dev; | 525 | edev->scratch = ses_dev; |
523 | for (i = 0; i < components; i++) | 526 | for (i = 0; i < components; i++) |
524 | edev->component[i].scratch = scomp++; | 527 | edev->component[i].scratch = scomp + i; |
525 | 528 | ||
526 | /* Page 7 for the descriptors is optional */ | 529 | /* Page 7 for the descriptors is optional */ |
527 | buf = NULL; | ||
528 | result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE); | 530 | result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE); |
529 | if (result) | 531 | if (result) |
530 | goto simple_populate; | 532 | goto simple_populate; |
531 | 533 | ||
532 | len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; | 534 | page7_len = len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; |
533 | /* add 1 for trailing '\0' we'll use */ | 535 | /* add 1 for trailing '\0' we'll use */ |
534 | buf = kzalloc(len + 1, GFP_KERNEL); | 536 | buf = kzalloc(len + 1, GFP_KERNEL); |
537 | if (!buf) | ||
538 | goto simple_populate; | ||
535 | result = ses_recv_diag(sdev, 7, buf, len); | 539 | result = ses_recv_diag(sdev, 7, buf, len); |
536 | if (result) { | 540 | if (result) { |
537 | simple_populate: | 541 | simple_populate: |
@@ -544,7 +548,8 @@ static int ses_intf_add(struct class_device *cdev, | |||
544 | len = (desc_ptr[2] << 8) + desc_ptr[3]; | 548 | len = (desc_ptr[2] << 8) + desc_ptr[3]; |
545 | /* skip past overall descriptor */ | 549 | /* skip past overall descriptor */ |
546 | desc_ptr += len + 4; | 550 | desc_ptr += len + 4; |
547 | addl_desc_ptr = ses_dev->page10 + 8; | 551 | if (ses_dev->page10) |
552 | addl_desc_ptr = ses_dev->page10 + 8; | ||
548 | } | 553 | } |
549 | type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; | 554 | type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; |
550 | components = 0; | 555 | components = 0; |
@@ -554,29 +559,35 @@ static int ses_intf_add(struct class_device *cdev, | |||
554 | struct enclosure_component *ecomp; | 559 | struct enclosure_component *ecomp; |
555 | 560 | ||
556 | if (desc_ptr) { | 561 | if (desc_ptr) { |
557 | len = (desc_ptr[2] << 8) + desc_ptr[3]; | 562 | if (desc_ptr >= buf + page7_len) { |
558 | desc_ptr += 4; | 563 | desc_ptr = NULL; |
559 | /* Add trailing zero - pushes into | 564 | } else { |
560 | * reserved space */ | 565 | len = (desc_ptr[2] << 8) + desc_ptr[3]; |
561 | desc_ptr[len] = '\0'; | 566 | desc_ptr += 4; |
562 | name = desc_ptr; | 567 | /* Add trailing zero - pushes into |
568 | * reserved space */ | ||
569 | desc_ptr[len] = '\0'; | ||
570 | name = desc_ptr; | ||
571 | } | ||
563 | } | 572 | } |
564 | if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && | 573 | if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || |
565 | type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE) | 574 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) { |
566 | continue; | 575 | |
567 | ecomp = enclosure_component_register(edev, | 576 | ecomp = enclosure_component_register(edev, |
568 | components++, | 577 | components++, |
569 | type_ptr[0], | 578 | type_ptr[0], |
570 | name); | 579 | name); |
571 | if (desc_ptr) { | 580 | |
572 | desc_ptr += len; | 581 | if (!IS_ERR(ecomp) && addl_desc_ptr) |
573 | if (!IS_ERR(ecomp)) | ||
574 | ses_process_descriptor(ecomp, | 582 | ses_process_descriptor(ecomp, |
575 | addl_desc_ptr); | 583 | addl_desc_ptr); |
576 | |||
577 | if (addl_desc_ptr) | ||
578 | addl_desc_ptr += addl_desc_ptr[1] + 2; | ||
579 | } | 584 | } |
585 | if (desc_ptr) | ||
586 | desc_ptr += len; | ||
587 | |||
588 | if (addl_desc_ptr) | ||
589 | addl_desc_ptr += addl_desc_ptr[1] + 2; | ||
590 | |||
580 | } | 591 | } |
581 | } | 592 | } |
582 | kfree(buf); | 593 | kfree(buf); |
@@ -598,6 +609,7 @@ static int ses_intf_add(struct class_device *cdev, | |||
598 | err = -ENODEV; | 609 | err = -ENODEV; |
599 | err_free: | 610 | err_free: |
600 | kfree(buf); | 611 | kfree(buf); |
612 | kfree(scomp); | ||
601 | kfree(ses_dev->page10); | 613 | kfree(ses_dev->page10); |
602 | kfree(ses_dev->page2); | 614 | kfree(ses_dev->page2); |
603 | kfree(ses_dev->page1); | 615 | kfree(ses_dev->page1); |
@@ -630,6 +642,7 @@ static void ses_intf_remove(struct class_device *cdev, | |||
630 | ses_dev = edev->scratch; | 642 | ses_dev = edev->scratch; |
631 | edev->scratch = NULL; | 643 | edev->scratch = NULL; |
632 | 644 | ||
645 | kfree(ses_dev->page10); | ||
633 | kfree(ses_dev->page1); | 646 | kfree(ses_dev->page1); |
634 | kfree(ses_dev->page2); | 647 | kfree(ses_dev->page2); |
635 | kfree(ses_dev); | 648 | kfree(ses_dev); |
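Among the ses.c fixes above, the per-component scratch assignment changes from scomp++ to scomp + i, so scomp keeps pointing at the start of the allocation and the new kfree(scomp) on the error path frees the correct address. A minimal userspace sketch of that pattern, using generic names rather than the driver's own:

#include <stdio.h>
#include <stdlib.h>

struct component { void *scratch; };

int main(void)
{
        int i, n = 4;
        struct component comps[4];
        /* One scratch slot per component, allocated as a single block. */
        int *scratch = calloc(n, sizeof(*scratch));
        if (!scratch)
                return 1;

        /* Index from the base instead of advancing the base pointer,
         * so 'scratch' still names the allocation when we free it. */
        for (i = 0; i < n; i++)
                comps[i].scratch = scratch + i;

        for (i = 0; i < n; i++)
                printf("component %d -> slot %d\n",
                       i, (int)((int *)comps[i].scratch - scratch));

        free(scratch);   /* would be wrong if 'scratch' had been advanced */
        return 0;
}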
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 71952703125a..0a52d9d2da2c 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -17,7 +17,7 @@ | |||
17 | Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support | 17 | Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support |
18 | */ | 18 | */ |
19 | 19 | ||
20 | static const char *verstr = "20080117"; | 20 | static const char *verstr = "20080221"; |
21 | 21 | ||
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | 23 | ||
@@ -1172,7 +1172,7 @@ static int st_open(struct inode *inode, struct file *filp) | |||
1172 | STp->try_dio_now = STp->try_dio; | 1172 | STp->try_dio_now = STp->try_dio; |
1173 | STp->recover_count = 0; | 1173 | STp->recover_count = 0; |
1174 | DEB( STp->nbr_waits = STp->nbr_finished = 0; | 1174 | DEB( STp->nbr_waits = STp->nbr_finished = 0; |
1175 | STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = STp->nbr_combinable = 0; ) | 1175 | STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = 0; ) |
1176 | 1176 | ||
1177 | retval = check_tape(STp, filp); | 1177 | retval = check_tape(STp, filp); |
1178 | if (retval < 0) | 1178 | if (retval < 0) |
@@ -1226,8 +1226,8 @@ static int st_flush(struct file *filp, fl_owner_t id) | |||
1226 | } | 1226 | } |
1227 | 1227 | ||
1228 | DEBC( if (STp->nbr_requests) | 1228 | DEBC( if (STp->nbr_requests) |
1229 | printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n", | 1229 | printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d.\n", |
1230 | name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable)); | 1230 | name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages)); |
1231 | 1231 | ||
1232 | if (STps->rw == ST_WRITING && !STp->pos_unknown) { | 1232 | if (STps->rw == ST_WRITING && !STp->pos_unknown) { |
1233 | struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; | 1233 | struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; |
@@ -1422,9 +1422,6 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf, | |||
1422 | if (STbp->do_dio) { | 1422 | if (STbp->do_dio) { |
1423 | STp->nbr_dio++; | 1423 | STp->nbr_dio++; |
1424 | STp->nbr_pages += STbp->do_dio; | 1424 | STp->nbr_pages += STbp->do_dio; |
1425 | for (i=1; i < STbp->do_dio; i++) | ||
1426 | if (page_to_pfn(STbp->sg[i].page) == page_to_pfn(STbp->sg[i-1].page) + 1) | ||
1427 | STp->nbr_combinable++; | ||
1428 | } | 1425 | } |
1429 | ) | 1426 | ) |
1430 | } else | 1427 | } else |
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h index 6c8075712974..5931726fcf93 100644 --- a/drivers/scsi/st.h +++ b/drivers/scsi/st.h | |||
@@ -164,7 +164,6 @@ struct scsi_tape { | |||
164 | int nbr_requests; | 164 | int nbr_requests; |
165 | int nbr_dio; | 165 | int nbr_dio; |
166 | int nbr_pages; | 166 | int nbr_pages; |
167 | int nbr_combinable; | ||
168 | unsigned char last_cmnd[6]; | 167 | unsigned char last_cmnd[6]; |
169 | unsigned char last_sense[16]; | 168 | unsigned char last_sense[16]; |
170 | #endif | 169 | #endif |
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 72f6d8015358..654430edf74d 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c | |||
@@ -461,30 +461,14 @@ static void stex_internal_copy(struct scsi_cmnd *cmd, | |||
461 | } | 461 | } |
462 | } | 462 | } |
463 | 463 | ||
464 | static int stex_direct_copy(struct scsi_cmnd *cmd, | ||
465 | const void *src, size_t count) | ||
466 | { | ||
467 | size_t cp_len = count; | ||
468 | int n_elem = 0; | ||
469 | |||
470 | n_elem = scsi_dma_map(cmd); | ||
471 | if (n_elem < 0) | ||
472 | return 0; | ||
473 | |||
474 | stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD); | ||
475 | |||
476 | scsi_dma_unmap(cmd); | ||
477 | |||
478 | return cp_len == count; | ||
479 | } | ||
480 | |||
481 | static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) | 464 | static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) |
482 | { | 465 | { |
483 | struct st_frame *p; | 466 | struct st_frame *p; |
484 | size_t count = sizeof(struct st_frame); | 467 | size_t count = sizeof(struct st_frame); |
485 | 468 | ||
486 | p = hba->copy_buffer; | 469 | p = hba->copy_buffer; |
487 | stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_FROM_CMD); | 470 | stex_internal_copy(ccb->cmd, p, &count, scsi_sg_count(ccb->cmd), |
471 | ST_FROM_CMD); | ||
488 | memset(p->base, 0, sizeof(u32)*6); | 472 | memset(p->base, 0, sizeof(u32)*6); |
489 | *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); | 473 | *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); |
490 | p->rom_addr = 0; | 474 | p->rom_addr = 0; |
@@ -502,7 +486,8 @@ static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) | |||
502 | p->subid = | 486 | p->subid = |
503 | hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; | 487 | hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; |
504 | 488 | ||
505 | stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_TO_CMD); | 489 | stex_internal_copy(ccb->cmd, p, &count, scsi_sg_count(ccb->cmd), |
490 | ST_TO_CMD); | ||
506 | } | 491 | } |
507 | 492 | ||
508 | static void | 493 | static void |
@@ -569,8 +554,10 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | |||
569 | unsigned char page; | 554 | unsigned char page; |
570 | page = cmd->cmnd[2] & 0x3f; | 555 | page = cmd->cmnd[2] & 0x3f; |
571 | if (page == 0x8 || page == 0x3f) { | 556 | if (page == 0x8 || page == 0x3f) { |
572 | stex_direct_copy(cmd, ms10_caching_page, | 557 | size_t cp_len = sizeof(ms10_caching_page); |
573 | sizeof(ms10_caching_page)); | 558 | stex_internal_copy(cmd, ms10_caching_page, |
559 | &cp_len, scsi_sg_count(cmd), | ||
560 | ST_TO_CMD); | ||
574 | cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; | 561 | cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; |
575 | done(cmd); | 562 | done(cmd); |
576 | } else | 563 | } else |
@@ -599,8 +586,10 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | |||
599 | if (id != host->max_id - 1) | 586 | if (id != host->max_id - 1) |
600 | break; | 587 | break; |
601 | if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) { | 588 | if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) { |
602 | stex_direct_copy(cmd, console_inq_page, | 589 | size_t cp_len = sizeof(console_inq_page); |
603 | sizeof(console_inq_page)); | 590 | stex_internal_copy(cmd, console_inq_page, |
591 | &cp_len, scsi_sg_count(cmd), | ||
592 | ST_TO_CMD); | ||
604 | cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; | 593 | cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; |
605 | done(cmd); | 594 | done(cmd); |
606 | } else | 595 | } else |
@@ -609,6 +598,7 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | |||
609 | case PASSTHRU_CMD: | 598 | case PASSTHRU_CMD: |
610 | if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { | 599 | if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { |
611 | struct st_drvver ver; | 600 | struct st_drvver ver; |
601 | size_t cp_len = sizeof(ver); | ||
612 | ver.major = ST_VER_MAJOR; | 602 | ver.major = ST_VER_MAJOR; |
613 | ver.minor = ST_VER_MINOR; | 603 | ver.minor = ST_VER_MINOR; |
614 | ver.oem = ST_OEM; | 604 | ver.oem = ST_OEM; |
@@ -616,7 +606,9 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | |||
616 | ver.signature[0] = PASSTHRU_SIGNATURE; | 606 | ver.signature[0] = PASSTHRU_SIGNATURE; |
617 | ver.console_id = host->max_id - 1; | 607 | ver.console_id = host->max_id - 1; |
618 | ver.host_no = hba->host->host_no; | 608 | ver.host_no = hba->host->host_no; |
619 | cmd->result = stex_direct_copy(cmd, &ver, sizeof(ver)) ? | 609 | stex_internal_copy(cmd, &ver, &cp_len, |
610 | scsi_sg_count(cmd), ST_TO_CMD); | ||
611 | cmd->result = sizeof(ver) == cp_len ? | ||
620 | DID_OK << 16 | COMMAND_COMPLETE << 8 : | 612 | DID_OK << 16 | COMMAND_COMPLETE << 8 : |
621 | DID_ERROR << 16 | COMMAND_COMPLETE << 8; | 613 | DID_ERROR << 16 | COMMAND_COMPLETE << 8; |
622 | done(cmd); | 614 | done(cmd); |
@@ -709,7 +701,7 @@ static void stex_copy_data(struct st_ccb *ccb, | |||
709 | if (ccb->cmd == NULL) | 701 | if (ccb->cmd == NULL) |
710 | return; | 702 | return; |
711 | stex_internal_copy(ccb->cmd, | 703 | stex_internal_copy(ccb->cmd, |
712 | resp->variable, &count, ccb->sg_count, ST_TO_CMD); | 704 | resp->variable, &count, scsi_sg_count(ccb->cmd), ST_TO_CMD); |
713 | } | 705 | } |
714 | 706 | ||
715 | static void stex_ys_commands(struct st_hba *hba, | 707 | static void stex_ys_commands(struct st_hba *hba, |
@@ -734,7 +726,7 @@ static void stex_ys_commands(struct st_hba *hba, | |||
734 | 726 | ||
735 | count = STEX_EXTRA_SIZE; | 727 | count = STEX_EXTRA_SIZE; |
736 | stex_internal_copy(ccb->cmd, hba->copy_buffer, | 728 | stex_internal_copy(ccb->cmd, hba->copy_buffer, |
737 | &count, ccb->sg_count, ST_FROM_CMD); | 729 | &count, scsi_sg_count(ccb->cmd), ST_FROM_CMD); |
738 | inq_data = (ST_INQ *)hba->copy_buffer; | 730 | inq_data = (ST_INQ *)hba->copy_buffer; |
739 | if (inq_data->DeviceTypeQualifier != 0) | 731 | if (inq_data->DeviceTypeQualifier != 0) |
740 | ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT; | 732 | ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT; |
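The stex.c changes above drop the stex_direct_copy() wrapper and call stex_internal_copy() directly, passing scsi_sg_count(cmd) and a cp_len that is clamped to what the destination can hold and then checked against the requested size. A small userspace sketch of that copy-and-verify pattern; the function and variable names are illustrative, not the driver's:

#include <stdio.h>
#include <string.h>

/* Copy up to *len bytes into dst (capacity dst_cap) and report back how
 * many bytes were actually copied via the in/out length argument. */
static void bounded_copy(void *dst, size_t dst_cap, const void *src, size_t *len)
{
        if (*len > dst_cap)
                *len = dst_cap;
        memcpy(dst, src, *len);
}

int main(void)
{
        const char page[] = "mode-sense caching page";
        char buf[8];
        size_t cp_len = sizeof(page);

        bounded_copy(buf, sizeof(buf), page, &cp_len);

        /* Success only if the whole source fit, mirroring the
         * "cp_len == sizeof(...)" check after the copy. */
        printf("copied %zu of %zu bytes: %s\n",
               cp_len, sizeof(page), cp_len == sizeof(page) ? "ok" : "short");
        return 0;
}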
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c index 6325901e5093..f7d279542fa5 100644 --- a/drivers/scsi/sym53c416.c +++ b/drivers/scsi/sym53c416.c | |||
@@ -187,10 +187,10 @@ | |||
187 | #define sym53c416_base_2 sym53c416_2 | 187 | #define sym53c416_base_2 sym53c416_2 |
188 | #define sym53c416_base_3 sym53c416_3 | 188 | #define sym53c416_base_3 sym53c416_3 |
189 | 189 | ||
190 | static unsigned int sym53c416_base[2] = {0,0}; | 190 | static unsigned int sym53c416_base[2]; |
191 | static unsigned int sym53c416_base_1[2] = {0,0}; | 191 | static unsigned int sym53c416_base_1[2]; |
192 | static unsigned int sym53c416_base_2[2] = {0,0}; | 192 | static unsigned int sym53c416_base_2[2]; |
193 | static unsigned int sym53c416_base_3[2] = {0,0}; | 193 | static unsigned int sym53c416_base_3[2]; |
194 | 194 | ||
195 | #endif | 195 | #endif |
196 | 196 | ||
@@ -621,25 +621,25 @@ int __init sym53c416_detect(struct scsi_host_template *tpnt) | |||
621 | int ints[3]; | 621 | int ints[3]; |
622 | 622 | ||
623 | ints[0] = 2; | 623 | ints[0] = 2; |
624 | if(sym53c416_base) | 624 | if(sym53c416_base[0]) |
625 | { | 625 | { |
626 | ints[1] = sym53c416_base[0]; | 626 | ints[1] = sym53c416_base[0]; |
627 | ints[2] = sym53c416_base[1]; | 627 | ints[2] = sym53c416_base[1]; |
628 | sym53c416_setup(NULL, ints); | 628 | sym53c416_setup(NULL, ints); |
629 | } | 629 | } |
630 | if(sym53c416_base_1) | 630 | if(sym53c416_base_1[0]) |
631 | { | 631 | { |
632 | ints[1] = sym53c416_base_1[0]; | 632 | ints[1] = sym53c416_base_1[0]; |
633 | ints[2] = sym53c416_base_1[1]; | 633 | ints[2] = sym53c416_base_1[1]; |
634 | sym53c416_setup(NULL, ints); | 634 | sym53c416_setup(NULL, ints); |
635 | } | 635 | } |
636 | if(sym53c416_base_2) | 636 | if(sym53c416_base_2[0]) |
637 | { | 637 | { |
638 | ints[1] = sym53c416_base_2[0]; | 638 | ints[1] = sym53c416_base_2[0]; |
639 | ints[2] = sym53c416_base_2[1]; | 639 | ints[2] = sym53c416_base_2[1]; |
640 | sym53c416_setup(NULL, ints); | 640 | sym53c416_setup(NULL, ints); |
641 | } | 641 | } |
642 | if(sym53c416_base_3) | 642 | if(sym53c416_base_3[0]) |
643 | { | 643 | { |
644 | ints[1] = sym53c416_base_3[0]; | 644 | ints[1] = sym53c416_base_3[0]; |
645 | ints[2] = sym53c416_base_3[1]; | 645 | ints[2] = sym53c416_base_3[1]; |