diff options
Diffstat (limited to 'drivers/scsi')
167 files changed, 5465 insertions, 5996 deletions
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index 4de346017e9f..6da6cec9a651 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c | |||
| @@ -683,14 +683,13 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which) | |||
| 683 | unsigned long *cpu_addr; | 683 | unsigned long *cpu_addr; |
| 684 | int retval = 1; | 684 | int retval = 1; |
| 685 | 685 | ||
| 686 | cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle); | 686 | cpu_addr = pci_zalloc_consistent(tw_dev->tw_pci_dev, size * TW_Q_LENGTH, |
| 687 | &dma_handle); | ||
| 687 | if (!cpu_addr) { | 688 | if (!cpu_addr) { |
| 688 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); | 689 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); |
| 689 | goto out; | 690 | goto out; |
| 690 | } | 691 | } |
| 691 | 692 | ||
| 692 | memset(cpu_addr, 0, size*TW_Q_LENGTH); | ||
| 693 | |||
| 694 | for (i = 0; i < TW_Q_LENGTH; i++) { | 693 | for (i = 0; i < TW_Q_LENGTH; i++) { |
| 695 | switch(which) { | 694 | switch(which) { |
| 696 | case 0: | 695 | case 0: |
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h index 49dcf03c631a..29b0b84ed69e 100644 --- a/drivers/scsi/3w-xxxx.h +++ b/drivers/scsi/3w-xxxx.h | |||
| @@ -392,6 +392,8 @@ typedef struct TAG_TW_Passthru | |||
| 392 | unsigned char padding[12]; | 392 | unsigned char padding[12]; |
| 393 | } TW_Passthru; | 393 | } TW_Passthru; |
| 394 | 394 | ||
| 395 | #pragma pack() | ||
| 396 | |||
| 395 | typedef struct TAG_TW_Device_Extension { | 397 | typedef struct TAG_TW_Device_Extension { |
| 396 | u32 base_addr; | 398 | u32 base_addr; |
| 397 | unsigned long *alignment_virtual_address[TW_Q_LENGTH]; | 399 | unsigned long *alignment_virtual_address[TW_Q_LENGTH]; |
| @@ -430,6 +432,4 @@ typedef struct TAG_TW_Device_Extension { | |||
| 430 | wait_queue_head_t ioctl_wqueue; | 432 | wait_queue_head_t ioctl_wqueue; |
| 431 | } TW_Device_Extension; | 433 | } TW_Device_Extension; |
| 432 | 434 | ||
| 433 | #pragma pack() | ||
| 434 | |||
| 435 | #endif /* _3W_XXXX_H */ | 435 | #endif /* _3W_XXXX_H */ |
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index a3adfb4357f5..fabd4be2c985 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c | |||
| @@ -1005,7 +1005,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp, | |||
| 1005 | DMA_TO_DEVICE); | 1005 | DMA_TO_DEVICE); |
| 1006 | 1006 | ||
| 1007 | cmnd[0] = REQUEST_SENSE; | 1007 | cmnd[0] = REQUEST_SENSE; |
| 1008 | cmnd[1] = (SCp->device->lun & 0x7) << 5; | 1008 | cmnd[1] = (lun & 0x7) << 5; |
| 1009 | cmnd[2] = 0; | 1009 | cmnd[2] = 0; |
| 1010 | cmnd[3] = 0; | 1010 | cmnd[3] = 0; |
| 1011 | cmnd[4] = SCSI_SENSE_BUFFERSIZE; | 1011 | cmnd[4] = SCSI_SENSE_BUFFERSIZE; |
| @@ -1396,7 +1396,8 @@ NCR_700_start_command(struct scsi_cmnd *SCp) | |||
| 1396 | struct NCR_700_Host_Parameters *hostdata = | 1396 | struct NCR_700_Host_Parameters *hostdata = |
| 1397 | (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; | 1397 | (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; |
| 1398 | __u16 count = 1; /* for IDENTIFY message */ | 1398 | __u16 count = 1; /* for IDENTIFY message */ |
| 1399 | 1399 | u8 lun = SCp->device->lun; | |
| 1400 | |||
| 1400 | if(hostdata->state != NCR_700_HOST_FREE) { | 1401 | if(hostdata->state != NCR_700_HOST_FREE) { |
| 1401 | /* keep this inside the lock to close the race window where | 1402 | /* keep this inside the lock to close the race window where |
| 1402 | * the running command finishes on another CPU while we don't | 1403 | * the running command finishes on another CPU while we don't |
| @@ -1415,7 +1416,7 @@ NCR_700_start_command(struct scsi_cmnd *SCp) | |||
| 1415 | 1416 | ||
| 1416 | hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE && | 1417 | hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE && |
| 1417 | slot->flags != NCR_700_FLAG_AUTOSENSE), | 1418 | slot->flags != NCR_700_FLAG_AUTOSENSE), |
| 1418 | SCp->device->lun); | 1419 | lun); |
| 1419 | /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure | 1420 | /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure |
| 1420 | * if the negotiated transfer parameters still hold, so | 1421 | * if the negotiated transfer parameters still hold, so |
| 1421 | * always renegotiate them */ | 1422 | * always renegotiate them */ |
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index 972f8176665f..64c75143c89a 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c | |||
| @@ -3893,7 +3893,7 @@ __setup("BusLogic=", blogic_setup); | |||
| 3893 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | 3893 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, |
| 3894 | { } | 3894 | { } |
| 3895 | };*/ | 3895 | };*/ |
| 3896 | static DEFINE_PCI_DEVICE_TABLE(blogic_pci_tbl) = { | 3896 | static const struct pci_device_id blogic_pci_tbl[] = { |
| 3897 | {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER)}, | 3897 | {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER)}, |
| 3898 | {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC)}, | 3898 | {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC)}, |
| 3899 | {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT)}, | 3899 | {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT)}, |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index baca5897039f..18a3358eb1d4 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
| @@ -40,13 +40,6 @@ config SCSI_DMA | |||
| 40 | bool | 40 | bool |
| 41 | default n | 41 | default n |
| 42 | 42 | ||
| 43 | config SCSI_TGT | ||
| 44 | tristate "SCSI target support" | ||
| 45 | depends on SCSI | ||
| 46 | ---help--- | ||
| 47 | If you want to use SCSI target mode drivers enable this option. | ||
| 48 | If you choose M, the module will be called scsi_tgt. | ||
| 49 | |||
| 50 | config SCSI_NETLINK | 43 | config SCSI_NETLINK |
| 51 | bool | 44 | bool |
| 52 | default n | 45 | default n |
| @@ -197,20 +190,6 @@ config SCSI_ENCLOSURE | |||
| 197 | it has an enclosure device. Selecting this option will just allow | 190 | it has an enclosure device. Selecting this option will just allow |
| 198 | certain enclosure conditions to be reported and is not required. | 191 | certain enclosure conditions to be reported and is not required. |
| 199 | 192 | ||
| 200 | config SCSI_MULTI_LUN | ||
| 201 | bool "Probe all LUNs on each SCSI device" | ||
| 202 | depends on SCSI | ||
| 203 | help | ||
| 204 | Some devices support more than one LUN (Logical Unit Number) in order | ||
| 205 | to allow access to several media, e.g. CD jukebox, USB card reader, | ||
| 206 | mobile phone in mass storage mode. This option forces the kernel to | ||
| 207 | probe for all LUNs by default. This setting can be overridden by | ||
| 208 | max_luns boot/module parameter. Note that this option does not affect | ||
| 209 | devices conforming to SCSI-3 or higher as they can explicitly report | ||
| 210 | their number of LUNs. It is safe to say Y here unless you have one of | ||
| 211 | those rare devices which reacts in an unexpected way when probed for | ||
| 212 | multiple LUNs. | ||
| 213 | |||
| 214 | config SCSI_CONSTANTS | 193 | config SCSI_CONSTANTS |
| 215 | bool "Verbose SCSI error reporting (kernel size +=12K)" | 194 | bool "Verbose SCSI error reporting (kernel size +=12K)" |
| 216 | depends on SCSI | 195 | depends on SCSI |
| @@ -285,13 +264,6 @@ config SCSI_FC_ATTRS | |||
| 285 | each attached FiberChannel device to sysfs, say Y. | 264 | each attached FiberChannel device to sysfs, say Y. |
| 286 | Otherwise, say N. | 265 | Otherwise, say N. |
| 287 | 266 | ||
| 288 | config SCSI_FC_TGT_ATTRS | ||
| 289 | bool "SCSI target support for FiberChannel Transport Attributes" | ||
| 290 | depends on SCSI_FC_ATTRS | ||
| 291 | depends on SCSI_TGT = y || SCSI_TGT = SCSI_FC_ATTRS | ||
| 292 | help | ||
| 293 | If you want to use SCSI target mode drivers enable this option. | ||
| 294 | |||
| 295 | config SCSI_ISCSI_ATTRS | 267 | config SCSI_ISCSI_ATTRS |
| 296 | tristate "iSCSI Transport Attributes" | 268 | tristate "iSCSI Transport Attributes" |
| 297 | depends on SCSI && NET | 269 | depends on SCSI && NET |
| @@ -318,13 +290,6 @@ config SCSI_SRP_ATTRS | |||
| 318 | If you wish to export transport-specific information about | 290 | If you wish to export transport-specific information about |
| 319 | each attached SRP device to sysfs, say Y. | 291 | each attached SRP device to sysfs, say Y. |
| 320 | 292 | ||
| 321 | config SCSI_SRP_TGT_ATTRS | ||
| 322 | bool "SCSI target support for SRP Transport Attributes" | ||
| 323 | depends on SCSI_SRP_ATTRS | ||
| 324 | depends on SCSI_TGT = y || SCSI_TGT = SCSI_SRP_ATTRS | ||
| 325 | help | ||
| 326 | If you want to use SCSI target mode drivers enable this option. | ||
| 327 | |||
| 328 | endmenu | 293 | endmenu |
| 329 | 294 | ||
| 330 | menuconfig SCSI_LOWLEVEL | 295 | menuconfig SCSI_LOWLEVEL |
| @@ -528,7 +493,7 @@ config SCSI_DPT_I2O | |||
| 528 | 493 | ||
| 529 | config SCSI_ADVANSYS | 494 | config SCSI_ADVANSYS |
| 530 | tristate "AdvanSys SCSI support" | 495 | tristate "AdvanSys SCSI support" |
| 531 | depends on SCSI && VIRT_TO_BUS | 496 | depends on SCSI && VIRT_TO_BUS && !ARM |
| 532 | depends on ISA || EISA || PCI | 497 | depends on ISA || EISA || PCI |
| 533 | help | 498 | help |
| 534 | This is a driver for all SCSI host adapters manufactured by | 499 | This is a driver for all SCSI host adapters manufactured by |
| @@ -848,20 +813,6 @@ config SCSI_IBMVSCSI | |||
| 848 | To compile this driver as a module, choose M here: the | 813 | To compile this driver as a module, choose M here: the |
| 849 | module will be called ibmvscsi. | 814 | module will be called ibmvscsi. |
| 850 | 815 | ||
| 851 | config SCSI_IBMVSCSIS | ||
| 852 | tristate "IBM Virtual SCSI Server support" | ||
| 853 | depends on PPC_PSERIES && SCSI_SRP && SCSI_SRP_TGT_ATTRS | ||
| 854 | help | ||
| 855 | This is the SRP target driver for IBM pSeries virtual environments. | ||
| 856 | |||
| 857 | The userspace component needed to initialize the driver and | ||
| 858 | documentation can be found: | ||
| 859 | |||
| 860 | http://stgt.berlios.de/ | ||
| 861 | |||
| 862 | To compile this driver as a module, choose M here: the | ||
| 863 | module will be called ibmvstgt. | ||
| 864 | |||
| 865 | config SCSI_IBMVFC | 816 | config SCSI_IBMVFC |
| 866 | tristate "IBM Virtual FC support" | 817 | tristate "IBM Virtual FC support" |
| 867 | depends on PPC_PSERIES && SCSI | 818 | depends on PPC_PSERIES && SCSI |
| @@ -1750,16 +1701,6 @@ config SCSI_PM8001 | |||
| 1750 | This driver supports PMC-Sierra PCIE SAS/SATA 8x6G SPC 8001 chip | 1701 | This driver supports PMC-Sierra PCIE SAS/SATA 8x6G SPC 8001 chip |
| 1751 | based host adapters. | 1702 | based host adapters. |
| 1752 | 1703 | ||
| 1753 | config SCSI_SRP | ||
| 1754 | tristate "SCSI RDMA Protocol helper library" | ||
| 1755 | depends on SCSI && PCI | ||
| 1756 | select SCSI_TGT | ||
| 1757 | help | ||
| 1758 | If you wish to use SRP target drivers, say Y. | ||
| 1759 | |||
| 1760 | To compile this driver as a module, choose M here: the | ||
| 1761 | module will be called libsrp. | ||
| 1762 | |||
| 1763 | config SCSI_BFA_FC | 1704 | config SCSI_BFA_FC |
| 1764 | tristate "Brocade BFA Fibre Channel Support" | 1705 | tristate "Brocade BFA Fibre Channel Support" |
| 1765 | depends on PCI && SCSI | 1706 | depends on PCI && SCSI |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index e172d4f8e02f..5f0d299b0093 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
| @@ -20,7 +20,6 @@ CFLAGS_gdth.o = # -DDEBUG_GDTH=2 -D__SERIAL__ -D__COM2__ -DGDTH_STATISTICS | |||
| 20 | obj-$(CONFIG_PCMCIA) += pcmcia/ | 20 | obj-$(CONFIG_PCMCIA) += pcmcia/ |
| 21 | 21 | ||
| 22 | obj-$(CONFIG_SCSI) += scsi_mod.o | 22 | obj-$(CONFIG_SCSI) += scsi_mod.o |
| 23 | obj-$(CONFIG_SCSI_TGT) += scsi_tgt.o | ||
| 24 | 23 | ||
| 25 | obj-$(CONFIG_RAID_ATTRS) += raid_class.o | 24 | obj-$(CONFIG_RAID_ATTRS) += raid_class.o |
| 26 | 25 | ||
| @@ -127,9 +126,7 @@ obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o | |||
| 127 | obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o | 126 | obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o |
| 128 | obj-$(CONFIG_SCSI_NSP32) += nsp32.o | 127 | obj-$(CONFIG_SCSI_NSP32) += nsp32.o |
| 129 | obj-$(CONFIG_SCSI_IPR) += ipr.o | 128 | obj-$(CONFIG_SCSI_IPR) += ipr.o |
| 130 | obj-$(CONFIG_SCSI_SRP) += libsrp.o | ||
| 131 | obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/ | 129 | obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/ |
| 132 | obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/ | ||
| 133 | obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/ | 130 | obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/ |
| 134 | obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o | 131 | obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o |
| 135 | obj-$(CONFIG_SCSI_STEX) += stex.o | 132 | obj-$(CONFIG_SCSI_STEX) += stex.o |
| @@ -173,8 +170,6 @@ scsi_mod-$(CONFIG_PM) += scsi_pm.o | |||
| 173 | 170 | ||
| 174 | hv_storvsc-y := storvsc_drv.o | 171 | hv_storvsc-y := storvsc_drv.o |
| 175 | 172 | ||
| 176 | scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o | ||
| 177 | |||
| 178 | sd_mod-objs := sd.o | 173 | sd_mod-objs := sd.o |
| 179 | sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o | 174 | sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o |
| 180 | 175 | ||
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 93d13fc9a293..45da3c823322 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c | |||
| @@ -762,7 +762,7 @@ static int __maybe_unused NCR5380_show_info(struct seq_file *m, | |||
| 762 | 762 | ||
| 763 | static void lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, struct seq_file *m) | 763 | static void lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, struct seq_file *m) |
| 764 | { | 764 | { |
| 765 | SPRINTF("scsi%d : destination target %d, lun %d\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun); | 765 | SPRINTF("scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun); |
| 766 | SPRINTF(" command = "); | 766 | SPRINTF(" command = "); |
| 767 | lprint_command(cmd->cmnd, m); | 767 | lprint_command(cmd->cmnd, m); |
| 768 | } | 768 | } |
| @@ -1039,9 +1039,10 @@ static void NCR5380_main(struct work_struct *work) | |||
| 1039 | for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble) | 1039 | for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble) |
| 1040 | { | 1040 | { |
| 1041 | if (prev != tmp) | 1041 | if (prev != tmp) |
| 1042 | dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun); | 1042 | dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%llu\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun); |
| 1043 | /* When we find one, remove it from the issue queue. */ | 1043 | /* When we find one, remove it from the issue queue. */ |
| 1044 | if (!(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))) { | 1044 | if (!(hostdata->busy[tmp->device->id] & |
| 1045 | (1 << (u8)(tmp->device->lun & 0xff)))) { | ||
| 1045 | if (prev) { | 1046 | if (prev) { |
| 1046 | REMOVE(prev, prev->host_scribble, tmp, tmp->host_scribble); | 1047 | REMOVE(prev, prev->host_scribble, tmp, tmp->host_scribble); |
| 1047 | prev->host_scribble = tmp->host_scribble; | 1048 | prev->host_scribble = tmp->host_scribble; |
| @@ -1057,7 +1058,7 @@ static void NCR5380_main(struct work_struct *work) | |||
| 1057 | * On failure, we must add the command back to the | 1058 | * On failure, we must add the command back to the |
| 1058 | * issue queue so we can keep trying. | 1059 | * issue queue so we can keep trying. |
| 1059 | */ | 1060 | */ |
| 1060 | dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->device->id, tmp->device->lun); | 1061 | dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main() : command for target %d lun %llu removed from issue_queue\n", instance->host_no, tmp->device->id, tmp->device->lun); |
| 1061 | 1062 | ||
| 1062 | /* | 1063 | /* |
| 1063 | * A successful selection is defined as one that | 1064 | * A successful selection is defined as one that |
| @@ -1524,7 +1525,7 @@ part2: | |||
| 1524 | dprintk(NDEBUG_SELECTION, "scsi%d : nexus established.\n", instance->host_no); | 1525 | dprintk(NDEBUG_SELECTION, "scsi%d : nexus established.\n", instance->host_no); |
| 1525 | /* XXX need to handle errors here */ | 1526 | /* XXX need to handle errors here */ |
| 1526 | hostdata->connected = cmd; | 1527 | hostdata->connected = cmd; |
| 1527 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); | 1528 | hostdata->busy[cmd->device->id] |= (1 << (cmd->device->lun & 0xFF)); |
| 1528 | 1529 | ||
| 1529 | initialize_SCp(cmd); | 1530 | initialize_SCp(cmd); |
| 1530 | 1531 | ||
| @@ -2210,14 +2211,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2210 | case LINKED_FLG_CMD_COMPLETE: | 2211 | case LINKED_FLG_CMD_COMPLETE: |
| 2211 | /* Accept message by clearing ACK */ | 2212 | /* Accept message by clearing ACK */ |
| 2212 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2213 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2213 | dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun); | 2214 | dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %llu linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun); |
| 2214 | /* | 2215 | /* |
| 2215 | * Sanity check : A linked command should only terminate with | 2216 | * Sanity check : A linked command should only terminate with |
| 2216 | * one of these messages if there are more linked commands | 2217 | * one of these messages if there are more linked commands |
| 2217 | * available. | 2218 | * available. |
| 2218 | */ | 2219 | */ |
| 2219 | if (!cmd->next_link) { | 2220 | if (!cmd->next_link) { |
| 2220 | printk("scsi%d : target %d lun %d linked command complete, no next_link\n" instance->host_no, cmd->device->id, cmd->device->lun); | 2221 | printk("scsi%d : target %d lun %llu linked command complete, no next_link\n" instance->host_no, cmd->device->id, cmd->device->lun); |
| 2221 | sink = 1; | 2222 | sink = 1; |
| 2222 | do_abort(instance); | 2223 | do_abort(instance); |
| 2223 | return; | 2224 | return; |
| @@ -2226,7 +2227,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2226 | /* The next command is still part of this process */ | 2227 | /* The next command is still part of this process */ |
| 2227 | cmd->next_link->tag = cmd->tag; | 2228 | cmd->next_link->tag = cmd->tag; |
| 2228 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); | 2229 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); |
| 2229 | dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun); | 2230 | dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %llu linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun); |
| 2230 | collect_stats(hostdata, cmd); | 2231 | collect_stats(hostdata, cmd); |
| 2231 | cmd->scsi_done(cmd); | 2232 | cmd->scsi_done(cmd); |
| 2232 | cmd = hostdata->connected; | 2233 | cmd = hostdata->connected; |
| @@ -2238,8 +2239,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2238 | sink = 1; | 2239 | sink = 1; |
| 2239 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2240 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2240 | hostdata->connected = NULL; | 2241 | hostdata->connected = NULL; |
| 2241 | dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun); | 2242 | dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d, lun %llu completed\n", instance->host_no, cmd->device->id, cmd->device->lun); |
| 2242 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 2243 | hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xFF)); |
| 2243 | 2244 | ||
| 2244 | /* | 2245 | /* |
| 2245 | * I'm not sure what the correct thing to do here is : | 2246 | * I'm not sure what the correct thing to do here is : |
| @@ -2304,7 +2305,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2304 | case ORDERED_QUEUE_TAG: | 2305 | case ORDERED_QUEUE_TAG: |
| 2305 | case SIMPLE_QUEUE_TAG: | 2306 | case SIMPLE_QUEUE_TAG: |
| 2306 | cmd->device->simple_tags = 0; | 2307 | cmd->device->simple_tags = 0; |
| 2307 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); | 2308 | hostdata->busy[cmd->device->id] |= (1 << (cmd->device->lun & 0xFF)); |
| 2308 | break; | 2309 | break; |
| 2309 | default: | 2310 | default: |
| 2310 | break; | 2311 | break; |
| @@ -2318,7 +2319,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2318 | hostdata->disconnected_queue; | 2319 | hostdata->disconnected_queue; |
| 2319 | hostdata->connected = NULL; | 2320 | hostdata->connected = NULL; |
| 2320 | hostdata->disconnected_queue = cmd; | 2321 | hostdata->disconnected_queue = cmd; |
| 2321 | dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d lun %d was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun); | 2322 | dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d lun %llu was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun); |
| 2322 | /* | 2323 | /* |
| 2323 | * Restore phase bits to 0 so an interrupted selection, | 2324 | * Restore phase bits to 0 so an interrupted selection, |
| 2324 | * arbitration can resume. | 2325 | * arbitration can resume. |
| @@ -2426,7 +2427,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { | |||
| 2426 | hostdata->last_message = msgout; | 2427 | hostdata->last_message = msgout; |
| 2427 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 2428 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
| 2428 | if (msgout == ABORT) { | 2429 | if (msgout == ABORT) { |
| 2429 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 2430 | hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xFF)); |
| 2430 | hostdata->connected = NULL; | 2431 | hostdata->connected = NULL; |
| 2431 | cmd->result = DID_ERROR << 16; | 2432 | cmd->result = DID_ERROR << 16; |
| 2432 | collect_stats(hostdata, cmd); | 2433 | collect_stats(hostdata, cmd); |
| @@ -2562,7 +2563,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) { | |||
| 2562 | 2563 | ||
| 2563 | 2564 | ||
| 2564 | for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble) | 2565 | for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble) |
| 2565 | if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun) | 2566 | if ((target_mask == (1 << tmp->device->id)) && (lun == (u8)tmp->device->lun) |
| 2566 | ) { | 2567 | ) { |
| 2567 | if (prev) { | 2568 | if (prev) { |
| 2568 | REMOVE(prev, prev->host_scribble, tmp, tmp->host_scribble); | 2569 | REMOVE(prev, prev->host_scribble, tmp, tmp->host_scribble); |
| @@ -2588,7 +2589,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) { | |||
| 2588 | do_abort(instance); | 2589 | do_abort(instance); |
| 2589 | } else { | 2590 | } else { |
| 2590 | hostdata->connected = tmp; | 2591 | hostdata->connected = tmp; |
| 2591 | dprintk(NDEBUG_RESELECTION, "scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->device->id, tmp->device->lun, tmp->tag); | 2592 | dprintk(NDEBUG_RESELECTION, "scsi%d : nexus established, target = %d, lun = %llu, tag = %d\n", instance->host_no, tmp->device->id, tmp->device->lun, tmp->tag); |
| 2592 | } | 2593 | } |
| 2593 | } | 2594 | } |
| 2594 | 2595 | ||
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c index c91888a0a23c..42c7161474f7 100644 --- a/drivers/scsi/NCR53c406a.c +++ b/drivers/scsi/NCR53c406a.c | |||
| @@ -595,7 +595,7 @@ static int NCR53c406a_release(struct Scsi_Host *shost) | |||
| 595 | { | 595 | { |
| 596 | if (shost->irq) | 596 | if (shost->irq) |
| 597 | free_irq(shost->irq, NULL); | 597 | free_irq(shost->irq, NULL); |
| 598 | #ifdef USE_DMA | 598 | #if USE_DMA |
| 599 | if (shost->dma_channel != 0xff) | 599 | if (shost->dma_channel != 0xff) |
| 600 | free_dma(shost->dma_channel); | 600 | free_dma(shost->dma_channel); |
| 601 | #endif | 601 | #endif |
| @@ -698,7 +698,7 @@ static int NCR53c406a_queue_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) | |||
| 698 | int i; | 698 | int i; |
| 699 | 699 | ||
| 700 | VDEB(printk("NCR53c406a_queue called\n")); | 700 | VDEB(printk("NCR53c406a_queue called\n")); |
| 701 | DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->target, SCpnt->lun, scsi_bufflen(SCpnt))); | 701 | DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->target, (u8)SCpnt->device->lun, scsi_bufflen(SCpnt))); |
| 702 | 702 | ||
| 703 | #if 0 | 703 | #if 0 |
| 704 | VDEB(for (i = 0; i < SCpnt->cmd_len; i++) | 704 | VDEB(for (i = 0; i < SCpnt->cmd_len; i++) |
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c index 0163457c12bb..7e33a61c1ba4 100644 --- a/drivers/scsi/a100u2w.c +++ b/drivers/scsi/a100u2w.c | |||
| @@ -891,7 +891,7 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc | |||
| 891 | printk("max cdb length= %x\b", cmd->cmd_len); | 891 | printk("max cdb length= %x\b", cmd->cmd_len); |
| 892 | scb->cdb_len = IMAX_CDB; | 892 | scb->cdb_len = IMAX_CDB; |
| 893 | } | 893 | } |
| 894 | scb->ident = cmd->device->lun | DISC_ALLOW; | 894 | scb->ident = (u8)(cmd->device->lun & 0xff) | DISC_ALLOW; |
| 895 | if (cmd->device->tagged_supported) { /* Tag Support */ | 895 | if (cmd->device->tagged_supported) { /* Tag Support */ |
| 896 | scb->tag_msg = SIMPLE_QUEUE_TAG; /* Do simple tag only */ | 896 | scb->tag_msg = SIMPLE_QUEUE_TAG; /* Do simple tag only */ |
| 897 | } else { | 897 | } else { |
| @@ -1125,23 +1125,19 @@ static int inia100_probe_one(struct pci_dev *pdev, | |||
| 1125 | 1125 | ||
| 1126 | /* Get total memory needed for SCB */ | 1126 | /* Get total memory needed for SCB */ |
| 1127 | sz = ORC_MAXQUEUE * sizeof(struct orc_scb); | 1127 | sz = ORC_MAXQUEUE * sizeof(struct orc_scb); |
| 1128 | host->scb_virt = pci_alloc_consistent(pdev, sz, | 1128 | host->scb_virt = pci_zalloc_consistent(pdev, sz, &host->scb_phys); |
| 1129 | &host->scb_phys); | ||
| 1130 | if (!host->scb_virt) { | 1129 | if (!host->scb_virt) { |
| 1131 | printk("inia100: SCB memory allocation error\n"); | 1130 | printk("inia100: SCB memory allocation error\n"); |
| 1132 | goto out_host_put; | 1131 | goto out_host_put; |
| 1133 | } | 1132 | } |
| 1134 | memset(host->scb_virt, 0, sz); | ||
| 1135 | 1133 | ||
| 1136 | /* Get total memory needed for ESCB */ | 1134 | /* Get total memory needed for ESCB */ |
| 1137 | sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb); | 1135 | sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb); |
| 1138 | host->escb_virt = pci_alloc_consistent(pdev, sz, | 1136 | host->escb_virt = pci_zalloc_consistent(pdev, sz, &host->escb_phys); |
| 1139 | &host->escb_phys); | ||
| 1140 | if (!host->escb_virt) { | 1137 | if (!host->escb_virt) { |
| 1141 | printk("inia100: ESCB memory allocation error\n"); | 1138 | printk("inia100: ESCB memory allocation error\n"); |
| 1142 | goto out_free_scb_array; | 1139 | goto out_free_scb_array; |
| 1143 | } | 1140 | } |
| 1144 | memset(host->escb_virt, 0, sz); | ||
| 1145 | 1141 | ||
| 1146 | biosaddr = host->BIOScfg; | 1142 | biosaddr = host->BIOScfg; |
| 1147 | biosaddr = (biosaddr << 4); | 1143 | biosaddr = (biosaddr << 4); |
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 4921ed19a027..63f576c9300a 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
| @@ -551,7 +551,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd) | |||
| 551 | int count; | 551 | int count; |
| 552 | int ret = FAILED; | 552 | int ret = FAILED; |
| 553 | 553 | ||
| 554 | printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%d)\n", | 554 | printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%llu)\n", |
| 555 | AAC_DRIVERNAME, | 555 | AAC_DRIVERNAME, |
| 556 | host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun); | 556 | host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun); |
| 557 | switch (cmd->cmnd[0]) { | 557 | switch (cmd->cmnd[0]) { |
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index d8145888e66a..43761c1c46f0 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c | |||
| @@ -2512,7 +2512,7 @@ static void asc_prt_scsi_host(struct Scsi_Host *s) | |||
| 2512 | 2512 | ||
| 2513 | printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev)); | 2513 | printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev)); |
| 2514 | printk(" host_busy %u, host_no %d,\n", | 2514 | printk(" host_busy %u, host_no %d,\n", |
| 2515 | s->host_busy, s->host_no); | 2515 | atomic_read(&s->host_busy), s->host_no); |
| 2516 | 2516 | ||
| 2517 | printk(" base 0x%lx, io_port 0x%lx, irq %d,\n", | 2517 | printk(" base 0x%lx, io_port 0x%lx, irq %d,\n", |
| 2518 | (ulong)s->base, (ulong)s->io_port, boardp->irq); | 2518 | (ulong)s->base, (ulong)s->io_port, boardp->irq); |
| @@ -3345,8 +3345,8 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost) | |||
| 3345 | shost->host_no); | 3345 | shost->host_no); |
| 3346 | 3346 | ||
| 3347 | seq_printf(m, | 3347 | seq_printf(m, |
| 3348 | " host_busy %u, max_id %u, max_lun %u, max_channel %u\n", | 3348 | " host_busy %u, max_id %u, max_lun %llu, max_channel %u\n", |
| 3349 | shost->host_busy, shost->max_id, | 3349 | atomic_read(&shost->host_busy), shost->max_id, |
| 3350 | shost->max_lun, shost->max_channel); | 3350 | shost->max_lun, shost->max_channel); |
| 3351 | 3351 | ||
| 3352 | seq_printf(m, | 3352 | seq_printf(m, |
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index e86eb6a921fc..e77b72f78006 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c | |||
| @@ -321,7 +321,7 @@ static LIST_HEAD(aha152x_host_list); | |||
| 321 | #define CMDINFO(cmd) \ | 321 | #define CMDINFO(cmd) \ |
| 322 | (cmd) ? ((cmd)->device->host->host_no) : -1, \ | 322 | (cmd) ? ((cmd)->device->host->host_no) : -1, \ |
| 323 | (cmd) ? ((cmd)->device->id & 0x0f) : -1, \ | 323 | (cmd) ? ((cmd)->device->id & 0x0f) : -1, \ |
| 324 | (cmd) ? ((cmd)->device->lun & 0x07) : -1 | 324 | (cmd) ? ((u8)(cmd)->device->lun & 0x07) : -1 |
| 325 | 325 | ||
| 326 | static inline void | 326 | static inline void |
| 327 | CMD_INC_RESID(struct scsi_cmnd *cmd, int inc) | 327 | CMD_INC_RESID(struct scsi_cmnd *cmd, int inc) |
| @@ -1602,7 +1602,7 @@ static void busfree_run(struct Scsi_Host *shpnt) | |||
| 1602 | #if defined(AHA152X_DEBUG) | 1602 | #if defined(AHA152X_DEBUG) |
| 1603 | int hostno=DONE_SC->device->host->host_no; | 1603 | int hostno=DONE_SC->device->host->host_no; |
| 1604 | int id=DONE_SC->device->id & 0xf; | 1604 | int id=DONE_SC->device->id & 0xf; |
| 1605 | int lun=DONE_SC->device->lun & 0x7; | 1605 | int lun=((u8)DONE_SC->device->lun) & 0x7; |
| 1606 | #endif | 1606 | #endif |
| 1607 | Scsi_Cmnd *ptr = DONE_SC; | 1607 | Scsi_Cmnd *ptr = DONE_SC; |
| 1608 | DONE_SC=NULL; | 1608 | DONE_SC=NULL; |
| @@ -2984,7 +2984,7 @@ static void get_command(struct seq_file *m, Scsi_Cmnd * ptr) | |||
| 2984 | int i; | 2984 | int i; |
| 2985 | 2985 | ||
| 2986 | SPRINTF("%p: target=%d; lun=%d; cmnd=( ", | 2986 | SPRINTF("%p: target=%d; lun=%d; cmnd=( ", |
| 2987 | ptr, ptr->device->id, ptr->device->lun); | 2987 | ptr, ptr->device->id, (u8)ptr->device->lun); |
| 2988 | 2988 | ||
| 2989 | for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++) | 2989 | for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++) |
| 2990 | SPRINTF("0x%02x ", ptr->cmnd[i]); | 2990 | SPRINTF("0x%02x ", ptr->cmnd[i]); |
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c index 0cb8ef64b5ce..3d401d02c019 100644 --- a/drivers/scsi/aic7xxx/aic7770_osm.c +++ b/drivers/scsi/aic7xxx/aic7770_osm.c | |||
| @@ -85,10 +85,9 @@ aic7770_probe(struct device *dev) | |||
| 85 | int error; | 85 | int error; |
| 86 | 86 | ||
| 87 | sprintf(buf, "ahc_eisa:%d", eisaBase >> 12); | 87 | sprintf(buf, "ahc_eisa:%d", eisaBase >> 12); |
| 88 | name = kmalloc(strlen(buf) + 1, GFP_ATOMIC); | 88 | name = kstrdup(buf, GFP_ATOMIC); |
| 89 | if (name == NULL) | 89 | if (name == NULL) |
| 90 | return (ENOMEM); | 90 | return (ENOMEM); |
| 91 | strcpy(name, buf); | ||
| 92 | ahc = ahc_alloc(&aic7xxx_driver_template, name); | 91 | ahc = ahc_alloc(&aic7xxx_driver_template, name); |
| 93 | if (ahc == NULL) | 92 | if (ahc == NULL) |
| 94 | return (ENOMEM); | 93 | return (ENOMEM); |
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h index 113874c1284b..df2e0e5367d2 100644 --- a/drivers/scsi/aic7xxx/aic79xx.h +++ b/drivers/scsi/aic7xxx/aic79xx.h | |||
| @@ -115,7 +115,7 @@ struct scb_platform_data; | |||
| 115 | #endif | 115 | #endif |
| 116 | 116 | ||
| 117 | #define AHD_BUILD_COL_IDX(target, lun) \ | 117 | #define AHD_BUILD_COL_IDX(target, lun) \ |
| 118 | (((lun) << 4) | target) | 118 | ((((u8)lun) << 4) | target) |
| 119 | 119 | ||
| 120 | #define AHD_GET_SCB_COL_IDX(ahd, scb) \ | 120 | #define AHD_GET_SCB_COL_IDX(ahd, scb) \ |
| 121 | ((SCB_GET_LUN(scb) << 4) | SCB_GET_TARGET(ahd, scb)) | 121 | ((SCB_GET_LUN(scb) << 4) | SCB_GET_TARGET(ahd, scb)) |
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index 69d5c43a65e5..ed333669a7dc 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c | |||
| @@ -2137,7 +2137,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd) | |||
| 2137 | if (do_fallback) { | 2137 | if (do_fallback) { |
| 2138 | printk("%s: device overrun (status %x) on %d:%d:%d\n", | 2138 | printk("%s: device overrun (status %x) on %d:%d:%d\n", |
| 2139 | ahd_name(ahd), status, cmd->device->channel, | 2139 | ahd_name(ahd), status, cmd->device->channel, |
| 2140 | cmd->device->id, cmd->device->lun); | 2140 | cmd->device->id, (u8)cmd->device->lun); |
| 2141 | } | 2141 | } |
| 2142 | 2142 | ||
| 2143 | ahd_cmd_set_transaction_status(cmd, new_status); | 2143 | ahd_cmd_set_transaction_status(cmd, new_status); |
| @@ -2253,13 +2253,13 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd) | |||
| 2253 | disconnected = TRUE; | 2253 | disconnected = TRUE; |
| 2254 | if (ahd_search_qinfifo(ahd, cmd->device->id, | 2254 | if (ahd_search_qinfifo(ahd, cmd->device->id, |
| 2255 | cmd->device->channel + 'A', | 2255 | cmd->device->channel + 'A', |
| 2256 | cmd->device->lun, | 2256 | cmd->device->lun, |
| 2257 | pending_scb->hscb->tag, | 2257 | pending_scb->hscb->tag, |
| 2258 | ROLE_INITIATOR, CAM_REQ_ABORTED, | 2258 | ROLE_INITIATOR, CAM_REQ_ABORTED, |
| 2259 | SEARCH_COMPLETE) > 0) { | 2259 | SEARCH_COMPLETE) > 0) { |
| 2260 | printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n", | 2260 | printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n", |
| 2261 | ahd_name(ahd), cmd->device->channel, | 2261 | ahd_name(ahd), cmd->device->channel, |
| 2262 | cmd->device->id, cmd->device->lun); | 2262 | cmd->device->id, (u8)cmd->device->lun); |
| 2263 | retval = SUCCESS; | 2263 | retval = SUCCESS; |
| 2264 | goto done; | 2264 | goto done; |
| 2265 | } | 2265 | } |
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c index 3c85873b14b9..8466aa784ec1 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c | |||
| @@ -178,10 +178,9 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 178 | ahd_get_pci_bus(pci), | 178 | ahd_get_pci_bus(pci), |
| 179 | ahd_get_pci_slot(pci), | 179 | ahd_get_pci_slot(pci), |
| 180 | ahd_get_pci_function(pci)); | 180 | ahd_get_pci_function(pci)); |
| 181 | name = kmalloc(strlen(buf) + 1, GFP_ATOMIC); | 181 | name = kstrdup(buf, GFP_ATOMIC); |
| 182 | if (name == NULL) | 182 | if (name == NULL) |
| 183 | return (-ENOMEM); | 183 | return (-ENOMEM); |
| 184 | strcpy(name, buf); | ||
| 185 | ahd = ahd_alloc(NULL, name); | 184 | ahd = ahd_alloc(NULL, name); |
| 186 | if (ahd == NULL) | 185 | if (ahd == NULL) |
| 187 | return (-ENOMEM); | 186 | return (-ENOMEM); |
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c index e9778b4f7e32..27dbfccea774 100644 --- a/drivers/scsi/aic7xxx/aic79xx_proc.c +++ b/drivers/scsi/aic7xxx/aic79xx_proc.c | |||
| @@ -197,7 +197,7 @@ ahd_dump_device_state(struct seq_file *m, struct scsi_device *sdev) | |||
| 197 | 197 | ||
| 198 | seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n", | 198 | seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n", |
| 199 | sdev->sdev_target->channel + 'A', | 199 | sdev->sdev_target->channel + 'A', |
| 200 | sdev->sdev_target->id, sdev->lun); | 200 | sdev->sdev_target->id, (u8)sdev->lun); |
| 201 | 201 | ||
| 202 | seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued); | 202 | seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued); |
| 203 | seq_printf(m, "\t\tCommands Active %d\n", dev->active); | 203 | seq_printf(m, "\t\tCommands Active %d\n", dev->active); |
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index 114ff0c6e311..d2c9bf39033d 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c | |||
| @@ -2110,7 +2110,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag) | |||
| 2110 | */ | 2110 | */ |
| 2111 | printk("%s:%d:%d:%d: Is not an active device\n", | 2111 | printk("%s:%d:%d:%d: Is not an active device\n", |
| 2112 | ahc_name(ahc), cmd->device->channel, cmd->device->id, | 2112 | ahc_name(ahc), cmd->device->channel, cmd->device->id, |
| 2113 | cmd->device->lun); | 2113 | (u8)cmd->device->lun); |
| 2114 | retval = SUCCESS; | 2114 | retval = SUCCESS; |
| 2115 | goto no_cmd; | 2115 | goto no_cmd; |
| 2116 | } | 2116 | } |
| @@ -2118,11 +2118,11 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag) | |||
| 2118 | if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0 | 2118 | if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0 |
| 2119 | && ahc_search_untagged_queues(ahc, cmd, cmd->device->id, | 2119 | && ahc_search_untagged_queues(ahc, cmd, cmd->device->id, |
| 2120 | cmd->device->channel + 'A', | 2120 | cmd->device->channel + 'A', |
| 2121 | cmd->device->lun, | 2121 | (u8)cmd->device->lun, |
| 2122 | CAM_REQ_ABORTED, SEARCH_COMPLETE) != 0) { | 2122 | CAM_REQ_ABORTED, SEARCH_COMPLETE) != 0) { |
| 2123 | printk("%s:%d:%d:%d: Command found on untagged queue\n", | 2123 | printk("%s:%d:%d:%d: Command found on untagged queue\n", |
| 2124 | ahc_name(ahc), cmd->device->channel, cmd->device->id, | 2124 | ahc_name(ahc), cmd->device->channel, cmd->device->id, |
| 2125 | cmd->device->lun); | 2125 | (u8)cmd->device->lun); |
| 2126 | retval = SUCCESS; | 2126 | retval = SUCCESS; |
| 2127 | goto done; | 2127 | goto done; |
| 2128 | } | 2128 | } |
| @@ -2188,13 +2188,14 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag) | |||
| 2188 | SEARCH_COMPLETE) > 0) { | 2188 | SEARCH_COMPLETE) > 0) { |
| 2189 | printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n", | 2189 | printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n", |
| 2190 | ahc_name(ahc), cmd->device->channel, | 2190 | ahc_name(ahc), cmd->device->channel, |
| 2191 | cmd->device->id, cmd->device->lun); | 2191 | cmd->device->id, (u8)cmd->device->lun); |
| 2192 | retval = SUCCESS; | 2192 | retval = SUCCESS; |
| 2193 | goto done; | 2193 | goto done; |
| 2194 | } | 2194 | } |
| 2195 | } else if (ahc_search_qinfifo(ahc, cmd->device->id, | 2195 | } else if (ahc_search_qinfifo(ahc, cmd->device->id, |
| 2196 | cmd->device->channel + 'A', | 2196 | cmd->device->channel + 'A', |
| 2197 | cmd->device->lun, pending_scb->hscb->tag, | 2197 | cmd->device->lun, |
| 2198 | pending_scb->hscb->tag, | ||
| 2198 | ROLE_INITIATOR, /*status*/0, | 2199 | ROLE_INITIATOR, /*status*/0, |
| 2199 | SEARCH_COUNT) > 0) { | 2200 | SEARCH_COUNT) > 0) { |
| 2200 | disconnected = FALSE; | 2201 | disconnected = FALSE; |
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c index ee05e8410754..0fc14dac7070 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | |||
| @@ -225,10 +225,9 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 225 | ahc_get_pci_bus(pci), | 225 | ahc_get_pci_bus(pci), |
| 226 | ahc_get_pci_slot(pci), | 226 | ahc_get_pci_slot(pci), |
| 227 | ahc_get_pci_function(pci)); | 227 | ahc_get_pci_function(pci)); |
| 228 | name = kmalloc(strlen(buf) + 1, GFP_ATOMIC); | 228 | name = kstrdup(buf, GFP_ATOMIC); |
| 229 | if (name == NULL) | 229 | if (name == NULL) |
| 230 | return (-ENOMEM); | 230 | return (-ENOMEM); |
| 231 | strcpy(name, buf); | ||
| 232 | ahc = ahc_alloc(NULL, name); | 231 | ahc = ahc_alloc(NULL, name); |
| 233 | if (ahc == NULL) | 232 | if (ahc == NULL) |
| 234 | return (-ENOMEM); | 233 | return (-ENOMEM); |
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c index 383a3d11652d..64eec6c07a83 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_proc.c +++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c | |||
| @@ -175,7 +175,7 @@ ahc_dump_device_state(struct seq_file *m, struct scsi_device *sdev) | |||
| 175 | 175 | ||
| 176 | seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n", | 176 | seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n", |
| 177 | sdev->sdev_target->channel + 'A', | 177 | sdev->sdev_target->channel + 'A', |
| 178 | sdev->sdev_target->id, sdev->lun); | 178 | sdev->sdev_target->id, (u8)sdev->lun); |
| 179 | 179 | ||
| 180 | seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued); | 180 | seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued); |
| 181 | seq_printf(m, "\t\tCommands Active %d\n", dev->active); | 181 | seq_printf(m, "\t\tCommands Active %d\n", dev->active); |
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 652b41b4ddbd..b13764ca23fd 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c | |||
| @@ -2335,7 +2335,7 @@ static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, | |||
| 2335 | " poll command abort successfully \n" | 2335 | " poll command abort successfully \n" |
| 2336 | , acb->host->host_no | 2336 | , acb->host->host_no |
| 2337 | , ccb->pcmd->device->id | 2337 | , ccb->pcmd->device->id |
| 2338 | , ccb->pcmd->device->lun | 2338 | , (u32)ccb->pcmd->device->lun |
| 2339 | , ccb); | 2339 | , ccb); |
| 2340 | ccb->pcmd->result = DID_ABORT << 16; | 2340 | ccb->pcmd->result = DID_ABORT << 16; |
| 2341 | arcmsr_ccb_complete(ccb); | 2341 | arcmsr_ccb_complete(ccb); |
| @@ -2399,7 +2399,7 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, | |||
| 2399 | " poll command abort successfully \n" | 2399 | " poll command abort successfully \n" |
| 2400 | ,acb->host->host_no | 2400 | ,acb->host->host_no |
| 2401 | ,ccb->pcmd->device->id | 2401 | ,ccb->pcmd->device->id |
| 2402 | ,ccb->pcmd->device->lun | 2402 | ,(u32)ccb->pcmd->device->lun |
| 2403 | ,ccb); | 2403 | ,ccb); |
| 2404 | ccb->pcmd->result = DID_ABORT << 16; | 2404 | ccb->pcmd->result = DID_ABORT << 16; |
| 2405 | arcmsr_ccb_complete(ccb); | 2405 | arcmsr_ccb_complete(ccb); |
| @@ -2456,7 +2456,7 @@ polling_hbc_ccb_retry: | |||
| 2456 | " poll command abort successfully \n" | 2456 | " poll command abort successfully \n" |
| 2457 | , acb->host->host_no | 2457 | , acb->host->host_no |
| 2458 | , pCCB->pcmd->device->id | 2458 | , pCCB->pcmd->device->id |
| 2459 | , pCCB->pcmd->device->lun | 2459 | , (u32)pCCB->pcmd->device->lun |
| 2460 | , pCCB); | 2460 | , pCCB); |
| 2461 | pCCB->pcmd->result = DID_ABORT << 16; | 2461 | pCCB->pcmd->result = DID_ABORT << 16; |
| 2462 | arcmsr_ccb_complete(pCCB); | 2462 | arcmsr_ccb_complete(pCCB); |
| @@ -3058,7 +3058,7 @@ static int arcmsr_abort(struct scsi_cmnd *cmd) | |||
| 3058 | int rtn = FAILED; | 3058 | int rtn = FAILED; |
| 3059 | printk(KERN_NOTICE | 3059 | printk(KERN_NOTICE |
| 3060 | "arcmsr%d: abort device command of scsi id = %d lun = %d \n", | 3060 | "arcmsr%d: abort device command of scsi id = %d lun = %d \n", |
| 3061 | acb->host->host_no, cmd->device->id, cmd->device->lun); | 3061 | acb->host->host_no, cmd->device->id, (u32)cmd->device->lun); |
| 3062 | acb->acb_flags |= ACB_F_ABORT; | 3062 | acb->acb_flags |= ACB_F_ABORT; |
| 3063 | acb->num_aborts++; | 3063 | acb->num_aborts++; |
| 3064 | /* | 3064 | /* |
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index 2e797a367608..d89b9b4deb3c 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c | |||
| @@ -760,7 +760,8 @@ intr_ret_t acornscsi_kick(AS_Host *host) | |||
| 760 | SCpnt->tag = SCpnt->device->current_tag; | 760 | SCpnt->tag = SCpnt->device->current_tag; |
| 761 | } else | 761 | } else |
| 762 | #endif | 762 | #endif |
| 763 | set_bit(SCpnt->device->id * 8 + SCpnt->device->lun, host->busyluns); | 763 | set_bit(SCpnt->device->id * 8 + |
| 764 | (u8)(SCpnt->device->lun & 0x07), host->busyluns); | ||
| 764 | 765 | ||
| 765 | host->stats.removes += 1; | 766 | host->stats.removes += 1; |
| 766 | 767 | ||
| @@ -863,7 +864,8 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp, | |||
| 863 | if (!SCpnt->scsi_done) | 864 | if (!SCpnt->scsi_done) |
| 864 | panic("scsi%d.H: null scsi_done function in acornscsi_done", host->host->host_no); | 865 | panic("scsi%d.H: null scsi_done function in acornscsi_done", host->host->host_no); |
| 865 | 866 | ||
| 866 | clear_bit(SCpnt->device->id * 8 + SCpnt->device->lun, host->busyluns); | 867 | clear_bit(SCpnt->device->id * 8 + |
| 868 | (u8)(SCpnt->device->lun & 0x7), host->busyluns); | ||
| 867 | 869 | ||
| 868 | SCpnt->scsi_done(SCpnt); | 870 | SCpnt->scsi_done(SCpnt); |
| 869 | } else | 871 | } else |
| @@ -1576,7 +1578,8 @@ void acornscsi_message(AS_Host *host) | |||
| 1576 | printk(KERN_NOTICE "scsi%d.%c: disabling tagged queueing\n", | 1578 | printk(KERN_NOTICE "scsi%d.%c: disabling tagged queueing\n", |
| 1577 | host->host->host_no, acornscsi_target(host)); | 1579 | host->host->host_no, acornscsi_target(host)); |
| 1578 | host->SCpnt->device->simple_tags = 0; | 1580 | host->SCpnt->device->simple_tags = 0; |
| 1579 | set_bit(host->SCpnt->device->id * 8 + host->SCpnt->device->lun, host->busyluns); | 1581 | set_bit(host->SCpnt->device->id * 8 + |
| 1582 | (u8)(host->SCpnt->device->lun & 0x7), host->busyluns); | ||
| 1580 | break; | 1583 | break; |
| 1581 | #endif | 1584 | #endif |
| 1582 | case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8): | 1585 | case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8): |
| @@ -2671,7 +2674,8 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt) | |||
| 2671 | //#if (DEBUG & DEBUG_ABORT) | 2674 | //#if (DEBUG & DEBUG_ABORT) |
| 2672 | printk("clear "); | 2675 | printk("clear "); |
| 2673 | //#endif | 2676 | //#endif |
| 2674 | clear_bit(SCpnt->device->id * 8 + SCpnt->device->lun, host->busyluns); | 2677 | clear_bit(SCpnt->device->id * 8 + |
| 2678 | (u8)(SCpnt->device->lun & 0x7), host->busyluns); | ||
| 2675 | 2679 | ||
| 2676 | /* | 2680 | /* |
| 2677 | * We found the command, and cleared it out. Either | 2681 | * We found the command, and cleared it out. Either |
| @@ -2853,7 +2857,7 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
| 2853 | 2857 | ||
| 2854 | shost_for_each_device(scd, instance) { | 2858 | shost_for_each_device(scd, instance) { |
| 2855 | seq_printf(m, "Device/Lun TaggedQ Sync\n"); | 2859 | seq_printf(m, "Device/Lun TaggedQ Sync\n"); |
| 2856 | seq_printf(m, " %d/%d ", scd->id, scd->lun); | 2860 | seq_printf(m, " %d/%llu ", scd->id, scd->lun); |
| 2857 | if (scd->tagged_supported) | 2861 | if (scd->tagged_supported) |
| 2858 | seq_printf(m, "%3sabled(%3d) ", | 2862 | seq_printf(m, "%3sabled(%3d) ", |
| 2859 | scd->simple_tags ? "en" : "dis", | 2863 | scd->simple_tags ? "en" : "dis", |
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index b46a6f6c0eb3..71cfb1e504c4 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c | |||
| @@ -1821,7 +1821,8 @@ static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt) | |||
| 1821 | SCpnt->tag = SCpnt->device->current_tag; | 1821 | SCpnt->tag = SCpnt->device->current_tag; |
| 1822 | } else | 1822 | } else |
| 1823 | #endif | 1823 | #endif |
| 1824 | set_bit(SCpnt->device->id * 8 + SCpnt->device->lun, info->busyluns); | 1824 | set_bit(SCpnt->device->id * 8 + |
| 1825 | (u8)(SCpnt->device->lun & 0x7), info->busyluns); | ||
| 1825 | 1826 | ||
| 1826 | info->stats.removes += 1; | 1827 | info->stats.removes += 1; |
| 1827 | switch (SCpnt->cmnd[0]) { | 1828 | switch (SCpnt->cmnd[0]) { |
| @@ -2171,7 +2172,8 @@ static void fas216_done(FAS216_Info *info, unsigned int result) | |||
| 2171 | * status. | 2172 | * status. |
| 2172 | */ | 2173 | */ |
| 2173 | info->device[SCpnt->device->id].parity_check = 0; | 2174 | info->device[SCpnt->device->id].parity_check = 0; |
| 2174 | clear_bit(SCpnt->device->id * 8 + SCpnt->device->lun, info->busyluns); | 2175 | clear_bit(SCpnt->device->id * 8 + |
| 2176 | (u8)(SCpnt->device->lun & 0x7), info->busyluns); | ||
| 2175 | 2177 | ||
| 2176 | fn = (void (*)(FAS216_Info *, struct scsi_cmnd *, unsigned int))SCpnt->host_scribble; | 2178 | fn = (void (*)(FAS216_Info *, struct scsi_cmnd *, unsigned int))SCpnt->host_scribble; |
| 2177 | fn(info, SCpnt, result); | 2179 | fn(info, SCpnt, result); |
| @@ -2398,7 +2400,8 @@ static enum res_find fas216_find_command(FAS216_Info *info, | |||
| 2398 | * been set. | 2400 | * been set. |
| 2399 | */ | 2401 | */ |
| 2400 | info->origSCpnt = NULL; | 2402 | info->origSCpnt = NULL; |
| 2401 | clear_bit(SCpnt->device->id * 8 + SCpnt->device->lun, info->busyluns); | 2403 | clear_bit(SCpnt->device->id * 8 + |
| 2404 | (u8)(SCpnt->device->lun & 0x7), info->busyluns); | ||
| 2402 | printk("waiting for execution "); | 2405 | printk("waiting for execution "); |
| 2403 | res = res_success; | 2406 | res = res_success; |
| 2404 | } else | 2407 | } else |
| @@ -3000,7 +3003,7 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m) | |||
| 3000 | 3003 | ||
| 3001 | shost_for_each_device(scd, info->host) { | 3004 | shost_for_each_device(scd, info->host) { |
| 3002 | dev = &info->device[scd->id]; | 3005 | dev = &info->device[scd->id]; |
| 3003 | seq_printf(m, " %d/%d ", scd->id, scd->lun); | 3006 | seq_printf(m, " %d/%llu ", scd->id, scd->lun); |
| 3004 | if (scd->tagged_supported) | 3007 | if (scd->tagged_supported) |
| 3005 | seq_printf(m, "%3sabled(%3d) ", | 3008 | seq_printf(m, "%3sabled(%3d) ", |
| 3006 | scd->simple_tags ? "en" : "dis", | 3009 | scd->simple_tags ? "en" : "dis", |
diff --git a/drivers/scsi/arm/queue.c b/drivers/scsi/arm/queue.c index cb11ccef54e5..3441ce3ebabf 100644 --- a/drivers/scsi/arm/queue.c +++ b/drivers/scsi/arm/queue.c | |||
| @@ -167,7 +167,8 @@ struct scsi_cmnd *queue_remove_exclude(Queue_t *queue, unsigned long *exclude) | |||
| 167 | spin_lock_irqsave(&queue->queue_lock, flags); | 167 | spin_lock_irqsave(&queue->queue_lock, flags); |
| 168 | list_for_each(l, &queue->head) { | 168 | list_for_each(l, &queue->head) { |
| 169 | QE_t *q = list_entry(l, QE_t, list); | 169 | QE_t *q = list_entry(l, QE_t, list); |
| 170 | if (!test_bit(q->SCpnt->device->id * 8 + q->SCpnt->device->lun, exclude)) { | 170 | if (!test_bit(q->SCpnt->device->id * 8 + |
| 171 | (u8)(q->SCpnt->device->lun & 0x7), exclude)) { | ||
| 171 | SCpnt = __queue_remove(queue, l); | 172 | SCpnt = __queue_remove(queue, l); |
| 172 | break; | 173 | break; |
| 173 | } | 174 | } |
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index 1814aa20b724..79e6f045c2a9 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c | |||
| @@ -361,17 +361,18 @@ static void __init init_tags(void) | |||
| 361 | 361 | ||
| 362 | static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged) | 362 | static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged) |
| 363 | { | 363 | { |
| 364 | u8 lun = cmd->device->lun; | ||
| 364 | SETUP_HOSTDATA(cmd->device->host); | 365 | SETUP_HOSTDATA(cmd->device->host); |
| 365 | 366 | ||
| 366 | if (hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)) | 367 | if (hostdata->busy[cmd->device->id] & (1 << lun)) |
| 367 | return 1; | 368 | return 1; |
| 368 | if (!should_be_tagged || | 369 | if (!should_be_tagged || |
| 369 | !setup_use_tagged_queuing || !cmd->device->tagged_supported) | 370 | !setup_use_tagged_queuing || !cmd->device->tagged_supported) |
| 370 | return 0; | 371 | return 0; |
| 371 | if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >= | 372 | if (TagAlloc[cmd->device->id][lun].nr_allocated >= |
| 372 | TagAlloc[cmd->device->id][cmd->device->lun].queue_size) { | 373 | TagAlloc[cmd->device->id][lun].queue_size) { |
| 373 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", | 374 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", |
| 374 | H_NO(cmd), cmd->device->id, cmd->device->lun); | 375 | H_NO(cmd), cmd->device->id, lun); |
| 375 | return 1; | 376 | return 1; |
| 376 | } | 377 | } |
| 377 | return 0; | 378 | return 0; |
| @@ -385,6 +386,7 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged) | |||
| 385 | 386 | ||
| 386 | static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) | 387 | static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) |
| 387 | { | 388 | { |
| 389 | u8 lun = cmd->device->lun; | ||
| 388 | SETUP_HOSTDATA(cmd->device->host); | 390 | SETUP_HOSTDATA(cmd->device->host); |
| 389 | 391 | ||
| 390 | /* If we or the target don't support tagged queuing, allocate the LUN for | 392 | /* If we or the target don't support tagged queuing, allocate the LUN for |
| @@ -393,11 +395,11 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) | |||
| 393 | if (!should_be_tagged || | 395 | if (!should_be_tagged || |
| 394 | !setup_use_tagged_queuing || !cmd->device->tagged_supported) { | 396 | !setup_use_tagged_queuing || !cmd->device->tagged_supported) { |
| 395 | cmd->tag = TAG_NONE; | 397 | cmd->tag = TAG_NONE; |
| 396 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); | 398 | hostdata->busy[cmd->device->id] |= (1 << lun); |
| 397 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " | 399 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " |
| 398 | "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun); | 400 | "command\n", H_NO(cmd), cmd->device->id, lun); |
| 399 | } else { | 401 | } else { |
| 400 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; | 402 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun]; |
| 401 | 403 | ||
| 402 | cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS); | 404 | cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS); |
| 403 | set_bit(cmd->tag, ta->allocated); | 405 | set_bit(cmd->tag, ta->allocated); |
| @@ -405,7 +407,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) | |||
| 405 | dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d " | 407 | dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d " |
| 406 | "(now %d tags in use)\n", | 408 | "(now %d tags in use)\n", |
| 407 | H_NO(cmd), cmd->tag, cmd->device->id, | 409 | H_NO(cmd), cmd->tag, cmd->device->id, |
| 408 | cmd->device->lun, ta->nr_allocated); | 410 | lun, ta->nr_allocated); |
| 409 | } | 411 | } |
| 410 | } | 412 | } |
| 411 | 413 | ||
| @@ -416,21 +418,22 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) | |||
| 416 | 418 | ||
| 417 | static void cmd_free_tag(Scsi_Cmnd *cmd) | 419 | static void cmd_free_tag(Scsi_Cmnd *cmd) |
| 418 | { | 420 | { |
| 421 | u8 lun = cmd->device->lun; | ||
| 419 | SETUP_HOSTDATA(cmd->device->host); | 422 | SETUP_HOSTDATA(cmd->device->host); |
| 420 | 423 | ||
| 421 | if (cmd->tag == TAG_NONE) { | 424 | if (cmd->tag == TAG_NONE) { |
| 422 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 425 | hostdata->busy[cmd->device->id] &= ~(1 << lun); |
| 423 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n", | 426 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n", |
| 424 | H_NO(cmd), cmd->device->id, cmd->device->lun); | 427 | H_NO(cmd), cmd->device->id, lun); |
| 425 | } else if (cmd->tag >= MAX_TAGS) { | 428 | } else if (cmd->tag >= MAX_TAGS) { |
| 426 | printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", | 429 | printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", |
| 427 | H_NO(cmd), cmd->tag); | 430 | H_NO(cmd), cmd->tag); |
| 428 | } else { | 431 | } else { |
| 429 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; | 432 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun]; |
| 430 | clear_bit(cmd->tag, ta->allocated); | 433 | clear_bit(cmd->tag, ta->allocated); |
| 431 | ta->nr_allocated--; | 434 | ta->nr_allocated--; |
| 432 | dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", | 435 | dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", |
| 433 | H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun); | 436 | H_NO(cmd), cmd->tag, cmd->device->id, lun); |
| 434 | } | 437 | } |
| 435 | } | 438 | } |
| 436 | 439 | ||
| @@ -713,7 +716,7 @@ static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd) | |||
| 713 | { | 716 | { |
| 714 | int i, s; | 717 | int i, s; |
| 715 | unsigned char *command; | 718 | unsigned char *command; |
| 716 | printk("scsi%d: destination target %d, lun %d\n", | 719 | printk("scsi%d: destination target %d, lun %llu\n", |
| 717 | H_NO(cmd), cmd->device->id, cmd->device->lun); | 720 | H_NO(cmd), cmd->device->id, cmd->device->lun); |
| 718 | printk(KERN_CONT " command = "); | 721 | printk(KERN_CONT " command = "); |
| 719 | command = cmd->cmnd; | 722 | command = cmd->cmnd; |
| @@ -759,7 +762,7 @@ static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m) | |||
| 759 | { | 762 | { |
| 760 | int i, s; | 763 | int i, s; |
| 761 | unsigned char *command; | 764 | unsigned char *command; |
| 762 | seq_printf(m, "scsi%d: destination target %d, lun %d\n", | 765 | seq_printf(m, "scsi%d: destination target %d, lun %llu\n", |
| 763 | H_NO(cmd), cmd->device->id, cmd->device->lun); | 766 | H_NO(cmd), cmd->device->id, cmd->device->lun); |
| 764 | seq_printf(m, " command = "); | 767 | seq_printf(m, " command = "); |
| 765 | command = cmd->cmnd; | 768 | command = cmd->cmnd; |
| @@ -1060,12 +1063,13 @@ static void NCR5380_main(struct work_struct *work) | |||
| 1060 | #endif | 1063 | #endif |
| 1061 | for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, | 1064 | for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, |
| 1062 | prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) { | 1065 | prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) { |
| 1066 | u8 lun = tmp->device->lun; | ||
| 1063 | 1067 | ||
| 1064 | #if (NDEBUG & NDEBUG_LISTS) | 1068 | #if (NDEBUG & NDEBUG_LISTS) |
| 1065 | if (prev != tmp) | 1069 | if (prev != tmp) |
| 1066 | printk("MAIN tmp=%p target=%d busy=%d lun=%d\n", | 1070 | printk("MAIN tmp=%p target=%d busy=%d lun=%llu\n", |
| 1067 | tmp, tmp->device->id, hostdata->busy[tmp->device->id], | 1071 | tmp, tmp->device->id, hostdata->busy[tmp->device->id], |
| 1068 | tmp->device->lun); | 1072 | lun); |
| 1069 | #endif | 1073 | #endif |
| 1070 | /* When we find one, remove it from the issue queue. */ | 1074 | /* When we find one, remove it from the issue queue. */ |
| 1071 | /* ++guenther: possible race with Falcon locking */ | 1075 | /* ++guenther: possible race with Falcon locking */ |
| @@ -1073,7 +1077,7 @@ static void NCR5380_main(struct work_struct *work) | |||
| 1073 | #ifdef SUPPORT_TAGS | 1077 | #ifdef SUPPORT_TAGS |
| 1074 | !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE) | 1078 | !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE) |
| 1075 | #else | 1079 | #else |
| 1076 | !(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun)) | 1080 | !(hostdata->busy[tmp->device->id] & (1 << lun)) |
| 1077 | #endif | 1081 | #endif |
| 1078 | ) { | 1082 | ) { |
| 1079 | /* ++guenther: just to be sure, this must be atomic */ | 1083 | /* ++guenther: just to be sure, this must be atomic */ |
| @@ -1099,7 +1103,7 @@ static void NCR5380_main(struct work_struct *work) | |||
| 1099 | */ | 1103 | */ |
| 1100 | dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d " | 1104 | dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d " |
| 1101 | "lun %d removed from issue_queue\n", | 1105 | "lun %d removed from issue_queue\n", |
| 1102 | HOSTNO, tmp->device->id, tmp->device->lun); | 1106 | HOSTNO, tmp->device->id, lun); |
| 1103 | /* | 1107 | /* |
| 1104 | * REQUEST SENSE commands are issued without tagged | 1108 | * REQUEST SENSE commands are issued without tagged |
| 1105 | * queueing, even on SCSI-II devices because the | 1109 | * queueing, even on SCSI-II devices because the |
| @@ -2061,7 +2065,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2061 | * accesses to this device will use the | 2065 | * accesses to this device will use the |
| 2062 | * polled-IO. */ | 2066 | * polled-IO. */ |
| 2063 | printk(KERN_NOTICE "scsi%d: switching target %d " | 2067 | printk(KERN_NOTICE "scsi%d: switching target %d " |
| 2064 | "lun %d to slow handshake\n", HOSTNO, | 2068 | "lun %llu to slow handshake\n", HOSTNO, |
| 2065 | cmd->device->id, cmd->device->lun); | 2069 | cmd->device->id, cmd->device->lun); |
| 2066 | cmd->device->borken = 1; | 2070 | cmd->device->borken = 1; |
| 2067 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | 2071 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | |
| @@ -2113,7 +2117,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2113 | /* Accept message by clearing ACK */ | 2117 | /* Accept message by clearing ACK */ |
| 2114 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2118 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2115 | 2119 | ||
| 2116 | dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command " | 2120 | dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked command " |
| 2117 | "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); | 2121 | "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); |
| 2118 | 2122 | ||
| 2119 | /* Enable reselect interrupts */ | 2123 | /* Enable reselect interrupts */ |
| @@ -2125,7 +2129,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2125 | */ | 2129 | */ |
| 2126 | 2130 | ||
| 2127 | if (!cmd->next_link) { | 2131 | if (!cmd->next_link) { |
| 2128 | printk(KERN_NOTICE "scsi%d: target %d lun %d " | 2132 | printk(KERN_NOTICE "scsi%d: target %d lun %llu " |
| 2129 | "linked command complete, no next_link\n", | 2133 | "linked command complete, no next_link\n", |
| 2130 | HOSTNO, cmd->device->id, cmd->device->lun); | 2134 | HOSTNO, cmd->device->id, cmd->device->lun); |
| 2131 | sink = 1; | 2135 | sink = 1; |
| @@ -2138,7 +2142,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2138 | * and don't free it! */ | 2142 | * and don't free it! */ |
| 2139 | cmd->next_link->tag = cmd->tag; | 2143 | cmd->next_link->tag = cmd->tag; |
| 2140 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); | 2144 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); |
| 2141 | dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request " | 2145 | dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request " |
| 2142 | "done, calling scsi_done().\n", | 2146 | "done, calling scsi_done().\n", |
| 2143 | HOSTNO, cmd->device->id, cmd->device->lun); | 2147 | HOSTNO, cmd->device->id, cmd->device->lun); |
| 2144 | #ifdef NCR5380_STATS | 2148 | #ifdef NCR5380_STATS |
| @@ -2155,7 +2159,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2155 | /* ++guenther: possible race with Falcon locking */ | 2159 | /* ++guenther: possible race with Falcon locking */ |
| 2156 | falcon_dont_release++; | 2160 | falcon_dont_release++; |
| 2157 | hostdata->connected = NULL; | 2161 | hostdata->connected = NULL; |
| 2158 | dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d " | 2162 | dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu " |
| 2159 | "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); | 2163 | "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); |
| 2160 | #ifdef SUPPORT_TAGS | 2164 | #ifdef SUPPORT_TAGS |
| 2161 | cmd_free_tag(cmd); | 2165 | cmd_free_tag(cmd); |
| @@ -2169,7 +2173,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2169 | /* ++Andreas: the mid level code knows about | 2173 | /* ++Andreas: the mid level code knows about |
| 2170 | QUEUE_FULL now. */ | 2174 | QUEUE_FULL now. */ |
| 2171 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; | 2175 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; |
| 2172 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned " | 2176 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned " |
| 2173 | "QUEUE_FULL after %d commands\n", | 2177 | "QUEUE_FULL after %d commands\n", |
| 2174 | HOSTNO, cmd->device->id, cmd->device->lun, | 2178 | HOSTNO, cmd->device->id, cmd->device->lun, |
| 2175 | ta->nr_allocated); | 2179 | ta->nr_allocated); |
| @@ -2267,7 +2271,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2267 | cmd->device->tagged_supported = 0; | 2271 | cmd->device->tagged_supported = 0; |
| 2268 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); | 2272 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); |
| 2269 | cmd->tag = TAG_NONE; | 2273 | cmd->tag = TAG_NONE; |
| 2270 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected " | 2274 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu rejected " |
| 2271 | "QUEUE_TAG message; tagged queuing " | 2275 | "QUEUE_TAG message; tagged queuing " |
| 2272 | "disabled\n", | 2276 | "disabled\n", |
| 2273 | HOSTNO, cmd->device->id, cmd->device->lun); | 2277 | HOSTNO, cmd->device->id, cmd->device->lun); |
| @@ -2284,7 +2288,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2284 | hostdata->connected = NULL; | 2288 | hostdata->connected = NULL; |
| 2285 | hostdata->disconnected_queue = cmd; | 2289 | hostdata->disconnected_queue = cmd; |
| 2286 | local_irq_restore(flags); | 2290 | local_irq_restore(flags); |
| 2287 | dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was " | 2291 | dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %llu was " |
| 2288 | "moved from connected to the " | 2292 | "moved from connected to the " |
| 2289 | "disconnected_queue\n", HOSTNO, | 2293 | "disconnected_queue\n", HOSTNO, |
| 2290 | cmd->device->id, cmd->device->lun); | 2294 | cmd->device->id, cmd->device->lun); |
| @@ -2385,12 +2389,12 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) | |||
| 2385 | printk("\n"); | 2389 | printk("\n"); |
| 2386 | } else if (tmp != EXTENDED_MESSAGE) | 2390 | } else if (tmp != EXTENDED_MESSAGE) |
| 2387 | printk(KERN_DEBUG "scsi%d: rejecting unknown " | 2391 | printk(KERN_DEBUG "scsi%d: rejecting unknown " |
| 2388 | "message %02x from target %d, lun %d\n", | 2392 | "message %02x from target %d, lun %llu\n", |
| 2389 | HOSTNO, tmp, cmd->device->id, cmd->device->lun); | 2393 | HOSTNO, tmp, cmd->device->id, cmd->device->lun); |
| 2390 | else | 2394 | else |
| 2391 | printk(KERN_DEBUG "scsi%d: rejecting unknown " | 2395 | printk(KERN_DEBUG "scsi%d: rejecting unknown " |
| 2392 | "extended message " | 2396 | "extended message " |
| 2393 | "code %02x, length %d from target %d, lun %d\n", | 2397 | "code %02x, length %d from target %d, lun %llu\n", |
| 2394 | HOSTNO, extended_msg[1], extended_msg[0], | 2398 | HOSTNO, extended_msg[1], extended_msg[0], |
| 2395 | cmd->device->id, cmd->device->lun); | 2399 | cmd->device->id, cmd->device->lun); |
| 2396 | 2400 | ||
| @@ -2588,7 +2592,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) | |||
| 2588 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2592 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2589 | 2593 | ||
| 2590 | hostdata->connected = tmp; | 2594 | hostdata->connected = tmp; |
| 2591 | dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", | 2595 | dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n", |
| 2592 | HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); | 2596 | HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); |
| 2593 | falcon_dont_release--; | 2597 | falcon_dont_release--; |
| 2594 | } | 2598 | } |
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index fd284ff36ecf..86162811812d 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c | |||
| @@ -914,7 +914,7 @@ void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, | |||
| 914 | stats->r2t_pdus = conn->r2t_pdus_cnt; | 914 | stats->r2t_pdus = conn->r2t_pdus_cnt; |
| 915 | stats->digest_err = 0; | 915 | stats->digest_err = 0; |
| 916 | stats->timeout_err = 0; | 916 | stats->timeout_err = 0; |
| 917 | stats->custom_length = 0; | 917 | stats->custom_length = 1; |
| 918 | strcpy(stats->custom[0].desc, "eh_abort_cnt"); | 918 | strcpy(stats->custom[0].desc, "eh_abort_cnt"); |
| 919 | stats->custom[0].value = conn->eh_abort_cnt; | 919 | stats->custom[0].value = conn->eh_abort_cnt; |
| 920 | } | 920 | } |
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 56467df3d6de..915c26b23ab6 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c | |||
| @@ -539,7 +539,7 @@ static umode_t beiscsi_eth_get_attr_visibility(void *data, int type) | |||
| 539 | } | 539 | } |
| 540 | 540 | ||
| 541 | /*------------------- PCI Driver operations and data ----------------- */ | 541 | /*------------------- PCI Driver operations and data ----------------- */ |
| 542 | static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { | 542 | static const struct pci_device_id beiscsi_pci_id_table[] = { |
| 543 | { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, | 543 | { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, |
| 544 | { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, | 544 | { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, |
| 545 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, | 545 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, |
| @@ -3538,10 +3538,9 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, | |||
| 3538 | q->len = len; | 3538 | q->len = len; |
| 3539 | q->entry_size = entry_size; | 3539 | q->entry_size = entry_size; |
| 3540 | mem->size = len * entry_size; | 3540 | mem->size = len * entry_size; |
| 3541 | mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma); | 3541 | mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma); |
| 3542 | if (!mem->va) | 3542 | if (!mem->va) |
| 3543 | return -ENOMEM; | 3543 | return -ENOMEM; |
| 3544 | memset(mem->va, 0, mem->size); | ||
| 3545 | return 0; | 3544 | return 0; |
| 3546 | } | 3545 | } |
| 3547 | 3546 | ||
| @@ -4320,9 +4319,9 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba) | |||
| 4320 | "BM_%d : No boot session\n"); | 4319 | "BM_%d : No boot session\n"); |
| 4321 | return ret; | 4320 | return ret; |
| 4322 | } | 4321 | } |
| 4323 | nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, | 4322 | nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev, |
| 4324 | sizeof(*session_resp), | 4323 | sizeof(*session_resp), |
| 4325 | &nonemb_cmd.dma); | 4324 | &nonemb_cmd.dma); |
| 4326 | if (nonemb_cmd.va == NULL) { | 4325 | if (nonemb_cmd.va == NULL) { |
| 4327 | beiscsi_log(phba, KERN_ERR, | 4326 | beiscsi_log(phba, KERN_ERR, |
| 4328 | BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, | 4327 | BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, |
| @@ -4332,7 +4331,6 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba) | |||
| 4332 | return -ENOMEM; | 4331 | return -ENOMEM; |
| 4333 | } | 4332 | } |
| 4334 | 4333 | ||
| 4335 | memset(nonemb_cmd.va, 0, sizeof(*session_resp)); | ||
| 4336 | tag = mgmt_get_session_info(phba, s_handle, | 4334 | tag = mgmt_get_session_info(phba, s_handle, |
| 4337 | &nonemb_cmd); | 4335 | &nonemb_cmd); |
| 4338 | if (!tag) { | 4336 | if (!tag) { |
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 07934b0b9ee1..665afcb74a56 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c | |||
| @@ -900,13 +900,12 @@ free_cmd: | |||
| 900 | static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd, | 900 | static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd, |
| 901 | int iscsi_cmd, int size) | 901 | int iscsi_cmd, int size) |
| 902 | { | 902 | { |
| 903 | cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma); | 903 | cmd->va = pci_zalloc_consistent(phba->ctrl.pdev, size, &cmd->dma); |
| 904 | if (!cmd->va) { | 904 | if (!cmd->va) { |
| 905 | beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, | 905 | beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, |
| 906 | "BG_%d : Failed to allocate memory for if info\n"); | 906 | "BG_%d : Failed to allocate memory for if info\n"); |
| 907 | return -ENOMEM; | 907 | return -ENOMEM; |
| 908 | } | 908 | } |
| 909 | memset(cmd->va, 0, size); | ||
| 910 | cmd->size = size; | 909 | cmd->size = size; |
| 911 | be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size); | 910 | be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size); |
| 912 | return 0; | 911 | return 0; |
| @@ -1015,7 +1014,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba, | |||
| 1015 | if (if_info->dhcp_state) { | 1014 | if (if_info->dhcp_state) { |
| 1016 | beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, | 1015 | beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, |
| 1017 | "BG_%d : DHCP Already Enabled\n"); | 1016 | "BG_%d : DHCP Already Enabled\n"); |
| 1018 | return 0; | 1017 | goto exit; |
| 1019 | } | 1018 | } |
| 1020 | /* The ip_param->len is 1 in DHCP case. Setting | 1019 | /* The ip_param->len is 1 in DHCP case. Setting |
| 1021 | proper IP len as this it is used while | 1020 | proper IP len as this it is used while |
| @@ -1033,7 +1032,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba, | |||
| 1033 | sizeof(*reldhcp)); | 1032 | sizeof(*reldhcp)); |
| 1034 | 1033 | ||
| 1035 | if (rc) | 1034 | if (rc) |
| 1036 | return rc; | 1035 | goto exit; |
| 1037 | 1036 | ||
| 1038 | reldhcp = nonemb_cmd.va; | 1037 | reldhcp = nonemb_cmd.va; |
| 1039 | reldhcp->interface_hndl = phba->interface_handle; | 1038 | reldhcp->interface_hndl = phba->interface_handle; |
| @@ -1044,7 +1043,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba, | |||
| 1044 | beiscsi_log(phba, KERN_WARNING, | 1043 | beiscsi_log(phba, KERN_WARNING, |
| 1045 | BEISCSI_LOG_CONFIG, | 1044 | BEISCSI_LOG_CONFIG, |
| 1046 | "BG_%d : Failed to Delete existing dhcp\n"); | 1045 | "BG_%d : Failed to Delete existing dhcp\n"); |
| 1047 | return rc; | 1046 | goto exit; |
| 1048 | } | 1047 | } |
| 1049 | } | 1048 | } |
| 1050 | } | 1049 | } |
| @@ -1054,7 +1053,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba, | |||
| 1054 | rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL, | 1053 | rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL, |
| 1055 | IP_ACTION_DEL); | 1054 | IP_ACTION_DEL); |
| 1056 | if (rc) | 1055 | if (rc) |
| 1057 | return rc; | 1056 | goto exit; |
| 1058 | } | 1057 | } |
| 1059 | 1058 | ||
| 1060 | /* Delete the Gateway settings if mode change is to DHCP */ | 1059 | /* Delete the Gateway settings if mode change is to DHCP */ |
| @@ -1064,7 +1063,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba, | |||
| 1064 | if (rc) { | 1063 | if (rc) { |
| 1065 | beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, | 1064 | beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, |
| 1066 | "BG_%d : Failed to Get Gateway Addr\n"); | 1065 | "BG_%d : Failed to Get Gateway Addr\n"); |
| 1067 | return rc; | 1066 | goto exit; |
| 1068 | } | 1067 | } |
| 1069 | 1068 | ||
| 1070 | if (gtway_addr_set.ip_addr.addr[0]) { | 1069 | if (gtway_addr_set.ip_addr.addr[0]) { |
| @@ -1076,7 +1075,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba, | |||
| 1076 | beiscsi_log(phba, KERN_WARNING, | 1075 | beiscsi_log(phba, KERN_WARNING, |
| 1077 | BEISCSI_LOG_CONFIG, | 1076 | BEISCSI_LOG_CONFIG, |
| 1078 | "BG_%d : Failed to clear Gateway Addr Set\n"); | 1077 | "BG_%d : Failed to clear Gateway Addr Set\n"); |
| 1079 | return rc; | 1078 | goto exit; |
| 1080 | } | 1079 | } |
| 1081 | } | 1080 | } |
| 1082 | } | 1081 | } |
| @@ -1087,7 +1086,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba, | |||
| 1087 | OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR, | 1086 | OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR, |
| 1088 | sizeof(*dhcpreq)); | 1087 | sizeof(*dhcpreq)); |
| 1089 | if (rc) | 1088 | if (rc) |
| 1090 | return rc; | 1089 | goto exit; |
| 1091 | 1090 | ||
| 1092 | dhcpreq = nonemb_cmd.va; | 1091 | dhcpreq = nonemb_cmd.va; |
| 1093 | dhcpreq->flags = BLOCKING; | 1092 | dhcpreq->flags = BLOCKING; |
| @@ -1095,12 +1094,14 @@ int mgmt_set_ip(struct beiscsi_hba *phba, | |||
| 1095 | dhcpreq->interface_hndl = phba->interface_handle; | 1094 | dhcpreq->interface_hndl = phba->interface_handle; |
| 1096 | dhcpreq->ip_type = BE2_DHCP_V4; | 1095 | dhcpreq->ip_type = BE2_DHCP_V4; |
| 1097 | 1096 | ||
| 1098 | return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); | 1097 | rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); |
| 1099 | } else { | 1098 | } else { |
| 1100 | return mgmt_static_ip_modify(phba, if_info, ip_param, | 1099 | rc = mgmt_static_ip_modify(phba, if_info, ip_param, |
| 1101 | subnet_param, IP_ACTION_ADD); | 1100 | subnet_param, IP_ACTION_ADD); |
| 1102 | } | 1101 | } |
| 1103 | 1102 | ||
| 1103 | exit: | ||
| 1104 | kfree(if_info); | ||
| 1104 | return rc; | 1105 | return rc; |
| 1105 | } | 1106 | } |
| 1106 | 1107 | ||
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index a3ab5cce4208..0f19455951ec 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c | |||
| @@ -81,7 +81,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, | |||
| 81 | bfa->fcs = BFA_TRUE; | 81 | bfa->fcs = BFA_TRUE; |
| 82 | fcbuild_init(); | 82 | fcbuild_init(); |
| 83 | 83 | ||
| 84 | for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { | 84 | for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) { |
| 85 | mod = &fcs_modules[i]; | 85 | mod = &fcs_modules[i]; |
| 86 | if (mod->attach) | 86 | if (mod->attach) |
| 87 | mod->attach(fcs); | 87 | mod->attach(fcs); |
| @@ -97,7 +97,7 @@ bfa_fcs_init(struct bfa_fcs_s *fcs) | |||
| 97 | int i; | 97 | int i; |
| 98 | struct bfa_fcs_mod_s *mod; | 98 | struct bfa_fcs_mod_s *mod; |
| 99 | 99 | ||
| 100 | for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { | 100 | for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) { |
| 101 | mod = &fcs_modules[i]; | 101 | mod = &fcs_modules[i]; |
| 102 | if (mod->modinit) | 102 | if (mod->modinit) |
| 103 | mod->modinit(fcs); | 103 | mod->modinit(fcs); |
| @@ -184,7 +184,7 @@ bfa_fcs_exit(struct bfa_fcs_s *fcs) | |||
| 184 | 184 | ||
| 185 | bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); | 185 | bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); |
| 186 | 186 | ||
| 187 | nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]); | 187 | nmods = ARRAY_SIZE(fcs_modules); |
| 188 | 188 | ||
| 189 | for (i = 0; i < nmods; i++) { | 189 | for (i = 0; i < nmods; i++) { |
| 190 | 190 | ||
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 2e28392c2fb6..a38aafa030b3 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h | |||
| @@ -72,7 +72,7 @@ struct bfa_sge_s { | |||
| 72 | } while (0) | 72 | } while (0) |
| 73 | 73 | ||
| 74 | #define bfa_swap_words(_x) ( \ | 74 | #define bfa_swap_words(_x) ( \ |
| 75 | ((_x) << 32) | ((_x) >> 32)) | 75 | ((u64)(_x) << 32) | ((u64)(_x) >> 32)) |
| 76 | 76 | ||
| 77 | #ifdef __BIG_ENDIAN | 77 | #ifdef __BIG_ENDIAN |
| 78 | #define bfa_sge_to_be(_x) | 78 | #define bfa_sge_to_be(_x) |
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 7593b7c1d336..e90a3742f09d 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c | |||
| @@ -1219,7 +1219,7 @@ bfad_install_msix_handler(struct bfad_s *bfad) | |||
| 1219 | int | 1219 | int |
| 1220 | bfad_setup_intr(struct bfad_s *bfad) | 1220 | bfad_setup_intr(struct bfad_s *bfad) |
| 1221 | { | 1221 | { |
| 1222 | int error = 0; | 1222 | int error; |
| 1223 | u32 mask = 0, i, num_bit = 0, max_bit = 0; | 1223 | u32 mask = 0, i, num_bit = 0, max_bit = 0; |
| 1224 | struct msix_entry msix_entries[MAX_MSIX_ENTRY]; | 1224 | struct msix_entry msix_entries[MAX_MSIX_ENTRY]; |
| 1225 | struct pci_dev *pdev = bfad->pcidev; | 1225 | struct pci_dev *pdev = bfad->pcidev; |
| @@ -1234,34 +1234,24 @@ bfad_setup_intr(struct bfad_s *bfad) | |||
| 1234 | if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) || | 1234 | if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) || |
| 1235 | (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) { | 1235 | (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) { |
| 1236 | 1236 | ||
| 1237 | error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec); | 1237 | error = pci_enable_msix_exact(bfad->pcidev, |
| 1238 | if (error) { | 1238 | msix_entries, bfad->nvec); |
| 1239 | /* In CT1 & CT2, try to allocate just one vector */ | 1239 | /* In CT1 & CT2, try to allocate just one vector */ |
| 1240 | if (bfa_asic_id_ctc(pdev->device)) { | 1240 | if (error == -ENOSPC && bfa_asic_id_ctc(pdev->device)) { |
| 1241 | printk(KERN_WARNING "bfa %s: trying one msix " | 1241 | printk(KERN_WARNING "bfa %s: trying one msix " |
| 1242 | "vector failed to allocate %d[%d]\n", | 1242 | "vector failed to allocate %d[%d]\n", |
| 1243 | bfad->pci_name, bfad->nvec, error); | 1243 | bfad->pci_name, bfad->nvec, error); |
| 1244 | bfad->nvec = 1; | 1244 | bfad->nvec = 1; |
| 1245 | error = pci_enable_msix(bfad->pcidev, | 1245 | error = pci_enable_msix_exact(bfad->pcidev, |
| 1246 | msix_entries, bfad->nvec); | 1246 | msix_entries, 1); |
| 1247 | } | 1247 | } |
| 1248 | 1248 | ||
| 1249 | /* | 1249 | if (error) { |
| 1250 | * Only error number of vector is available. | 1250 | printk(KERN_WARNING "bfad%d: " |
| 1251 | * We don't have a mechanism to map multiple | 1251 | "pci_enable_msix_exact failed (%d), " |
| 1252 | * interrupts into one vector, so even if we | 1252 | "use line based.\n", |
| 1253 | * can try to request less vectors, we don't | 1253 | bfad->inst_no, error); |
| 1254 | * know how to associate interrupt events to | 1254 | goto line_based; |
| 1255 | * vectors. Linux doesn't duplicate vectors | ||
| 1256 | * in the MSIX table for this case. | ||
| 1257 | */ | ||
| 1258 | if (error) { | ||
| 1259 | printk(KERN_WARNING "bfad%d: " | ||
| 1260 | "pci_enable_msix failed (%d), " | ||
| 1261 | "use line based.\n", | ||
| 1262 | bfad->inst_no, error); | ||
| 1263 | goto line_based; | ||
| 1264 | } | ||
| 1265 | } | 1255 | } |
| 1266 | 1256 | ||
| 1267 | /* Disable INTX in MSI-X mode */ | 1257 | /* Disable INTX in MSI-X mode */ |
| @@ -1281,20 +1271,18 @@ bfad_setup_intr(struct bfad_s *bfad) | |||
| 1281 | 1271 | ||
| 1282 | bfad->bfad_flags |= BFAD_MSIX_ON; | 1272 | bfad->bfad_flags |= BFAD_MSIX_ON; |
| 1283 | 1273 | ||
| 1284 | return error; | 1274 | return 0; |
| 1285 | } | 1275 | } |
| 1286 | 1276 | ||
| 1287 | line_based: | 1277 | line_based: |
| 1288 | error = 0; | 1278 | error = request_irq(bfad->pcidev->irq, (irq_handler_t)bfad_intx, |
| 1289 | if (request_irq | 1279 | BFAD_IRQ_FLAGS, BFAD_DRIVER_NAME, bfad); |
| 1290 | (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS, | 1280 | if (error) |
| 1291 | BFAD_DRIVER_NAME, bfad) != 0) { | 1281 | return error; |
| 1292 | /* Enable interrupt handler failed */ | 1282 | |
| 1293 | return 1; | ||
| 1294 | } | ||
| 1295 | bfad->bfad_flags |= BFAD_INTX_ON; | 1283 | bfad->bfad_flags |= BFAD_INTX_ON; |
| 1296 | 1284 | ||
| 1297 | return error; | 1285 | return 0; |
| 1298 | } | 1286 | } |
| 1299 | 1287 | ||
| 1300 | void | 1288 | void |
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index 8994fb857ee9..023b9d42ad9a 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c | |||
| @@ -26,7 +26,6 @@ int | |||
| 26 | bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd) | 26 | bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd) |
| 27 | { | 27 | { |
| 28 | struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; | 28 | struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; |
| 29 | int rc = 0; | ||
| 30 | unsigned long flags; | 29 | unsigned long flags; |
| 31 | 30 | ||
| 32 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 31 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
| @@ -34,7 +33,7 @@ bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd) | |||
| 34 | if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) { | 33 | if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) { |
| 35 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 34 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| 36 | iocmd->status = BFA_STATUS_OK; | 35 | iocmd->status = BFA_STATUS_OK; |
| 37 | return rc; | 36 | return 0; |
| 38 | } | 37 | } |
| 39 | 38 | ||
| 40 | init_completion(&bfad->enable_comp); | 39 | init_completion(&bfad->enable_comp); |
| @@ -43,21 +42,20 @@ bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd) | |||
| 43 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 42 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| 44 | wait_for_completion(&bfad->enable_comp); | 43 | wait_for_completion(&bfad->enable_comp); |
| 45 | 44 | ||
| 46 | return rc; | 45 | return 0; |
| 47 | } | 46 | } |
| 48 | 47 | ||
| 49 | int | 48 | int |
| 50 | bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd) | 49 | bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd) |
| 51 | { | 50 | { |
| 52 | struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; | 51 | struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; |
| 53 | int rc = 0; | ||
| 54 | unsigned long flags; | 52 | unsigned long flags; |
| 55 | 53 | ||
| 56 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 54 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
| 57 | if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) { | 55 | if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) { |
| 58 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 56 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
| 59 | iocmd->status = BFA_STATUS_OK; | 57 | iocmd->status = BFA_STATUS_OK; |
| 60 | return rc; | 58 | return 0; |
| 61 | } | 59 | } |
| 62 | 60 | ||
| 63 | if (bfad->disable_active) { | 61 | if (bfad->disable_active) { |
| @@ -74,7 +72,7 @@ bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd) | |||
| 74 | bfad->disable_active = BFA_FALSE; | 72 | bfad->disable_active = BFA_FALSE; |
| 75 | iocmd->status = BFA_STATUS_OK; | 73 | iocmd->status = BFA_STATUS_OK; |
| 76 | 74 | ||
| 77 | return rc; | 75 | return 0; |
| 78 | } | 76 | } |
| 79 | 77 | ||
| 80 | static int | 78 | static int |
| @@ -3270,13 +3268,13 @@ bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf, | |||
| 3270 | /* Allocate dma coherent memory */ | 3268 | /* Allocate dma coherent memory */ |
| 3271 | buf_info = buf_base; | 3269 | buf_info = buf_base; |
| 3272 | buf_info->size = payload_len; | 3270 | buf_info->size = payload_len; |
| 3273 | buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size, | 3271 | buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev, |
| 3274 | &buf_info->phys, GFP_KERNEL); | 3272 | buf_info->size, &buf_info->phys, |
| 3273 | GFP_KERNEL); | ||
| 3275 | if (!buf_info->virt) | 3274 | if (!buf_info->virt) |
| 3276 | goto out_free_mem; | 3275 | goto out_free_mem; |
| 3277 | 3276 | ||
| 3278 | /* copy the linear bsg buffer to buf_info */ | 3277 | /* copy the linear bsg buffer to buf_info */ |
| 3279 | memset(buf_info->virt, 0, buf_info->size); | ||
| 3280 | memcpy(buf_info->virt, payload_kbuf, buf_info->size); | 3278 | memcpy(buf_info->virt, payload_kbuf, buf_info->size); |
| 3281 | 3279 | ||
| 3282 | /* | 3280 | /* |
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h index e1f1e3448f98..fe2106c91c08 100644 --- a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h +++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h | |||
| @@ -1,3 +1,16 @@ | |||
| 1 | /* 57xx_hsi_bnx2fc.h: QLogic NetXtreme II Linux FCoE offload driver. | ||
| 2 | * Handles operations such as session offload/upload etc, and manages | ||
| 3 | * session resources such as connection id and qp resources. | ||
| 4 | * | ||
| 5 | * Copyright (c) 2008 - 2013 Broadcom Corporation | ||
| 6 | * Copyright (c) 2014, QLogic Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation. | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | |||
| 1 | #ifndef __57XX_FCOE_HSI_LINUX_LE__ | 14 | #ifndef __57XX_FCOE_HSI_LINUX_LE__ |
| 2 | #define __57XX_FCOE_HSI_LINUX_LE__ | 15 | #define __57XX_FCOE_HSI_LINUX_LE__ |
| 3 | 16 | ||
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig index cfcad8bde7cf..f245d543d7b1 100644 --- a/drivers/scsi/bnx2fc/Kconfig +++ b/drivers/scsi/bnx2fc/Kconfig | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | config SCSI_BNX2X_FCOE | 1 | config SCSI_BNX2X_FCOE |
| 2 | tristate "Broadcom NetXtreme II FCoE support" | 2 | tristate "QLogic NetXtreme II FCoE support" |
| 3 | depends on PCI | 3 | depends on PCI |
| 4 | select NETDEVICES | 4 | select NETDEVICES |
| 5 | select ETHERNET | 5 | select ETHERNET |
| @@ -8,5 +8,5 @@ config SCSI_BNX2X_FCOE | |||
| 8 | select LIBFCOE | 8 | select LIBFCOE |
| 9 | select CNIC | 9 | select CNIC |
| 10 | ---help--- | 10 | ---help--- |
| 11 | This driver supports FCoE offload for the Broadcom NetXtreme II | 11 | This driver supports FCoE offload for the QLogic NetXtreme II |
| 12 | devices. | 12 | devices. |
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 6a976657b475..1346e052e03c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h | |||
| @@ -1,8 +1,7 @@ | |||
| 1 | #ifndef _BNX2FC_H_ | 1 | /* bnx2fc.h: QLogic NetXtreme II Linux FCoE offload driver. |
| 2 | #define _BNX2FC_H_ | ||
| 3 | /* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver. | ||
| 4 | * | 2 | * |
| 5 | * Copyright (c) 2008 - 2013 Broadcom Corporation | 3 | * Copyright (c) 2008 - 2013 Broadcom Corporation |
| 4 | * Copyright (c) 2014, QLogic Corporation | ||
| 6 | * | 5 | * |
| 7 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
| @@ -11,6 +10,8 @@ | |||
| 11 | * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) | 10 | * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) |
| 12 | */ | 11 | */ |
| 13 | 12 | ||
| 13 | #ifndef _BNX2FC_H_ | ||
| 14 | #define _BNX2FC_H_ | ||
| 14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 15 | 16 | ||
| 16 | #include <linux/module.h> | 17 | #include <linux/module.h> |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h index dad9924abbbb..e147cc7ee36c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_constants.h +++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h | |||
| @@ -1,3 +1,16 @@ | |||
| 1 | /* bnx2fc_constants.h: QLogic NetXtreme II Linux FCoE offload driver. | ||
| 2 | * Handles operations such as session offload/upload etc, and manages | ||
| 3 | * session resources such as connection id and qp resources. | ||
| 4 | * | ||
| 5 | * Copyright (c) 2008 - 2013 Broadcom Corporation | ||
| 6 | * Copyright (c) 2014, QLogic Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation. | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | |||
| 1 | #ifndef __BNX2FC_CONSTANTS_H_ | 14 | #ifndef __BNX2FC_CONSTANTS_H_ |
| 2 | #define __BNX2FC_CONSTANTS_H_ | 15 | #define __BNX2FC_CONSTANTS_H_ |
| 3 | 16 | ||
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.c b/drivers/scsi/bnx2fc/bnx2fc_debug.c index 0cbee1b23ee2..d055df01faa5 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_debug.c +++ b/drivers/scsi/bnx2fc/bnx2fc_debug.c | |||
| @@ -1,3 +1,16 @@ | |||
| 1 | /* bnx2fc_debug.c: QLogic NetXtreme II Linux FCoE offload driver. | ||
| 2 | * Handles operations such as session offload/upload etc, and manages | ||
| 3 | * session resources such as connection id and qp resources. | ||
| 4 | * | ||
| 5 | * Copyright (c) 2008 - 2013 Broadcom Corporation | ||
| 6 | * Copyright (c) 2014, QLogic Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation. | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | |||
| 1 | #include "bnx2fc.h" | 14 | #include "bnx2fc.h" |
| 2 | 15 | ||
| 3 | void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...) | 16 | void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...) |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h index 4808ff99621f..2b9006774f37 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_debug.h +++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h | |||
| @@ -1,3 +1,16 @@ | |||
| 1 | /* bnx2fc_debug.h: QLogic NetXtreme II Linux FCoE offload driver. | ||
| 2 | * Handles operations such as session offload/upload etc, and manages | ||
| 3 | * session resources such as connection id and qp resources. | ||
| 4 | * | ||
| 5 | * Copyright (c) 2008 - 2013 Broadcom Corporation | ||
| 6 | * Copyright (c) 2014, QLogic Corporation | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation. | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | |||
| 1 | #ifndef __BNX2FC_DEBUG__ | 14 | #ifndef __BNX2FC_DEBUG__ |
| 2 | #define __BNX2FC_DEBUG__ | 15 | #define __BNX2FC_DEBUG__ |
| 3 | 16 | ||
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c index b1c9a4f8caee..ca75c7ca2559 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_els.c +++ b/drivers/scsi/bnx2fc/bnx2fc_els.c | |||
| @@ -1,9 +1,10 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver. | 2 | * bnx2fc_els.c: QLogic NetXtreme II Linux FCoE offload driver. |
| 3 | * This file contains helper routines that handle ELS requests | 3 | * This file contains helper routines that handle ELS requests |
| 4 | * and responses. | 4 | * and responses. |
| 5 | * | 5 | * |
| 6 | * Copyright (c) 2008 - 2013 Broadcom Corporation | 6 | * Copyright (c) 2008 - 2013 Broadcom Corporation |
| 7 | * Copyright (c) 2014, QLogic Corporation | ||
| 7 | * | 8 | * |
| 8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License as published by | 10 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 785d0d71781e..79e5c94107a9 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | |||
| @@ -1,9 +1,10 @@ | |||
| 1 | /* bnx2fc_fcoe.c: Broadcom NetXtreme II Linux FCoE offload driver. | 1 | /* bnx2fc_fcoe.c: QLogic NetXtreme II Linux FCoE offload driver. |
| 2 | * This file contains the code that interacts with libfc, libfcoe, | 2 | * This file contains the code that interacts with libfc, libfcoe, |
| 3 | * cnic modules to create FCoE instances, send/receive non-offloaded | 3 | * cnic modules to create FCoE instances, send/receive non-offloaded |
| 4 | * FIP/FCoE packets, listen to link events etc. | 4 | * FIP/FCoE packets, listen to link events etc. |
| 5 | * | 5 | * |
| 6 | * Copyright (c) 2008 - 2013 Broadcom Corporation | 6 | * Copyright (c) 2008 - 2013 Broadcom Corporation |
| 7 | * Copyright (c) 2014, QLogic Corporation | ||
| 7 | * | 8 | * |
| 8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License as published by | 10 | * it under the terms of the GNU General Public License as published by |
| @@ -26,12 +27,12 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); | |||
| 26 | 27 | ||
| 27 | 28 | ||
| 28 | static char version[] = | 29 | static char version[] = |
| 29 | "Broadcom NetXtreme II FCoE Driver " DRV_MODULE_NAME \ | 30 | "QLogic NetXtreme II FCoE Driver " DRV_MODULE_NAME \ |
| 30 | " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | 31 | " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; |
| 31 | 32 | ||
| 32 | 33 | ||
| 33 | MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>"); | 34 | MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>"); |
| 34 | MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 FCoE Driver"); | 35 | MODULE_DESCRIPTION("QLogic NetXtreme II BCM57710 FCoE Driver"); |
| 35 | MODULE_LICENSE("GPL"); | 36 | MODULE_LICENSE("GPL"); |
| 36 | MODULE_VERSION(DRV_MODULE_VERSION); | 37 | MODULE_VERSION(DRV_MODULE_VERSION); |
| 37 | 38 | ||
| @@ -692,7 +693,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) | |||
| 692 | if (!lport->vport) | 693 | if (!lport->vport) |
| 693 | fc_host_max_npiv_vports(lport->host) = USHRT_MAX; | 694 | fc_host_max_npiv_vports(lport->host) = USHRT_MAX; |
| 694 | snprintf(fc_host_symbolic_name(lport->host), 256, | 695 | snprintf(fc_host_symbolic_name(lport->host), 256, |
| 695 | "%s (Broadcom %s) v%s over %s", | 696 | "%s (QLogic %s) v%s over %s", |
| 696 | BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION, | 697 | BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION, |
| 697 | interface->netdev->name); | 698 | interface->netdev->name); |
| 698 | 699 | ||
| @@ -2775,7 +2776,7 @@ static struct fc_function_template bnx2fc_vport_xport_function = { | |||
| 2775 | */ | 2776 | */ |
| 2776 | static struct scsi_host_template bnx2fc_shost_template = { | 2777 | static struct scsi_host_template bnx2fc_shost_template = { |
| 2777 | .module = THIS_MODULE, | 2778 | .module = THIS_MODULE, |
| 2778 | .name = "Broadcom Offload FCoE Initiator", | 2779 | .name = "QLogic Offload FCoE Initiator", |
| 2779 | .queuecommand = bnx2fc_queuecommand, | 2780 | .queuecommand = bnx2fc_queuecommand, |
| 2780 | .eh_abort_handler = bnx2fc_eh_abort, /* abts */ | 2781 | .eh_abort_handler = bnx2fc_eh_abort, /* abts */ |
| 2781 | .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */ | 2782 | .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */ |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 512aed3ae4f1..c6688d72a846 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c | |||
| @@ -1,8 +1,9 @@ | |||
| 1 | /* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver. | 1 | /* bnx2fc_hwi.c: QLogic NetXtreme II Linux FCoE offload driver. |
| 2 | * This file contains the code that low level functions that interact | 2 | * This file contains the code that low level functions that interact |
| 3 | * with 57712 FCoE firmware. | 3 | * with 57712 FCoE firmware. |
| 4 | * | 4 | * |
| 5 | * Copyright (c) 2008 - 2013 Broadcom Corporation | 5 | * Copyright (c) 2008 - 2013 Broadcom Corporation |
| 6 | * Copyright (c) 2014, QLogic Corporation | ||
| 6 | * | 7 | * |
| 7 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 7bc47fc7c686..4c5891e66038 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c | |||
| @@ -1,7 +1,8 @@ | |||
| 1 | /* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver. | 1 | /* bnx2fc_io.c: QLogic NetXtreme II Linux FCoE offload driver. |
| 2 | * IO manager and SCSI IO processing. | 2 | * IO manager and SCSI IO processing. |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2008 - 2013 Broadcom Corporation | 4 | * Copyright (c) 2008 - 2013 Broadcom Corporation |
| 5 | * Copyright (c) 2014, QLogic Corporation | ||
| 5 | * | 6 | * |
| 6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
| @@ -1450,9 +1451,9 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req) | |||
| 1450 | struct scsi_cmnd *sc_cmd = io_req->sc_cmd; | 1451 | struct scsi_cmnd *sc_cmd = io_req->sc_cmd; |
| 1451 | struct bnx2fc_rport *tgt = io_req->tgt; | 1452 | struct bnx2fc_rport *tgt = io_req->tgt; |
| 1452 | struct bnx2fc_cmd *cmd, *tmp; | 1453 | struct bnx2fc_cmd *cmd, *tmp; |
| 1453 | int tm_lun = sc_cmd->device->lun; | 1454 | u64 tm_lun = sc_cmd->device->lun; |
| 1455 | u64 lun; | ||
| 1454 | int rc = 0; | 1456 | int rc = 0; |
| 1455 | int lun; | ||
| 1456 | 1457 | ||
| 1457 | /* called with tgt_lock held */ | 1458 | /* called with tgt_lock held */ |
| 1458 | BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n"); | 1459 | BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n"); |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index 6870cf6781d9..c66c708412a6 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c | |||
| @@ -1,8 +1,9 @@ | |||
| 1 | /* bnx2fc_tgt.c: Broadcom NetXtreme II Linux FCoE offload driver. | 1 | /* bnx2fc_tgt.c: QLogic NetXtreme II Linux FCoE offload driver. |
| 2 | * Handles operations such as session offload/upload etc, and manages | 2 | * Handles operations such as session offload/upload etc, and manages |
| 3 | * session resources such as connection id and qp resources. | 3 | * session resources such as connection id and qp resources. |
| 4 | * | 4 | * |
| 5 | * Copyright (c) 2008 - 2013 Broadcom Corporation | 5 | * Copyright (c) 2008 - 2013 Broadcom Corporation |
| 6 | * Copyright (c) 2014, QLogic Corporation | ||
| 6 | * | 7 | * |
| 7 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h index 3d33767f2f2c..917534109a50 100644 --- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h +++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h | |||
| @@ -1,13 +1,15 @@ | |||
| 1 | /* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI | 1 | /* 57xx_iscsi_constants.h: QLogic NetXtreme II iSCSI HSI |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 2006 - 2013 Broadcom Corporation | 3 | * Copyright (c) 2006 - 2013 Broadcom Corporation |
| 4 | * Copyright (c) 2014, QLogic Corporation | ||
| 4 | * | 5 | * |
| 5 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
| 7 | * the Free Software Foundation. | 8 | * the Free Software Foundation. |
| 8 | * | 9 | * |
| 9 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) | 10 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) |
| 10 | * Maintained by: Eddie Wai (eddie.wai@broadcom.com) | 11 | * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) |
| 12 | * Maintained by: QLogic-Storage-Upstream@qlogic.com | ||
| 11 | */ | 13 | */ |
| 12 | #ifndef __57XX_ISCSI_CONSTANTS_H_ | 14 | #ifndef __57XX_ISCSI_CONSTANTS_H_ |
| 13 | #define __57XX_ISCSI_CONSTANTS_H_ | 15 | #define __57XX_ISCSI_CONSTANTS_H_ |
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h index 7052a839b0ea..19b3a97dbacd 100644 --- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h +++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h | |||
| @@ -1,13 +1,15 @@ | |||
| 1 | /* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI. | 1 | /* 57xx_iscsi_hsi.h: QLogic NetXtreme II iSCSI HSI. |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 2006 - 2013 Broadcom Corporation | 3 | * Copyright (c) 2006 - 2013 Broadcom Corporation |
| 4 | * Copyright (c) 2014, QLogic Corporation | ||
| 4 | * | 5 | * |
| 5 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
| 7 | * the Free Software Foundation. | 8 | * the Free Software Foundation. |
| 8 | * | 9 | * |
| 9 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) | 10 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) |
| 10 | * Maintained by: Eddie Wai (eddie.wai@broadcom.com) | 11 | * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) |
| 12 | * Maintained by: QLogic-Storage-Upstream@qlogic.com | ||
| 11 | */ | 13 | */ |
| 12 | #ifndef __57XX_ISCSI_HSI_LINUX_LE__ | 14 | #ifndef __57XX_ISCSI_HSI_LINUX_LE__ |
| 13 | #define __57XX_ISCSI_HSI_LINUX_LE__ | 15 | #define __57XX_ISCSI_HSI_LINUX_LE__ |
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig index 01cff1894b6d..44ce54e536e5 100644 --- a/drivers/scsi/bnx2i/Kconfig +++ b/drivers/scsi/bnx2i/Kconfig | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | config SCSI_BNX2_ISCSI | 1 | config SCSI_BNX2_ISCSI |
| 2 | tristate "Broadcom NetXtreme II iSCSI support" | 2 | tristate "QLogic NetXtreme II iSCSI support" |
| 3 | depends on NET | 3 | depends on NET |
| 4 | depends on PCI | 4 | depends on PCI |
| 5 | select SCSI_ISCSI_ATTRS | 5 | select SCSI_ISCSI_ATTRS |
| @@ -8,5 +8,5 @@ config SCSI_BNX2_ISCSI | |||
| 8 | select NET_VENDOR_BROADCOM | 8 | select NET_VENDOR_BROADCOM |
| 9 | select CNIC | 9 | select CNIC |
| 10 | ---help--- | 10 | ---help--- |
| 11 | This driver supports iSCSI offload for the Broadcom NetXtreme II | 11 | This driver supports iSCSI offload for the QLogic NetXtreme II |
| 12 | devices. | 12 | devices. |
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index c73bbcb63c02..ed7f3228e234 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h | |||
| @@ -1,15 +1,17 @@ | |||
| 1 | /* bnx2i.h: Broadcom NetXtreme II iSCSI driver. | 1 | /* bnx2i.h: QLogic NetXtreme II iSCSI driver. |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 2006 - 2013 Broadcom Corporation | 3 | * Copyright (c) 2006 - 2013 Broadcom Corporation |
| 4 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. | 4 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. |
| 5 | * Copyright (c) 2007, 2008 Mike Christie | 5 | * Copyright (c) 2007, 2008 Mike Christie |
| 6 | * Copyright (c) 2014, QLogic Corporation | ||
| 6 | * | 7 | * |
| 7 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
| 9 | * the Free Software Foundation. | 10 | * the Free Software Foundation. |
| 10 | * | 11 | * |
| 11 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) | 12 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) |
| 12 | * Maintained by: Eddie Wai (eddie.wai@broadcom.com) | 13 | * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) |
| 14 | * Maintained by: QLogic-Storage-Upstream@qlogic.com | ||
| 13 | */ | 15 | */ |
| 14 | 16 | ||
| 15 | #ifndef _BNX2I_H_ | 17 | #ifndef _BNX2I_H_ |
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index d6d491c2f004..fb072cc5e9fd 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c | |||
| @@ -1,15 +1,17 @@ | |||
| 1 | /* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver. | 1 | /* bnx2i_hwi.c: QLogic NetXtreme II iSCSI driver. |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 2006 - 2013 Broadcom Corporation | 3 | * Copyright (c) 2006 - 2013 Broadcom Corporation |
| 4 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. | 4 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. |
| 5 | * Copyright (c) 2007, 2008 Mike Christie | 5 | * Copyright (c) 2007, 2008 Mike Christie |
| 6 | * Copyright (c) 2014, QLogic Corporation | ||
| 6 | * | 7 | * |
| 7 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
| 9 | * the Free Software Foundation. | 10 | * the Free Software Foundation. |
| 10 | * | 11 | * |
| 11 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) | 12 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) |
| 12 | * Maintained by: Eddie Wai (eddie.wai@broadcom.com) | 13 | * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) |
| 14 | * Maintained by: QLogic-Storage-Upstream@qlogic.com | ||
| 13 | */ | 15 | */ |
| 14 | 16 | ||
| 15 | #include <linux/gfp.h> | 17 | #include <linux/gfp.h> |
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 80c03b452d61..c8b410c24cf0 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c | |||
| @@ -1,15 +1,17 @@ | |||
| 1 | /* bnx2i.c: Broadcom NetXtreme II iSCSI driver. | 1 | /* bnx2i.c: QLogic NetXtreme II iSCSI driver. |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 2006 - 2013 Broadcom Corporation | 3 | * Copyright (c) 2006 - 2013 Broadcom Corporation |
| 4 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. | 4 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. |
| 5 | * Copyright (c) 2007, 2008 Mike Christie | 5 | * Copyright (c) 2007, 2008 Mike Christie |
| 6 | * Copyright (c) 2014, QLogic Corporation | ||
| 6 | * | 7 | * |
| 7 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
| 9 | * the Free Software Foundation. | 10 | * the Free Software Foundation. |
| 10 | * | 11 | * |
| 11 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) | 12 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) |
| 12 | * Maintained by: Eddie Wai (eddie.wai@broadcom.com) | 13 | * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) |
| 14 | * Maintained by: QLogic-Storage-Upstream@qlogic.com | ||
| 13 | */ | 15 | */ |
| 14 | 16 | ||
| 15 | #include "bnx2i.h" | 17 | #include "bnx2i.h" |
| @@ -18,18 +20,18 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); | |||
| 18 | static u32 adapter_count; | 20 | static u32 adapter_count; |
| 19 | 21 | ||
| 20 | #define DRV_MODULE_NAME "bnx2i" | 22 | #define DRV_MODULE_NAME "bnx2i" |
| 21 | #define DRV_MODULE_VERSION "2.7.6.2" | 23 | #define DRV_MODULE_VERSION "2.7.10.1" |
| 22 | #define DRV_MODULE_RELDATE "Jun 06, 2013" | 24 | #define DRV_MODULE_RELDATE "Jul 16, 2014" |
| 23 | 25 | ||
| 24 | static char version[] = | 26 | static char version[] = |
| 25 | "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ | 27 | "QLogic NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ |
| 26 | " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | 28 | " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; |
| 27 | 29 | ||
| 28 | 30 | ||
| 29 | MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com> and " | 31 | MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com> and " |
| 30 | "Eddie Wai <eddie.wai@broadcom.com>"); | 32 | "Eddie Wai <eddie.wai@broadcom.com>"); |
| 31 | 33 | ||
| 32 | MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711/57712" | 34 | MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/57710/57711/57712" |
| 33 | "/57800/57810/57840 iSCSI Driver"); | 35 | "/57800/57810/57840 iSCSI Driver"); |
| 34 | MODULE_LICENSE("GPL"); | 36 | MODULE_LICENSE("GPL"); |
| 35 | MODULE_VERSION(DRV_MODULE_VERSION); | 37 | MODULE_VERSION(DRV_MODULE_VERSION); |
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 166543f7ef55..40e22497d249 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c | |||
| @@ -1,16 +1,18 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver. | 2 | * bnx2i_iscsi.c: QLogic NetXtreme II iSCSI driver. |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2006 - 2013 Broadcom Corporation | 4 | * Copyright (c) 2006 - 2013 Broadcom Corporation |
| 5 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. | 5 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. |
| 6 | * Copyright (c) 2007, 2008 Mike Christie | 6 | * Copyright (c) 2007, 2008 Mike Christie |
| 7 | * Copyright (c) 2014, QLogic Corporation | ||
| 7 | * | 8 | * |
| 8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License as published by | 10 | * it under the terms of the GNU General Public License as published by |
| 10 | * the Free Software Foundation. | 11 | * the Free Software Foundation. |
| 11 | * | 12 | * |
| 12 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) | 13 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) |
| 13 | * Maintained by: Eddie Wai (eddie.wai@broadcom.com) | 14 | * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) |
| 15 | * Maintained by: QLogic-Storage-Upstream@qlogic.com | ||
| 14 | */ | 16 | */ |
| 15 | 17 | ||
| 16 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
| @@ -1643,12 +1645,11 @@ static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn, | |||
| 1643 | stats->r2t_pdus = conn->r2t_pdus_cnt; | 1645 | stats->r2t_pdus = conn->r2t_pdus_cnt; |
| 1644 | stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; | 1646 | stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; |
| 1645 | stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; | 1647 | stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; |
| 1646 | stats->custom_length = 3; | ||
| 1647 | strcpy(stats->custom[2].desc, "eh_abort_cnt"); | ||
| 1648 | stats->custom[2].value = conn->eh_abort_cnt; | ||
| 1649 | stats->digest_err = 0; | 1648 | stats->digest_err = 0; |
| 1650 | stats->timeout_err = 0; | 1649 | stats->timeout_err = 0; |
| 1651 | stats->custom_length = 0; | 1650 | strcpy(stats->custom[0].desc, "eh_abort_cnt"); |
| 1651 | stats->custom[0].value = conn->eh_abort_cnt; | ||
| 1652 | stats->custom_length = 1; | ||
| 1652 | } | 1653 | } |
| 1653 | 1654 | ||
| 1654 | 1655 | ||
| @@ -2249,7 +2250,7 @@ static umode_t bnx2i_attr_is_visible(int param_type, int param) | |||
| 2249 | */ | 2250 | */ |
| 2250 | static struct scsi_host_template bnx2i_host_template = { | 2251 | static struct scsi_host_template bnx2i_host_template = { |
| 2251 | .module = THIS_MODULE, | 2252 | .module = THIS_MODULE, |
| 2252 | .name = "Broadcom Offload iSCSI Initiator", | 2253 | .name = "QLogic Offload iSCSI Initiator", |
| 2253 | .proc_name = "bnx2i", | 2254 | .proc_name = "bnx2i", |
| 2254 | .queuecommand = iscsi_queuecommand, | 2255 | .queuecommand = iscsi_queuecommand, |
| 2255 | .eh_abort_handler = iscsi_eh_abort, | 2256 | .eh_abort_handler = iscsi_eh_abort, |
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c index a0a3d9fe61fe..6d56fd60cb2b 100644 --- a/drivers/scsi/bnx2i/bnx2i_sysfs.c +++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c | |||
| @@ -1,13 +1,15 @@ | |||
| 1 | /* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver. | 1 | /* bnx2i_sysfs.c: QLogic NetXtreme II iSCSI driver. |
| 2 | * | 2 | * |
| 3 | * Copyright (c) 2004 - 2013 Broadcom Corporation | 3 | * Copyright (c) 2004 - 2013 Broadcom Corporation |
| 4 | * Copyright (c) 2014, QLogic Corporation | ||
| 4 | * | 5 | * |
| 5 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
| 7 | * the Free Software Foundation. | 8 | * the Free Software Foundation. |
| 8 | * | 9 | * |
| 9 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) | 10 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) |
| 10 | * Maintained by: Eddie Wai (eddie.wai@broadcom.com) | 11 | * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) |
| 12 | * Maintained by: QLogic-Storage-Upstream@qlogic.com | ||
| 11 | */ | 13 | */ |
| 12 | 14 | ||
| 13 | #include "bnx2i.h" | 15 | #include "bnx2i.h" |
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index 2a323742ce04..ef5ae0d03616 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c | |||
| @@ -84,15 +84,19 @@ static const char * vendor_labels[CH_TYPES-4] = { | |||
| 84 | }; | 84 | }; |
| 85 | // module_param_string_array(vendor_labels, NULL, 0444); | 85 | // module_param_string_array(vendor_labels, NULL, 0444); |
| 86 | 86 | ||
| 87 | #define ch_printk(prefix, ch, fmt, a...) \ | ||
| 88 | sdev_printk(prefix, (ch)->device, "[%s] " fmt, \ | ||
| 89 | (ch)->name, ##a) | ||
| 90 | |||
| 87 | #define DPRINTK(fmt, arg...) \ | 91 | #define DPRINTK(fmt, arg...) \ |
| 88 | do { \ | 92 | do { \ |
| 89 | if (debug) \ | 93 | if (debug) \ |
| 90 | printk(KERN_DEBUG "%s: " fmt, ch->name, ##arg); \ | 94 | ch_printk(KERN_DEBUG, ch, fmt, ##arg); \ |
| 91 | } while (0) | 95 | } while (0) |
| 92 | #define VPRINTK(level, fmt, arg...) \ | 96 | #define VPRINTK(level, fmt, arg...) \ |
| 93 | do { \ | 97 | do { \ |
| 94 | if (verbose) \ | 98 | if (verbose) \ |
| 95 | printk(level "%s: " fmt, ch->name, ##arg); \ | 99 | ch_printk(level, ch, fmt, ##arg); \ |
| 96 | } while (0) | 100 | } while (0) |
| 97 | 101 | ||
| 98 | /* ------------------------------------------------------------------- */ | 102 | /* ------------------------------------------------------------------- */ |
| @@ -196,7 +200,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, | |||
| 196 | __scsi_print_command(cmd); | 200 | __scsi_print_command(cmd); |
| 197 | } | 201 | } |
| 198 | 202 | ||
| 199 | result = scsi_execute_req(ch->device, cmd, direction, buffer, | 203 | result = scsi_execute_req(ch->device, cmd, direction, buffer, |
| 200 | buflength, &sshdr, timeout * HZ, | 204 | buflength, &sshdr, timeout * HZ, |
| 201 | MAX_RETRIES, NULL); | 205 | MAX_RETRIES, NULL); |
| 202 | 206 | ||
| @@ -247,7 +251,7 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data) | |||
| 247 | retry: | 251 | retry: |
| 248 | memset(cmd,0,sizeof(cmd)); | 252 | memset(cmd,0,sizeof(cmd)); |
| 249 | cmd[0] = READ_ELEMENT_STATUS; | 253 | cmd[0] = READ_ELEMENT_STATUS; |
| 250 | cmd[1] = (ch->device->lun << 5) | | 254 | cmd[1] = ((ch->device->lun & 0x7) << 5) | |
| 251 | (ch->voltags ? 0x10 : 0) | | 255 | (ch->voltags ? 0x10 : 0) | |
| 252 | ch_elem_to_typecode(ch,elem); | 256 | ch_elem_to_typecode(ch,elem); |
| 253 | cmd[2] = (elem >> 8) & 0xff; | 257 | cmd[2] = (elem >> 8) & 0xff; |
| @@ -283,7 +287,7 @@ ch_init_elem(scsi_changer *ch) | |||
| 283 | VPRINTK(KERN_INFO, "INITIALIZE ELEMENT STATUS, may take some time ...\n"); | 287 | VPRINTK(KERN_INFO, "INITIALIZE ELEMENT STATUS, may take some time ...\n"); |
| 284 | memset(cmd,0,sizeof(cmd)); | 288 | memset(cmd,0,sizeof(cmd)); |
| 285 | cmd[0] = INITIALIZE_ELEMENT_STATUS; | 289 | cmd[0] = INITIALIZE_ELEMENT_STATUS; |
| 286 | cmd[1] = ch->device->lun << 5; | 290 | cmd[1] = (ch->device->lun & 0x7) << 5; |
| 287 | err = ch_do_scsi(ch, cmd, NULL, 0, DMA_NONE); | 291 | err = ch_do_scsi(ch, cmd, NULL, 0, DMA_NONE); |
| 288 | VPRINTK(KERN_INFO, "... finished\n"); | 292 | VPRINTK(KERN_INFO, "... finished\n"); |
| 289 | return err; | 293 | return err; |
| @@ -303,7 +307,7 @@ ch_readconfig(scsi_changer *ch) | |||
| 303 | 307 | ||
| 304 | memset(cmd,0,sizeof(cmd)); | 308 | memset(cmd,0,sizeof(cmd)); |
| 305 | cmd[0] = MODE_SENSE; | 309 | cmd[0] = MODE_SENSE; |
| 306 | cmd[1] = ch->device->lun << 5; | 310 | cmd[1] = (ch->device->lun & 0x7) << 5; |
| 307 | cmd[2] = 0x1d; | 311 | cmd[2] = 0x1d; |
| 308 | cmd[4] = 255; | 312 | cmd[4] = 255; |
| 309 | result = ch_do_scsi(ch, cmd, buffer, 255, DMA_FROM_DEVICE); | 313 | result = ch_do_scsi(ch, cmd, buffer, 255, DMA_FROM_DEVICE); |
| @@ -428,7 +432,7 @@ ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate) | |||
| 428 | trans = ch->firsts[CHET_MT]; | 432 | trans = ch->firsts[CHET_MT]; |
| 429 | memset(cmd,0,sizeof(cmd)); | 433 | memset(cmd,0,sizeof(cmd)); |
| 430 | cmd[0] = POSITION_TO_ELEMENT; | 434 | cmd[0] = POSITION_TO_ELEMENT; |
| 431 | cmd[1] = ch->device->lun << 5; | 435 | cmd[1] = (ch->device->lun & 0x7) << 5; |
| 432 | cmd[2] = (trans >> 8) & 0xff; | 436 | cmd[2] = (trans >> 8) & 0xff; |
| 433 | cmd[3] = trans & 0xff; | 437 | cmd[3] = trans & 0xff; |
| 434 | cmd[4] = (elem >> 8) & 0xff; | 438 | cmd[4] = (elem >> 8) & 0xff; |
| @@ -447,7 +451,7 @@ ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate) | |||
| 447 | trans = ch->firsts[CHET_MT]; | 451 | trans = ch->firsts[CHET_MT]; |
| 448 | memset(cmd,0,sizeof(cmd)); | 452 | memset(cmd,0,sizeof(cmd)); |
| 449 | cmd[0] = MOVE_MEDIUM; | 453 | cmd[0] = MOVE_MEDIUM; |
| 450 | cmd[1] = ch->device->lun << 5; | 454 | cmd[1] = (ch->device->lun & 0x7) << 5; |
| 451 | cmd[2] = (trans >> 8) & 0xff; | 455 | cmd[2] = (trans >> 8) & 0xff; |
| 452 | cmd[3] = trans & 0xff; | 456 | cmd[3] = trans & 0xff; |
| 453 | cmd[4] = (src >> 8) & 0xff; | 457 | cmd[4] = (src >> 8) & 0xff; |
| @@ -470,7 +474,7 @@ ch_exchange(scsi_changer *ch, u_int trans, u_int src, | |||
| 470 | trans = ch->firsts[CHET_MT]; | 474 | trans = ch->firsts[CHET_MT]; |
| 471 | memset(cmd,0,sizeof(cmd)); | 475 | memset(cmd,0,sizeof(cmd)); |
| 472 | cmd[0] = EXCHANGE_MEDIUM; | 476 | cmd[0] = EXCHANGE_MEDIUM; |
| 473 | cmd[1] = ch->device->lun << 5; | 477 | cmd[1] = (ch->device->lun & 0x7) << 5; |
| 474 | cmd[2] = (trans >> 8) & 0xff; | 478 | cmd[2] = (trans >> 8) & 0xff; |
| 475 | cmd[3] = trans & 0xff; | 479 | cmd[3] = trans & 0xff; |
| 476 | cmd[4] = (src >> 8) & 0xff; | 480 | cmd[4] = (src >> 8) & 0xff; |
| @@ -518,7 +522,7 @@ ch_set_voltag(scsi_changer *ch, u_int elem, | |||
| 518 | elem, tag); | 522 | elem, tag); |
| 519 | memset(cmd,0,sizeof(cmd)); | 523 | memset(cmd,0,sizeof(cmd)); |
| 520 | cmd[0] = SEND_VOLUME_TAG; | 524 | cmd[0] = SEND_VOLUME_TAG; |
| 521 | cmd[1] = (ch->device->lun << 5) | | 525 | cmd[1] = ((ch->device->lun & 0x7) << 5) | |
| 522 | ch_elem_to_typecode(ch,elem); | 526 | ch_elem_to_typecode(ch,elem); |
| 523 | cmd[2] = (elem >> 8) & 0xff; | 527 | cmd[2] = (elem >> 8) & 0xff; |
| 524 | cmd[3] = elem & 0xff; | 528 | cmd[3] = elem & 0xff; |
| @@ -754,7 +758,7 @@ static long ch_ioctl(struct file *file, | |||
| 754 | voltag_retry: | 758 | voltag_retry: |
| 755 | memset(ch_cmd, 0, sizeof(ch_cmd)); | 759 | memset(ch_cmd, 0, sizeof(ch_cmd)); |
| 756 | ch_cmd[0] = READ_ELEMENT_STATUS; | 760 | ch_cmd[0] = READ_ELEMENT_STATUS; |
| 757 | ch_cmd[1] = (ch->device->lun << 5) | | 761 | ch_cmd[1] = ((ch->device->lun & 0x7) << 5) | |
| 758 | (ch->voltags ? 0x10 : 0) | | 762 | (ch->voltags ? 0x10 : 0) | |
| 759 | ch_elem_to_typecode(ch,elem); | 763 | ch_elem_to_typecode(ch,elem); |
| 760 | ch_cmd[2] = (elem >> 8) & 0xff; | 764 | ch_cmd[2] = (elem >> 8) & 0xff; |
| @@ -924,8 +928,8 @@ static int ch_probe(struct device *dev) | |||
| 924 | MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch, | 928 | MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch, |
| 925 | "s%s", ch->name); | 929 | "s%s", ch->name); |
| 926 | if (IS_ERR(class_dev)) { | 930 | if (IS_ERR(class_dev)) { |
| 927 | printk(KERN_WARNING "ch%d: device_create failed\n", | 931 | sdev_printk(KERN_WARNING, sd, "ch%d: device_create failed\n", |
| 928 | ch->minor); | 932 | ch->minor); |
| 929 | ret = PTR_ERR(class_dev); | 933 | ret = PTR_ERR(class_dev); |
| 930 | goto remove_idr; | 934 | goto remove_idr; |
| 931 | } | 935 | } |
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index 1aafc331ee63..17794add855c 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c | |||
| @@ -1167,7 +1167,7 @@ static struct pci_error_handlers csio_err_handler = { | |||
| 1167 | .resume = csio_pci_resume, | 1167 | .resume = csio_pci_resume, |
| 1168 | }; | 1168 | }; |
| 1169 | 1169 | ||
| 1170 | static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = { | 1170 | static const struct pci_device_id csio_pci_tbl[] = { |
| 1171 | CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T4 DEBUG FCOE */ | 1171 | CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T4 DEBUG FCOE */ |
| 1172 | CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */ | 1172 | CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */ |
| 1173 | CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */ | 1173 | CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */ |
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 7494e4bc69cc..86103c8475d8 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c | |||
| @@ -1657,7 +1657,7 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req) | |||
| 1657 | case FW_SCSI_UNDER_FLOW_ERR: | 1657 | case FW_SCSI_UNDER_FLOW_ERR: |
| 1658 | csio_warn(hw, | 1658 | csio_warn(hw, |
| 1659 | "Under-flow error,cmnd:0x%x expected" | 1659 | "Under-flow error,cmnd:0x%x expected" |
| 1660 | " len:0x%x resid:0x%x lun:0x%x ssn:0x%x\n", | 1660 | " len:0x%x resid:0x%x lun:0x%llx ssn:0x%x\n", |
| 1661 | cmnd->cmnd[0], scsi_bufflen(cmnd), | 1661 | cmnd->cmnd[0], scsi_bufflen(cmnd), |
| 1662 | scsi_get_resid(cmnd), cmnd->device->lun, | 1662 | scsi_get_resid(cmnd), cmnd->device->lun, |
| 1663 | rn->flowid); | 1663 | rn->flowid); |
| @@ -1957,7 +1957,7 @@ csio_eh_abort_handler(struct scsi_cmnd *cmnd) | |||
| 1957 | 1957 | ||
| 1958 | csio_dbg(hw, | 1958 | csio_dbg(hw, |
| 1959 | "Request to abort ioreq:%p cmd:%p cdb:%08llx" | 1959 | "Request to abort ioreq:%p cmd:%p cdb:%08llx" |
| 1960 | " ssni:0x%x lun:%d iq:0x%x\n", | 1960 | " ssni:0x%x lun:%llu iq:0x%x\n", |
| 1961 | ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid, | 1961 | ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid, |
| 1962 | cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx)); | 1962 | cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx)); |
| 1963 | 1963 | ||
| @@ -2015,13 +2015,13 @@ inval_scmnd: | |||
| 2015 | /* FW successfully aborted the request */ | 2015 | /* FW successfully aborted the request */ |
| 2016 | if (host_byte(cmnd->result) == DID_REQUEUE) { | 2016 | if (host_byte(cmnd->result) == DID_REQUEUE) { |
| 2017 | csio_info(hw, | 2017 | csio_info(hw, |
| 2018 | "Aborted SCSI command to (%d:%d) serial#:0x%lx\n", | 2018 | "Aborted SCSI command to (%d:%llu) serial#:0x%lx\n", |
| 2019 | cmnd->device->id, cmnd->device->lun, | 2019 | cmnd->device->id, cmnd->device->lun, |
| 2020 | cmnd->serial_number); | 2020 | cmnd->serial_number); |
| 2021 | return SUCCESS; | 2021 | return SUCCESS; |
| 2022 | } else { | 2022 | } else { |
| 2023 | csio_info(hw, | 2023 | csio_info(hw, |
| 2024 | "Failed to abort SCSI command, (%d:%d) serial#:0x%lx\n", | 2024 | "Failed to abort SCSI command, (%d:%llu) serial#:0x%lx\n", |
| 2025 | cmnd->device->id, cmnd->device->lun, | 2025 | cmnd->device->id, cmnd->device->lun, |
| 2026 | cmnd->serial_number); | 2026 | cmnd->serial_number); |
| 2027 | return FAILED; | 2027 | return FAILED; |
| @@ -2100,13 +2100,13 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd) | |||
| 2100 | if (!rn) | 2100 | if (!rn) |
| 2101 | goto fail; | 2101 | goto fail; |
| 2102 | 2102 | ||
| 2103 | csio_dbg(hw, "Request to reset LUN:%d (ssni:0x%x tgtid:%d)\n", | 2103 | csio_dbg(hw, "Request to reset LUN:%llu (ssni:0x%x tgtid:%d)\n", |
| 2104 | cmnd->device->lun, rn->flowid, rn->scsi_id); | 2104 | cmnd->device->lun, rn->flowid, rn->scsi_id); |
| 2105 | 2105 | ||
| 2106 | if (!csio_is_lnode_ready(ln)) { | 2106 | if (!csio_is_lnode_ready(ln)) { |
| 2107 | csio_err(hw, | 2107 | csio_err(hw, |
| 2108 | "LUN reset cannot be issued on non-ready" | 2108 | "LUN reset cannot be issued on non-ready" |
| 2109 | " local node vnpi:0x%x (LUN:%d)\n", | 2109 | " local node vnpi:0x%x (LUN:%llu)\n", |
| 2110 | ln->vnp_flowid, cmnd->device->lun); | 2110 | ln->vnp_flowid, cmnd->device->lun); |
| 2111 | goto fail; | 2111 | goto fail; |
| 2112 | } | 2112 | } |
| @@ -2126,7 +2126,7 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd) | |||
| 2126 | if (fc_remote_port_chkready(rn->rport)) { | 2126 | if (fc_remote_port_chkready(rn->rport)) { |
| 2127 | csio_err(hw, | 2127 | csio_err(hw, |
| 2128 | "LUN reset cannot be issued on non-ready" | 2128 | "LUN reset cannot be issued on non-ready" |
| 2129 | " remote node ssni:0x%x (LUN:%d)\n", | 2129 | " remote node ssni:0x%x (LUN:%llu)\n", |
| 2130 | rn->flowid, cmnd->device->lun); | 2130 | rn->flowid, cmnd->device->lun); |
| 2131 | goto fail; | 2131 | goto fail; |
| 2132 | } | 2132 | } |
| @@ -2168,7 +2168,7 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd) | |||
| 2168 | sld.level = CSIO_LEV_LUN; | 2168 | sld.level = CSIO_LEV_LUN; |
| 2169 | sld.lnode = ioreq->lnode; | 2169 | sld.lnode = ioreq->lnode; |
| 2170 | sld.rnode = ioreq->rnode; | 2170 | sld.rnode = ioreq->rnode; |
| 2171 | sld.oslun = (uint64_t)cmnd->device->lun; | 2171 | sld.oslun = cmnd->device->lun; |
| 2172 | 2172 | ||
| 2173 | spin_lock_irqsave(&hw->lock, flags); | 2173 | spin_lock_irqsave(&hw->lock, flags); |
| 2174 | /* Kick off TM SM on the ioreq */ | 2174 | /* Kick off TM SM on the ioreq */ |
| @@ -2190,7 +2190,7 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd) | |||
| 2190 | 2190 | ||
| 2191 | /* LUN reset timed-out */ | 2191 | /* LUN reset timed-out */ |
| 2192 | if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) { | 2192 | if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) { |
| 2193 | csio_err(hw, "LUN reset (%d:%d) timed out\n", | 2193 | csio_err(hw, "LUN reset (%d:%llu) timed out\n", |
| 2194 | cmnd->device->id, cmnd->device->lun); | 2194 | cmnd->device->id, cmnd->device->lun); |
| 2195 | 2195 | ||
| 2196 | spin_lock_irq(&hw->lock); | 2196 | spin_lock_irq(&hw->lock); |
| @@ -2203,7 +2203,7 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd) | |||
| 2203 | 2203 | ||
| 2204 | /* LUN reset returned, check cached status */ | 2204 | /* LUN reset returned, check cached status */ |
| 2205 | if (cmnd->SCp.Status != FW_SUCCESS) { | 2205 | if (cmnd->SCp.Status != FW_SUCCESS) { |
| 2206 | csio_err(hw, "LUN reset failed (%d:%d), status: %d\n", | 2206 | csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n", |
| 2207 | cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status); | 2207 | cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status); |
| 2208 | goto fail; | 2208 | goto fail; |
| 2209 | } | 2209 | } |
| @@ -2223,7 +2223,7 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd) | |||
| 2223 | /* Aborts may have timed out */ | 2223 | /* Aborts may have timed out */ |
| 2224 | if (retval != 0) { | 2224 | if (retval != 0) { |
| 2225 | csio_err(hw, | 2225 | csio_err(hw, |
| 2226 | "Attempt to abort I/Os during LUN reset of %d" | 2226 | "Attempt to abort I/Os during LUN reset of %llu" |
| 2227 | " returned %d\n", cmnd->device->lun, retval); | 2227 | " returned %d\n", cmnd->device->lun, retval); |
| 2228 | /* Return I/Os back to active_q */ | 2228 | /* Return I/Os back to active_q */ |
| 2229 | spin_lock_irq(&hw->lock); | 2229 | spin_lock_irq(&hw->lock); |
| @@ -2234,7 +2234,7 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd) | |||
| 2234 | 2234 | ||
| 2235 | CSIO_INC_STATS(rn, n_lun_rst); | 2235 | CSIO_INC_STATS(rn, n_lun_rst); |
| 2236 | 2236 | ||
| 2237 | csio_info(hw, "LUN reset occurred (%d:%d)\n", | 2237 | csio_info(hw, "LUN reset occurred (%d:%llu)\n", |
| 2238 | cmnd->device->id, cmnd->device->lun); | 2238 | cmnd->device->id, cmnd->device->lun); |
| 2239 | 2239 | ||
| 2240 | return SUCCESS; | 2240 | return SUCCESS; |
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c index 4255ce264abf..773da14cfa14 100644 --- a/drivers/scsi/csiostor/csio_wr.c +++ b/drivers/scsi/csiostor/csio_wr.c | |||
| @@ -232,7 +232,7 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize, | |||
| 232 | 232 | ||
| 233 | q = wrm->q_arr[free_idx]; | 233 | q = wrm->q_arr[free_idx]; |
| 234 | 234 | ||
| 235 | q->vstart = pci_alloc_consistent(hw->pdev, qsz, &q->pstart); | 235 | q->vstart = pci_zalloc_consistent(hw->pdev, qsz, &q->pstart); |
| 236 | if (!q->vstart) { | 236 | if (!q->vstart) { |
| 237 | csio_err(hw, | 237 | csio_err(hw, |
| 238 | "Failed to allocate DMA memory for " | 238 | "Failed to allocate DMA memory for " |
| @@ -240,12 +240,6 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize, | |||
| 240 | return -1; | 240 | return -1; |
| 241 | } | 241 | } |
| 242 | 242 | ||
| 243 | /* | ||
| 244 | * We need to zero out the contents, importantly for ingress, | ||
| 245 | * since we start with a generatiom bit of 1 for ingress. | ||
| 246 | */ | ||
| 247 | memset(q->vstart, 0, qsz); | ||
| 248 | |||
| 249 | q->type = type; | 243 | q->type = type; |
| 250 | q->owner = owner; | 244 | q->owner = owner; |
| 251 | q->pidx = q->cidx = q->inc_idx = 0; | 245 | q->pidx = q->cidx = q->inc_idx = 0; |
diff --git a/drivers/scsi/cxgbi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig index 6bbc36fbd6ec..e4603985dce3 100644 --- a/drivers/scsi/cxgbi/cxgb3i/Kconfig +++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config SCSI_CXGB3_ISCSI | 1 | config SCSI_CXGB3_ISCSI |
| 2 | tristate "Chelsio T3 iSCSI support" | 2 | tristate "Chelsio T3 iSCSI support" |
| 3 | depends on PCI && INET | 3 | depends on PCI && INET && (IPV6 || IPV6=n) |
| 4 | select NETDEVICES | 4 | select NETDEVICES |
| 5 | select ETHERNET | 5 | select ETHERNET |
| 6 | select NET_VENDOR_CHELSIO | 6 | select NET_VENDOR_CHELSIO |
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig index 16b2c7d26617..8c4e423037b6 100644 --- a/drivers/scsi/cxgbi/cxgb4i/Kconfig +++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config SCSI_CXGB4_ISCSI | 1 | config SCSI_CXGB4_ISCSI |
| 2 | tristate "Chelsio T4 iSCSI support" | 2 | tristate "Chelsio T4 iSCSI support" |
| 3 | depends on PCI && INET | 3 | depends on PCI && INET && (IPV6 || IPV6=n) |
| 4 | select NETDEVICES | 4 | select NETDEVICES |
| 5 | select ETHERNET | 5 | select ETHERNET |
| 6 | select NET_VENDOR_CHELSIO | 6 | select NET_VENDOR_CHELSIO |
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index e8ee5e5fe0ef..79788a12712d 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <net/tcp.h> | 19 | #include <net/tcp.h> |
| 20 | #include <net/dst.h> | 20 | #include <net/dst.h> |
| 21 | #include <linux/netdevice.h> | 21 | #include <linux/netdevice.h> |
| 22 | #include <net/addrconf.h> | ||
| 22 | 23 | ||
| 23 | #include "t4_regs.h" | 24 | #include "t4_regs.h" |
| 24 | #include "t4_msg.h" | 25 | #include "t4_msg.h" |
| @@ -150,6 +151,7 @@ static struct scsi_transport_template *cxgb4i_stt; | |||
| 150 | * The section below implments CPLs that related to iscsi tcp connection | 151 | * The section below implments CPLs that related to iscsi tcp connection |
| 151 | * open/close/abort and data send/receive. | 152 | * open/close/abort and data send/receive. |
| 152 | */ | 153 | */ |
| 154 | |||
| 153 | #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) | 155 | #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) |
| 154 | #define RCV_BUFSIZ_MASK 0x3FFU | 156 | #define RCV_BUFSIZ_MASK 0x3FFU |
| 155 | #define MAX_IMM_TX_PKT_LEN 128 | 157 | #define MAX_IMM_TX_PKT_LEN 128 |
| @@ -179,6 +181,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, | |||
| 179 | struct l2t_entry *e) | 181 | struct l2t_entry *e) |
| 180 | { | 182 | { |
| 181 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); | 183 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); |
| 184 | int t4 = is_t4(lldi->adapter_type); | ||
| 182 | int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); | 185 | int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); |
| 183 | unsigned long long opt0; | 186 | unsigned long long opt0; |
| 184 | unsigned int opt2; | 187 | unsigned int opt2; |
| @@ -248,6 +251,97 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, | |||
| 248 | } | 251 | } |
| 249 | 252 | ||
| 250 | set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); | 253 | set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); |
| 254 | |||
| 255 | pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n", | ||
| 256 | (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk, | ||
| 257 | csk->state, csk->flags, csk->atid, csk->rss_qid); | ||
| 258 | |||
| 259 | cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); | ||
| 260 | } | ||
| 261 | |||
| 262 | static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, | ||
| 263 | struct l2t_entry *e) | ||
| 264 | { | ||
| 265 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); | ||
| 266 | int t4 = is_t4(lldi->adapter_type); | ||
| 267 | int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); | ||
| 268 | unsigned long long opt0; | ||
| 269 | unsigned int opt2; | ||
| 270 | unsigned int qid_atid = ((unsigned int)csk->atid) | | ||
| 271 | (((unsigned int)csk->rss_qid) << 14); | ||
| 272 | |||
| 273 | opt0 = KEEP_ALIVE(1) | | ||
| 274 | WND_SCALE(wscale) | | ||
| 275 | MSS_IDX(csk->mss_idx) | | ||
| 276 | L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) | | ||
| 277 | TX_CHAN(csk->tx_chan) | | ||
| 278 | SMAC_SEL(csk->smac_idx) | | ||
| 279 | ULP_MODE(ULP_MODE_ISCSI) | | ||
| 280 | RCV_BUFSIZ(cxgb4i_rcv_win >> 10); | ||
| 281 | |||
| 282 | opt2 = RX_CHANNEL(0) | | ||
| 283 | RSS_QUEUE_VALID | | ||
| 284 | RX_FC_DISABLE | | ||
| 285 | RSS_QUEUE(csk->rss_qid); | ||
| 286 | |||
| 287 | if (t4) { | ||
| 288 | struct cpl_act_open_req6 *req = | ||
| 289 | (struct cpl_act_open_req6 *)skb->head; | ||
| 290 | |||
| 291 | INIT_TP_WR(req, 0); | ||
| 292 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, | ||
| 293 | qid_atid)); | ||
| 294 | req->local_port = csk->saddr6.sin6_port; | ||
| 295 | req->peer_port = csk->daddr6.sin6_port; | ||
| 296 | |||
| 297 | req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); | ||
| 298 | req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + | ||
| 299 | 8); | ||
| 300 | req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); | ||
| 301 | req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + | ||
| 302 | 8); | ||
| 303 | |||
| 304 | req->opt0 = cpu_to_be64(opt0); | ||
| 305 | |||
| 306 | opt2 |= RX_FC_VALID; | ||
| 307 | req->opt2 = cpu_to_be32(opt2); | ||
| 308 | |||
| 309 | req->params = cpu_to_be32(cxgb4_select_ntuple( | ||
| 310 | csk->cdev->ports[csk->port_id], | ||
| 311 | csk->l2t)); | ||
| 312 | } else { | ||
| 313 | struct cpl_t5_act_open_req6 *req = | ||
| 314 | (struct cpl_t5_act_open_req6 *)skb->head; | ||
| 315 | |||
| 316 | INIT_TP_WR(req, 0); | ||
| 317 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, | ||
| 318 | qid_atid)); | ||
| 319 | req->local_port = csk->saddr6.sin6_port; | ||
| 320 | req->peer_port = csk->daddr6.sin6_port; | ||
| 321 | req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); | ||
| 322 | req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + | ||
| 323 | 8); | ||
| 324 | req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); | ||
| 325 | req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + | ||
| 326 | 8); | ||
| 327 | req->opt0 = cpu_to_be64(opt0); | ||
| 328 | |||
| 329 | opt2 |= T5_OPT_2_VALID; | ||
| 330 | req->opt2 = cpu_to_be32(opt2); | ||
| 331 | |||
| 332 | req->params = cpu_to_be64(V_FILTER_TUPLE(cxgb4_select_ntuple( | ||
| 333 | csk->cdev->ports[csk->port_id], | ||
| 334 | csk->l2t))); | ||
| 335 | } | ||
| 336 | |||
| 337 | set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); | ||
| 338 | |||
| 339 | pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n", | ||
| 340 | t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid, | ||
| 341 | &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port), | ||
| 342 | &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port), | ||
| 343 | csk->rss_qid); | ||
| 344 | |||
| 251 | cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); | 345 | cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); |
| 252 | } | 346 | } |
| 253 | 347 | ||
| @@ -586,9 +680,11 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb) | |||
| 586 | goto rel_skb; | 680 | goto rel_skb; |
| 587 | } | 681 | } |
| 588 | 682 | ||
| 589 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 683 | pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n", |
| 590 | "csk 0x%p,%u,0x%lx, tid %u, atid %u, rseq %u.\n", | 684 | (&csk->saddr), (&csk->daddr), |
| 591 | csk, csk->state, csk->flags, tid, atid, rcv_isn); | 685 | atid, tid, csk, csk->state, csk->flags, rcv_isn); |
| 686 | |||
| 687 | module_put(THIS_MODULE); | ||
| 592 | 688 | ||
| 593 | cxgbi_sock_get(csk); | 689 | cxgbi_sock_get(csk); |
| 594 | csk->tid = tid; | 690 | csk->tid = tid; |
| @@ -663,6 +759,9 @@ static void csk_act_open_retry_timer(unsigned long data) | |||
| 663 | struct sk_buff *skb; | 759 | struct sk_buff *skb; |
| 664 | struct cxgbi_sock *csk = (struct cxgbi_sock *)data; | 760 | struct cxgbi_sock *csk = (struct cxgbi_sock *)data; |
| 665 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); | 761 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); |
| 762 | void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *, | ||
| 763 | struct l2t_entry *); | ||
| 764 | int t4 = is_t4(lldi->adapter_type), size, size6; | ||
| 666 | 765 | ||
| 667 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 766 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
| 668 | "csk 0x%p,%u,0x%lx,%u.\n", | 767 | "csk 0x%p,%u,0x%lx,%u.\n", |
| @@ -670,20 +769,35 @@ static void csk_act_open_retry_timer(unsigned long data) | |||
| 670 | 769 | ||
| 671 | cxgbi_sock_get(csk); | 770 | cxgbi_sock_get(csk); |
| 672 | spin_lock_bh(&csk->lock); | 771 | spin_lock_bh(&csk->lock); |
| 673 | skb = alloc_wr(is_t4(lldi->adapter_type) ? | 772 | |
| 674 | sizeof(struct cpl_act_open_req) : | 773 | if (t4) { |
| 675 | sizeof(struct cpl_t5_act_open_req), | 774 | size = sizeof(struct cpl_act_open_req); |
| 676 | 0, GFP_ATOMIC); | 775 | size6 = sizeof(struct cpl_act_open_req6); |
| 776 | } else { | ||
| 777 | size = sizeof(struct cpl_t5_act_open_req); | ||
| 778 | size6 = sizeof(struct cpl_t5_act_open_req6); | ||
| 779 | } | ||
| 780 | |||
| 781 | if (csk->csk_family == AF_INET) { | ||
| 782 | send_act_open_func = send_act_open_req; | ||
| 783 | skb = alloc_wr(size, 0, GFP_ATOMIC); | ||
| 784 | } else { | ||
| 785 | send_act_open_func = send_act_open_req6; | ||
| 786 | skb = alloc_wr(size6, 0, GFP_ATOMIC); | ||
| 787 | } | ||
| 788 | |||
| 677 | if (!skb) | 789 | if (!skb) |
| 678 | cxgbi_sock_fail_act_open(csk, -ENOMEM); | 790 | cxgbi_sock_fail_act_open(csk, -ENOMEM); |
| 679 | else { | 791 | else { |
| 680 | skb->sk = (struct sock *)csk; | 792 | skb->sk = (struct sock *)csk; |
| 681 | t4_set_arp_err_handler(skb, csk, | 793 | t4_set_arp_err_handler(skb, csk, |
| 682 | cxgbi_sock_act_open_req_arp_failure); | 794 | cxgbi_sock_act_open_req_arp_failure); |
| 683 | send_act_open_req(csk, skb, csk->l2t); | 795 | send_act_open_func(csk, skb, csk->l2t); |
| 684 | } | 796 | } |
| 797 | |||
| 685 | spin_unlock_bh(&csk->lock); | 798 | spin_unlock_bh(&csk->lock); |
| 686 | cxgbi_sock_put(csk); | 799 | cxgbi_sock_put(csk); |
| 800 | |||
| 687 | } | 801 | } |
| 688 | 802 | ||
| 689 | static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) | 803 | static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) |
| @@ -703,10 +817,9 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) | |||
| 703 | goto rel_skb; | 817 | goto rel_skb; |
| 704 | } | 818 | } |
| 705 | 819 | ||
| 706 | pr_info("%pI4:%u-%pI4:%u, atid %u,%u, status %u, csk 0x%p,%u,0x%lx.\n", | 820 | pr_info_ipaddr("tid %u/%u, status %u.\n" |
| 707 | &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), | 821 | "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr), |
| 708 | &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port), | 822 | atid, tid, status, csk, csk->state, csk->flags); |
| 709 | atid, tid, status, csk, csk->state, csk->flags); | ||
| 710 | 823 | ||
| 711 | if (status == CPL_ERR_RTX_NEG_ADVICE) | 824 | if (status == CPL_ERR_RTX_NEG_ADVICE) |
| 712 | goto rel_skb; | 825 | goto rel_skb; |
| @@ -746,9 +859,9 @@ static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb) | |||
| 746 | pr_err("can't find connection for tid %u.\n", tid); | 859 | pr_err("can't find connection for tid %u.\n", tid); |
| 747 | goto rel_skb; | 860 | goto rel_skb; |
| 748 | } | 861 | } |
| 749 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 862 | pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n", |
| 750 | "csk 0x%p,%u,0x%lx,%u.\n", | 863 | (&csk->saddr), (&csk->daddr), |
| 751 | csk, csk->state, csk->flags, csk->tid); | 864 | csk, csk->state, csk->flags, csk->tid); |
| 752 | cxgbi_sock_rcv_peer_close(csk); | 865 | cxgbi_sock_rcv_peer_close(csk); |
| 753 | rel_skb: | 866 | rel_skb: |
| 754 | __kfree_skb(skb); | 867 | __kfree_skb(skb); |
| @@ -767,9 +880,9 @@ static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) | |||
| 767 | pr_err("can't find connection for tid %u.\n", tid); | 880 | pr_err("can't find connection for tid %u.\n", tid); |
| 768 | goto rel_skb; | 881 | goto rel_skb; |
| 769 | } | 882 | } |
| 770 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 883 | pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n", |
| 771 | "csk 0x%p,%u,0x%lx,%u.\n", | 884 | (&csk->saddr), (&csk->daddr), |
| 772 | csk, csk->state, csk->flags, csk->tid); | 885 | csk, csk->state, csk->flags, csk->tid); |
| 773 | cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); | 886 | cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); |
| 774 | rel_skb: | 887 | rel_skb: |
| 775 | __kfree_skb(skb); | 888 | __kfree_skb(skb); |
| @@ -808,9 +921,9 @@ static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb) | |||
| 808 | goto rel_skb; | 921 | goto rel_skb; |
| 809 | } | 922 | } |
| 810 | 923 | ||
| 811 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 924 | pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n", |
| 812 | "csk 0x%p,%u,0x%lx, tid %u, status 0x%x.\n", | 925 | (&csk->saddr), (&csk->daddr), |
| 813 | csk, csk->state, csk->flags, csk->tid, req->status); | 926 | csk, csk->state, csk->flags, csk->tid, req->status); |
| 814 | 927 | ||
| 815 | if (req->status == CPL_ERR_RTX_NEG_ADVICE || | 928 | if (req->status == CPL_ERR_RTX_NEG_ADVICE || |
| 816 | req->status == CPL_ERR_PERSIST_NEG_ADVICE) | 929 | req->status == CPL_ERR_PERSIST_NEG_ADVICE) |
| @@ -851,10 +964,10 @@ static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb) | |||
| 851 | if (!csk) | 964 | if (!csk) |
| 852 | goto rel_skb; | 965 | goto rel_skb; |
| 853 | 966 | ||
| 854 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 967 | if (csk) |
| 855 | "status 0x%x, csk 0x%p, s %u, 0x%lx.\n", | 968 | pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n", |
| 856 | rpl->status, csk, csk ? csk->state : 0, | 969 | (&csk->saddr), (&csk->daddr), csk, |
| 857 | csk ? csk->flags : 0UL); | 970 | csk->state, csk->flags, csk->tid, rpl->status); |
| 858 | 971 | ||
| 859 | if (rpl->status == CPL_ERR_ABORT_FAILED) | 972 | if (rpl->status == CPL_ERR_ABORT_FAILED) |
| 860 | goto rel_skb; | 973 | goto rel_skb; |
| @@ -1163,15 +1276,35 @@ static int init_act_open(struct cxgbi_sock *csk) | |||
| 1163 | struct cxgbi_device *cdev = csk->cdev; | 1276 | struct cxgbi_device *cdev = csk->cdev; |
| 1164 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); | 1277 | struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); |
| 1165 | struct net_device *ndev = cdev->ports[csk->port_id]; | 1278 | struct net_device *ndev = cdev->ports[csk->port_id]; |
| 1166 | struct port_info *pi = netdev_priv(ndev); | ||
| 1167 | struct sk_buff *skb = NULL; | 1279 | struct sk_buff *skb = NULL; |
| 1168 | struct neighbour *n; | 1280 | struct neighbour *n = NULL; |
| 1281 | void *daddr; | ||
| 1169 | unsigned int step; | 1282 | unsigned int step; |
| 1283 | unsigned int size, size6; | ||
| 1284 | int t4 = is_t4(lldi->adapter_type); | ||
| 1170 | 1285 | ||
| 1171 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | 1286 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, |
| 1172 | "csk 0x%p,%u,0x%lx,%u.\n", | 1287 | "csk 0x%p,%u,0x%lx,%u.\n", |
| 1173 | csk, csk->state, csk->flags, csk->tid); | 1288 | csk, csk->state, csk->flags, csk->tid); |
| 1174 | 1289 | ||
| 1290 | if (csk->csk_family == AF_INET) | ||
| 1291 | daddr = &csk->daddr.sin_addr.s_addr; | ||
| 1292 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 1293 | else if (csk->csk_family == AF_INET6) | ||
| 1294 | daddr = &csk->daddr6.sin6_addr; | ||
| 1295 | #endif | ||
| 1296 | else { | ||
| 1297 | pr_err("address family 0x%x not supported\n", csk->csk_family); | ||
| 1298 | goto rel_resource; | ||
| 1299 | } | ||
| 1300 | |||
| 1301 | n = dst_neigh_lookup(csk->dst, daddr); | ||
| 1302 | |||
| 1303 | if (!n) { | ||
| 1304 | pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name); | ||
| 1305 | goto rel_resource; | ||
| 1306 | } | ||
| 1307 | |||
| 1175 | csk->atid = cxgb4_alloc_atid(lldi->tids, csk); | 1308 | csk->atid = cxgb4_alloc_atid(lldi->tids, csk); |
| 1176 | if (csk->atid < 0) { | 1309 | if (csk->atid < 0) { |
| 1177 | pr_err("%s, NO atid available.\n", ndev->name); | 1310 | pr_err("%s, NO atid available.\n", ndev->name); |
| @@ -1192,10 +1325,19 @@ static int init_act_open(struct cxgbi_sock *csk) | |||
| 1192 | } | 1325 | } |
| 1193 | cxgbi_sock_get(csk); | 1326 | cxgbi_sock_get(csk); |
| 1194 | 1327 | ||
| 1195 | skb = alloc_wr(is_t4(lldi->adapter_type) ? | 1328 | if (t4) { |
| 1196 | sizeof(struct cpl_act_open_req) : | 1329 | size = sizeof(struct cpl_act_open_req); |
| 1197 | sizeof(struct cpl_t5_act_open_req), | 1330 | size6 = sizeof(struct cpl_act_open_req6); |
| 1198 | 0, GFP_ATOMIC); | 1331 | } else { |
| 1332 | size = sizeof(struct cpl_t5_act_open_req); | ||
| 1333 | size6 = sizeof(struct cpl_t5_act_open_req6); | ||
| 1334 | } | ||
| 1335 | |||
| 1336 | if (csk->csk_family == AF_INET) | ||
| 1337 | skb = alloc_wr(size, 0, GFP_NOIO); | ||
| 1338 | else | ||
| 1339 | skb = alloc_wr(size6, 0, GFP_NOIO); | ||
| 1340 | |||
| 1199 | if (!skb) | 1341 | if (!skb) |
| 1200 | goto rel_resource; | 1342 | goto rel_resource; |
| 1201 | skb->sk = (struct sock *)csk; | 1343 | skb->sk = (struct sock *)csk; |
| @@ -1211,19 +1353,27 @@ static int init_act_open(struct cxgbi_sock *csk) | |||
| 1211 | csk->txq_idx = cxgb4_port_idx(ndev) * step; | 1353 | csk->txq_idx = cxgb4_port_idx(ndev) * step; |
| 1212 | step = lldi->nrxq / lldi->nchan; | 1354 | step = lldi->nrxq / lldi->nchan; |
| 1213 | csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step]; | 1355 | csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step]; |
| 1214 | csk->wr_max_cred = csk->wr_cred = lldi->wr_cred; | 1356 | csk->wr_cred = lldi->wr_cred - |
| 1357 | DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16); | ||
| 1358 | csk->wr_max_cred = csk->wr_cred; | ||
| 1215 | csk->wr_una_cred = 0; | 1359 | csk->wr_una_cred = 0; |
| 1216 | cxgbi_sock_reset_wr_list(csk); | 1360 | cxgbi_sock_reset_wr_list(csk); |
| 1217 | csk->err = 0; | 1361 | csk->err = 0; |
| 1218 | log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, | ||
| 1219 | "csk 0x%p,p%d,%s, %u,%u,%u, mss %u,%u, smac %u.\n", | ||
| 1220 | csk, pi->port_id, ndev->name, csk->tx_chan, | ||
| 1221 | csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx, | ||
| 1222 | csk->smac_idx); | ||
| 1223 | 1362 | ||
| 1363 | pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n", | ||
| 1364 | (&csk->saddr), (&csk->daddr), csk, csk->state, | ||
| 1365 | csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid, | ||
| 1366 | csk->mtu, csk->mss_idx, csk->smac_idx); | ||
| 1367 | |||
| 1368 | /* must wait for either a act_open_rpl or act_open_establish */ | ||
| 1369 | try_module_get(THIS_MODULE); | ||
| 1224 | cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); | 1370 | cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); |
| 1225 | send_act_open_req(csk, skb, csk->l2t); | 1371 | if (csk->csk_family == AF_INET) |
| 1372 | send_act_open_req(csk, skb, csk->l2t); | ||
| 1373 | else | ||
| 1374 | send_act_open_req6(csk, skb, csk->l2t); | ||
| 1226 | neigh_release(n); | 1375 | neigh_release(n); |
| 1376 | |||
| 1227 | return 0; | 1377 | return 0; |
| 1228 | 1378 | ||
| 1229 | rel_resource: | 1379 | rel_resource: |
| @@ -1234,8 +1384,6 @@ rel_resource: | |||
| 1234 | return -EINVAL; | 1384 | return -EINVAL; |
| 1235 | } | 1385 | } |
| 1236 | 1386 | ||
| 1237 | #define CPL_ISCSI_DATA 0xB2 | ||
| 1238 | #define CPL_RX_ISCSI_DDP 0x49 | ||
| 1239 | cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { | 1387 | cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { |
| 1240 | [CPL_ACT_ESTABLISH] = do_act_establish, | 1388 | [CPL_ACT_ESTABLISH] = do_act_establish, |
| 1241 | [CPL_ACT_OPEN_RPL] = do_act_open_rpl, | 1389 | [CPL_ACT_OPEN_RPL] = do_act_open_rpl, |
| @@ -1487,6 +1635,129 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev) | |||
| 1487 | return 0; | 1635 | return 0; |
| 1488 | } | 1636 | } |
| 1489 | 1637 | ||
| 1638 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 1639 | static int cxgbi_inet6addr_handler(struct notifier_block *this, | ||
| 1640 | unsigned long event, void *data) | ||
| 1641 | { | ||
| 1642 | struct inet6_ifaddr *ifa = data; | ||
| 1643 | struct net_device *event_dev = ifa->idev->dev; | ||
| 1644 | struct cxgbi_device *cdev; | ||
| 1645 | int ret = NOTIFY_DONE; | ||
| 1646 | |||
| 1647 | if (event_dev->priv_flags & IFF_802_1Q_VLAN) | ||
| 1648 | event_dev = vlan_dev_real_dev(event_dev); | ||
| 1649 | |||
| 1650 | cdev = cxgbi_device_find_by_netdev(event_dev, NULL); | ||
| 1651 | |||
| 1652 | if (!cdev) | ||
| 1653 | return ret; | ||
| 1654 | |||
| 1655 | switch (event) { | ||
| 1656 | case NETDEV_UP: | ||
| 1657 | ret = cxgb4_clip_get(event_dev, | ||
| 1658 | (const struct in6_addr *) | ||
| 1659 | ((ifa)->addr.s6_addr)); | ||
| 1660 | if (ret < 0) | ||
| 1661 | return ret; | ||
| 1662 | |||
| 1663 | ret = NOTIFY_OK; | ||
| 1664 | break; | ||
| 1665 | |||
| 1666 | case NETDEV_DOWN: | ||
| 1667 | cxgb4_clip_release(event_dev, | ||
| 1668 | (const struct in6_addr *) | ||
| 1669 | ((ifa)->addr.s6_addr)); | ||
| 1670 | ret = NOTIFY_OK; | ||
| 1671 | break; | ||
| 1672 | |||
| 1673 | default: | ||
| 1674 | break; | ||
| 1675 | } | ||
| 1676 | |||
| 1677 | return ret; | ||
| 1678 | } | ||
| 1679 | |||
| 1680 | static struct notifier_block cxgbi_inet6addr_notifier = { | ||
| 1681 | .notifier_call = cxgbi_inet6addr_handler | ||
| 1682 | }; | ||
| 1683 | |||
| 1684 | /* Retrieve IPv6 addresses from a root device (bond, vlan) associated with | ||
| 1685 | * a physical device. | ||
| 1686 | * The physical device reference is needed to send the actual CLIP command. | ||
| 1687 | */ | ||
| 1688 | static int update_dev_clip(struct net_device *root_dev, struct net_device *dev) | ||
| 1689 | { | ||
| 1690 | struct inet6_dev *idev = NULL; | ||
| 1691 | struct inet6_ifaddr *ifa; | ||
| 1692 | int ret = 0; | ||
| 1693 | |||
| 1694 | idev = __in6_dev_get(root_dev); | ||
| 1695 | if (!idev) | ||
| 1696 | return ret; | ||
| 1697 | |||
| 1698 | read_lock_bh(&idev->lock); | ||
| 1699 | list_for_each_entry(ifa, &idev->addr_list, if_list) { | ||
| 1700 | pr_info("updating the clip for addr %pI6\n", | ||
| 1701 | ifa->addr.s6_addr); | ||
| 1702 | ret = cxgb4_clip_get(dev, (const struct in6_addr *) | ||
| 1703 | ifa->addr.s6_addr); | ||
| 1704 | if (ret < 0) | ||
| 1705 | break; | ||
| 1706 | } | ||
| 1707 | |||
| 1708 | read_unlock_bh(&idev->lock); | ||
| 1709 | return ret; | ||
| 1710 | } | ||
| 1711 | |||
| 1712 | static int update_root_dev_clip(struct net_device *dev) | ||
| 1713 | { | ||
| 1714 | struct net_device *root_dev = NULL; | ||
| 1715 | int i, ret = 0; | ||
| 1716 | |||
| 1717 | /* First populate the real net device's IPv6 address */ | ||
| 1718 | ret = update_dev_clip(dev, dev); | ||
| 1719 | if (ret) | ||
| 1720 | return ret; | ||
| 1721 | |||
| 1722 | /* Parse all bond and vlan devices layered on top of the physical dev */ | ||
| 1723 | root_dev = netdev_master_upper_dev_get(dev); | ||
| 1724 | if (root_dev) { | ||
| 1725 | ret = update_dev_clip(root_dev, dev); | ||
| 1726 | if (ret) | ||
| 1727 | return ret; | ||
| 1728 | } | ||
| 1729 | |||
| 1730 | for (i = 0; i < VLAN_N_VID; i++) { | ||
| 1731 | root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i); | ||
| 1732 | if (!root_dev) | ||
| 1733 | continue; | ||
| 1734 | |||
| 1735 | ret = update_dev_clip(root_dev, dev); | ||
| 1736 | if (ret) | ||
| 1737 | break; | ||
| 1738 | } | ||
| 1739 | return ret; | ||
| 1740 | } | ||
| 1741 | |||
| 1742 | static void cxgbi_update_clip(struct cxgbi_device *cdev) | ||
| 1743 | { | ||
| 1744 | int i; | ||
| 1745 | |||
| 1746 | rcu_read_lock(); | ||
| 1747 | |||
| 1748 | for (i = 0; i < cdev->nports; i++) { | ||
| 1749 | struct net_device *dev = cdev->ports[i]; | ||
| 1750 | int ret = 0; | ||
| 1751 | |||
| 1752 | if (dev) | ||
| 1753 | ret = update_root_dev_clip(dev); | ||
| 1754 | if (ret < 0) | ||
| 1755 | break; | ||
| 1756 | } | ||
| 1757 | rcu_read_unlock(); | ||
| 1758 | } | ||
| 1759 | #endif /* IS_ENABLED(CONFIG_IPV6) */ | ||
| 1760 | |||
| 1490 | static void *t4_uld_add(const struct cxgb4_lld_info *lldi) | 1761 | static void *t4_uld_add(const struct cxgb4_lld_info *lldi) |
| 1491 | { | 1762 | { |
| 1492 | struct cxgbi_device *cdev; | 1763 | struct cxgbi_device *cdev; |
| @@ -1605,6 +1876,9 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state) | |||
| 1605 | switch (state) { | 1876 | switch (state) { |
| 1606 | case CXGB4_STATE_UP: | 1877 | case CXGB4_STATE_UP: |
| 1607 | pr_info("cdev 0x%p, UP.\n", cdev); | 1878 | pr_info("cdev 0x%p, UP.\n", cdev); |
| 1879 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 1880 | cxgbi_update_clip(cdev); | ||
| 1881 | #endif | ||
| 1608 | /* re-initialize */ | 1882 | /* re-initialize */ |
| 1609 | break; | 1883 | break; |
| 1610 | case CXGB4_STATE_START_RECOVERY: | 1884 | case CXGB4_STATE_START_RECOVERY: |
| @@ -1635,11 +1909,18 @@ static int __init cxgb4i_init_module(void) | |||
| 1635 | if (rc < 0) | 1909 | if (rc < 0) |
| 1636 | return rc; | 1910 | return rc; |
| 1637 | cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info); | 1911 | cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info); |
| 1912 | |||
| 1913 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 1914 | register_inet6addr_notifier(&cxgbi_inet6addr_notifier); | ||
| 1915 | #endif | ||
| 1638 | return 0; | 1916 | return 0; |
| 1639 | } | 1917 | } |
| 1640 | 1918 | ||
| 1641 | static void __exit cxgb4i_exit_module(void) | 1919 | static void __exit cxgb4i_exit_module(void) |
| 1642 | { | 1920 | { |
| 1921 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 1922 | unregister_inet6addr_notifier(&cxgbi_inet6addr_notifier); | ||
| 1923 | #endif | ||
| 1643 | cxgb4_unregister_uld(CXGB4_ULD_ISCSI); | 1924 | cxgb4_unregister_uld(CXGB4_ULD_ISCSI); |
| 1644 | cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4); | 1925 | cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4); |
| 1645 | cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt); | 1926 | cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt); |
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index b44c1cff3114..d65df6dc106f 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c | |||
| @@ -24,6 +24,10 @@ | |||
| 24 | #include <linux/inet.h> | 24 | #include <linux/inet.h> |
| 25 | #include <net/dst.h> | 25 | #include <net/dst.h> |
| 26 | #include <net/route.h> | 26 | #include <net/route.h> |
| 27 | #include <net/ipv6.h> | ||
| 28 | #include <net/ip6_route.h> | ||
| 29 | #include <net/addrconf.h> | ||
| 30 | |||
| 27 | #include <linux/inetdevice.h> /* ip_dev_find */ | 31 | #include <linux/inetdevice.h> /* ip_dev_find */ |
| 28 | #include <linux/module.h> | 32 | #include <linux/module.h> |
| 29 | #include <net/tcp.h> | 33 | #include <net/tcp.h> |
| @@ -193,8 +197,8 @@ struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev) | |||
| 193 | } | 197 | } |
| 194 | EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev); | 198 | EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev); |
| 195 | 199 | ||
| 196 | static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, | 200 | struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, |
| 197 | int *port) | 201 | int *port) |
| 198 | { | 202 | { |
| 199 | struct net_device *vdev = NULL; | 203 | struct net_device *vdev = NULL; |
| 200 | struct cxgbi_device *cdev, *tmp; | 204 | struct cxgbi_device *cdev, *tmp; |
| @@ -224,6 +228,40 @@ static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, | |||
| 224 | "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); | 228 | "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); |
| 225 | return NULL; | 229 | return NULL; |
| 226 | } | 230 | } |
| 231 | EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev); | ||
| 232 | |||
| 233 | static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev, | ||
| 234 | int *port) | ||
| 235 | { | ||
| 236 | struct net_device *vdev = NULL; | ||
| 237 | struct cxgbi_device *cdev, *tmp; | ||
| 238 | int i; | ||
| 239 | |||
| 240 | if (ndev->priv_flags & IFF_802_1Q_VLAN) { | ||
| 241 | vdev = ndev; | ||
| 242 | ndev = vlan_dev_real_dev(ndev); | ||
| 243 | pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); | ||
| 244 | } | ||
| 245 | |||
| 246 | mutex_lock(&cdev_mutex); | ||
| 247 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { | ||
| 248 | for (i = 0; i < cdev->nports; i++) { | ||
| 249 | if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr, | ||
| 250 | MAX_ADDR_LEN)) { | ||
| 251 | cdev->hbas[i]->vdev = vdev; | ||
| 252 | mutex_unlock(&cdev_mutex); | ||
| 253 | if (port) | ||
| 254 | *port = i; | ||
| 255 | return cdev; | ||
| 256 | } | ||
| 257 | } | ||
| 258 | } | ||
| 259 | mutex_unlock(&cdev_mutex); | ||
| 260 | log_debug(1 << CXGBI_DBG_DEV, | ||
| 261 | "ndev 0x%p, %s, NO match mac found.\n", | ||
| 262 | ndev, ndev->name); | ||
| 263 | return NULL; | ||
| 264 | } | ||
| 227 | 265 | ||
| 228 | void cxgbi_hbas_remove(struct cxgbi_device *cdev) | 266 | void cxgbi_hbas_remove(struct cxgbi_device *cdev) |
| 229 | { | 267 | { |
| @@ -245,7 +283,7 @@ void cxgbi_hbas_remove(struct cxgbi_device *cdev) | |||
| 245 | } | 283 | } |
| 246 | EXPORT_SYMBOL_GPL(cxgbi_hbas_remove); | 284 | EXPORT_SYMBOL_GPL(cxgbi_hbas_remove); |
| 247 | 285 | ||
| 248 | int cxgbi_hbas_add(struct cxgbi_device *cdev, unsigned int max_lun, | 286 | int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun, |
| 249 | unsigned int max_id, struct scsi_host_template *sht, | 287 | unsigned int max_id, struct scsi_host_template *sht, |
| 250 | struct scsi_transport_template *stt) | 288 | struct scsi_transport_template *stt) |
| 251 | { | 289 | { |
| @@ -320,6 +358,7 @@ static int sock_get_port(struct cxgbi_sock *csk) | |||
| 320 | struct cxgbi_ports_map *pmap = &cdev->pmap; | 358 | struct cxgbi_ports_map *pmap = &cdev->pmap; |
| 321 | unsigned int start; | 359 | unsigned int start; |
| 322 | int idx; | 360 | int idx; |
| 361 | __be16 *port; | ||
| 323 | 362 | ||
| 324 | if (!pmap->max_connect) { | 363 | if (!pmap->max_connect) { |
| 325 | pr_err("cdev 0x%p, p#%u %s, NO port map.\n", | 364 | pr_err("cdev 0x%p, p#%u %s, NO port map.\n", |
| @@ -327,9 +366,14 @@ static int sock_get_port(struct cxgbi_sock *csk) | |||
| 327 | return -EADDRNOTAVAIL; | 366 | return -EADDRNOTAVAIL; |
| 328 | } | 367 | } |
| 329 | 368 | ||
| 330 | if (csk->saddr.sin_port) { | 369 | if (csk->csk_family == AF_INET) |
| 370 | port = &csk->saddr.sin_port; | ||
| 371 | else /* ipv6 */ | ||
| 372 | port = &csk->saddr6.sin6_port; | ||
| 373 | |||
| 374 | if (*port) { | ||
| 331 | pr_err("source port NON-ZERO %u.\n", | 375 | pr_err("source port NON-ZERO %u.\n", |
| 332 | ntohs(csk->saddr.sin_port)); | 376 | ntohs(*port)); |
| 333 | return -EADDRINUSE; | 377 | return -EADDRINUSE; |
| 334 | } | 378 | } |
| 335 | 379 | ||
| @@ -347,8 +391,7 @@ static int sock_get_port(struct cxgbi_sock *csk) | |||
| 347 | idx = 0; | 391 | idx = 0; |
| 348 | if (!pmap->port_csk[idx]) { | 392 | if (!pmap->port_csk[idx]) { |
| 349 | pmap->used++; | 393 | pmap->used++; |
| 350 | csk->saddr.sin_port = | 394 | *port = htons(pmap->sport_base + idx); |
| 351 | htons(pmap->sport_base + idx); | ||
| 352 | pmap->next = idx; | 395 | pmap->next = idx; |
| 353 | pmap->port_csk[idx] = csk; | 396 | pmap->port_csk[idx] = csk; |
| 354 | spin_unlock_bh(&pmap->lock); | 397 | spin_unlock_bh(&pmap->lock); |
| @@ -374,16 +417,22 @@ static void sock_put_port(struct cxgbi_sock *csk) | |||
| 374 | { | 417 | { |
| 375 | struct cxgbi_device *cdev = csk->cdev; | 418 | struct cxgbi_device *cdev = csk->cdev; |
| 376 | struct cxgbi_ports_map *pmap = &cdev->pmap; | 419 | struct cxgbi_ports_map *pmap = &cdev->pmap; |
| 420 | __be16 *port; | ||
| 377 | 421 | ||
| 378 | if (csk->saddr.sin_port) { | 422 | if (csk->csk_family == AF_INET) |
| 379 | int idx = ntohs(csk->saddr.sin_port) - pmap->sport_base; | 423 | port = &csk->saddr.sin_port; |
| 424 | else /* ipv6 */ | ||
| 425 | port = &csk->saddr6.sin6_port; | ||
| 380 | 426 | ||
| 381 | csk->saddr.sin_port = 0; | 427 | if (*port) { |
| 428 | int idx = ntohs(*port) - pmap->sport_base; | ||
| 429 | |||
| 430 | *port = 0; | ||
| 382 | if (idx < 0 || idx >= pmap->max_connect) { | 431 | if (idx < 0 || idx >= pmap->max_connect) { |
| 383 | pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n", | 432 | pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n", |
| 384 | cdev, csk->port_id, | 433 | cdev, csk->port_id, |
| 385 | cdev->ports[csk->port_id]->name, | 434 | cdev->ports[csk->port_id]->name, |
| 386 | ntohs(csk->saddr.sin_port)); | 435 | ntohs(*port)); |
| 387 | return; | 436 | return; |
| 388 | } | 437 | } |
| 389 | 438 | ||
| @@ -479,17 +528,11 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) | |||
| 479 | int port = 0xFFFF; | 528 | int port = 0xFFFF; |
| 480 | int err = 0; | 529 | int err = 0; |
| 481 | 530 | ||
| 482 | if (daddr->sin_family != AF_INET) { | ||
| 483 | pr_info("address family 0x%x NOT supported.\n", | ||
| 484 | daddr->sin_family); | ||
| 485 | err = -EAFNOSUPPORT; | ||
| 486 | goto err_out; | ||
| 487 | } | ||
| 488 | |||
| 489 | rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0); | 531 | rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0); |
| 490 | if (!rt) { | 532 | if (!rt) { |
| 491 | pr_info("no route to ipv4 0x%x, port %u.\n", | 533 | pr_info("no route to ipv4 0x%x, port %u.\n", |
| 492 | daddr->sin_addr.s_addr, daddr->sin_port); | 534 | be32_to_cpu(daddr->sin_addr.s_addr), |
| 535 | be16_to_cpu(daddr->sin_port)); | ||
| 493 | err = -ENETUNREACH; | 536 | err = -ENETUNREACH; |
| 494 | goto err_out; | 537 | goto err_out; |
| 495 | } | 538 | } |
| @@ -537,9 +580,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) | |||
| 537 | csk->port_id = port; | 580 | csk->port_id = port; |
| 538 | csk->mtu = mtu; | 581 | csk->mtu = mtu; |
| 539 | csk->dst = dst; | 582 | csk->dst = dst; |
| 583 | |||
| 584 | csk->csk_family = AF_INET; | ||
| 540 | csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; | 585 | csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; |
| 541 | csk->daddr.sin_port = daddr->sin_port; | 586 | csk->daddr.sin_port = daddr->sin_port; |
| 542 | csk->daddr.sin_family = daddr->sin_family; | 587 | csk->daddr.sin_family = daddr->sin_family; |
| 588 | csk->saddr.sin_family = daddr->sin_family; | ||
| 543 | csk->saddr.sin_addr.s_addr = fl4.saddr; | 589 | csk->saddr.sin_addr.s_addr = fl4.saddr; |
| 544 | neigh_release(n); | 590 | neigh_release(n); |
| 545 | 591 | ||
| @@ -556,6 +602,123 @@ err_out: | |||
| 556 | return ERR_PTR(err); | 602 | return ERR_PTR(err); |
| 557 | } | 603 | } |
| 558 | 604 | ||
| 605 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 606 | static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr, | ||
| 607 | const struct in6_addr *daddr) | ||
| 608 | { | ||
| 609 | struct flowi6 fl; | ||
| 610 | |||
| 611 | if (saddr) | ||
| 612 | memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); | ||
| 613 | if (daddr) | ||
| 614 | memcpy(&fl.daddr, daddr, sizeof(struct in6_addr)); | ||
| 615 | return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); | ||
| 616 | } | ||
| 617 | |||
| 618 | static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr) | ||
| 619 | { | ||
| 620 | struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr; | ||
| 621 | struct dst_entry *dst; | ||
| 622 | struct net_device *ndev; | ||
| 623 | struct cxgbi_device *cdev; | ||
| 624 | struct rt6_info *rt = NULL; | ||
| 625 | struct neighbour *n; | ||
| 626 | struct in6_addr pref_saddr; | ||
| 627 | struct cxgbi_sock *csk = NULL; | ||
| 628 | unsigned int mtu = 0; | ||
| 629 | int port = 0xFFFF; | ||
| 630 | int err = 0; | ||
| 631 | |||
| 632 | rt = find_route_ipv6(NULL, &daddr6->sin6_addr); | ||
| 633 | |||
| 634 | if (!rt) { | ||
| 635 | pr_info("no route to ipv6 %pI6 port %u\n", | ||
| 636 | daddr6->sin6_addr.s6_addr, | ||
| 637 | be16_to_cpu(daddr6->sin6_port)); | ||
| 638 | err = -ENETUNREACH; | ||
| 639 | goto err_out; | ||
| 640 | } | ||
| 641 | |||
| 642 | dst = &rt->dst; | ||
| 643 | |||
| 644 | n = dst_neigh_lookup(dst, &daddr6->sin6_addr); | ||
| 645 | |||
| 646 | if (!n) { | ||
| 647 | pr_info("%pI6, port %u, dst no neighbour.\n", | ||
| 648 | daddr6->sin6_addr.s6_addr, | ||
| 649 | be16_to_cpu(daddr6->sin6_port)); | ||
| 650 | err = -ENETUNREACH; | ||
| 651 | goto rel_rt; | ||
| 652 | } | ||
| 653 | ndev = n->dev; | ||
| 654 | |||
| 655 | if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) { | ||
| 656 | pr_info("multi-cast route %pI6 port %u, dev %s.\n", | ||
| 657 | daddr6->sin6_addr.s6_addr, | ||
| 658 | ntohs(daddr6->sin6_port), ndev->name); | ||
| 659 | err = -ENETUNREACH; | ||
| 660 | goto rel_rt; | ||
| 661 | } | ||
| 662 | |||
| 663 | cdev = cxgbi_device_find_by_netdev(ndev, &port); | ||
| 664 | if (!cdev) | ||
| 665 | cdev = cxgbi_device_find_by_mac(ndev, &port); | ||
| 666 | if (!cdev) { | ||
| 667 | pr_info("dst %pI6 %s, NOT cxgbi device.\n", | ||
| 668 | daddr6->sin6_addr.s6_addr, ndev->name); | ||
| 669 | err = -ENETUNREACH; | ||
| 670 | goto rel_rt; | ||
| 671 | } | ||
| 672 | log_debug(1 << CXGBI_DBG_SOCK, | ||
| 673 | "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n", | ||
| 674 | daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port, | ||
| 675 | ndev->name, cdev); | ||
| 676 | |||
| 677 | csk = cxgbi_sock_create(cdev); | ||
| 678 | if (!csk) { | ||
| 679 | err = -ENOMEM; | ||
| 680 | goto rel_rt; | ||
| 681 | } | ||
| 682 | csk->cdev = cdev; | ||
| 683 | csk->port_id = port; | ||
| 684 | csk->mtu = mtu; | ||
| 685 | csk->dst = dst; | ||
| 686 | |||
| 687 | if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) { | ||
| 688 | struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt); | ||
| 689 | |||
| 690 | err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL, | ||
| 691 | &daddr6->sin6_addr, 0, &pref_saddr); | ||
| 692 | if (err) { | ||
| 693 | pr_info("failed to get source address to reach %pI6\n", | ||
| 694 | &daddr6->sin6_addr); | ||
| 695 | goto rel_rt; | ||
| 696 | } | ||
| 697 | } else { | ||
| 698 | pref_saddr = rt->rt6i_prefsrc.addr; | ||
| 699 | } | ||
| 700 | |||
| 701 | csk->csk_family = AF_INET6; | ||
| 702 | csk->daddr6.sin6_addr = daddr6->sin6_addr; | ||
| 703 | csk->daddr6.sin6_port = daddr6->sin6_port; | ||
| 704 | csk->daddr6.sin6_family = daddr6->sin6_family; | ||
| 705 | csk->saddr6.sin6_addr = pref_saddr; | ||
| 706 | |||
| 707 | neigh_release(n); | ||
| 708 | return csk; | ||
| 709 | |||
| 710 | rel_rt: | ||
| 711 | if (n) | ||
| 712 | neigh_release(n); | ||
| 713 | |||
| 714 | ip6_rt_put(rt); | ||
| 715 | if (csk) | ||
| 716 | cxgbi_sock_closed(csk); | ||
| 717 | err_out: | ||
| 718 | return ERR_PTR(err); | ||
| 719 | } | ||
| 720 | #endif /* IS_ENABLED(CONFIG_IPV6) */ | ||
| 721 | |||
| 559 | void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, | 722 | void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, |
| 560 | unsigned int opt) | 723 | unsigned int opt) |
| 561 | { | 724 | { |
| @@ -2194,6 +2357,34 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, | |||
| 2194 | } | 2357 | } |
| 2195 | EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); | 2358 | EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); |
| 2196 | 2359 | ||
| 2360 | static inline int csk_print_port(struct cxgbi_sock *csk, char *buf) | ||
| 2361 | { | ||
| 2362 | int len; | ||
| 2363 | |||
| 2364 | cxgbi_sock_get(csk); | ||
| 2365 | len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port)); | ||
| 2366 | cxgbi_sock_put(csk); | ||
| 2367 | |||
| 2368 | return len; | ||
| 2369 | } | ||
| 2370 | |||
| 2371 | static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf) | ||
| 2372 | { | ||
| 2373 | int len; | ||
| 2374 | |||
| 2375 | cxgbi_sock_get(csk); | ||
| 2376 | if (csk->csk_family == AF_INET) | ||
| 2377 | len = sprintf(buf, "%pI4", | ||
| 2378 | &csk->daddr.sin_addr.s_addr); | ||
| 2379 | else | ||
| 2380 | len = sprintf(buf, "%pI6", | ||
| 2381 | &csk->daddr6.sin6_addr); | ||
| 2382 | |||
| 2383 | cxgbi_sock_put(csk); | ||
| 2384 | |||
| 2385 | return len; | ||
| 2386 | } | ||
| 2387 | |||
| 2197 | int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, | 2388 | int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, |
| 2198 | char *buf) | 2389 | char *buf) |
| 2199 | { | 2390 | { |
| @@ -2447,7 +2638,19 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost, | |||
| 2447 | } | 2638 | } |
| 2448 | } | 2639 | } |
| 2449 | 2640 | ||
| 2450 | csk = cxgbi_check_route(dst_addr); | 2641 | if (dst_addr->sa_family == AF_INET) { |
| 2642 | csk = cxgbi_check_route(dst_addr); | ||
| 2643 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 2644 | } else if (dst_addr->sa_family == AF_INET6) { | ||
| 2645 | csk = cxgbi_check_route6(dst_addr); | ||
| 2646 | #endif | ||
| 2647 | } else { | ||
| 2648 | pr_info("address family 0x%x NOT supported.\n", | ||
| 2649 | dst_addr->sa_family); | ||
| 2650 | err = -EAFNOSUPPORT; | ||
| 2651 | return (struct iscsi_endpoint *)ERR_PTR(err); | ||
| 2652 | } | ||
| 2653 | |||
| 2451 | if (IS_ERR(csk)) | 2654 | if (IS_ERR(csk)) |
| 2452 | return (struct iscsi_endpoint *)csk; | 2655 | return (struct iscsi_endpoint *)csk; |
| 2453 | cxgbi_sock_get(csk); | 2656 | cxgbi_sock_get(csk); |
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 8135f04671af..b3e6e7541cc5 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h | |||
| @@ -44,6 +44,15 @@ enum cxgbi_dbg_flag { | |||
| 44 | pr_info(fmt, ##__VA_ARGS__); \ | 44 | pr_info(fmt, ##__VA_ARGS__); \ |
| 45 | } while (0) | 45 | } while (0) |
| 46 | 46 | ||
| 47 | #define pr_info_ipaddr(fmt_trail, \ | ||
| 48 | addr1, addr2, args_trail...) \ | ||
| 49 | do { \ | ||
| 50 | if (!((1 << CXGBI_DBG_SOCK) & dbg_level)) \ | ||
| 51 | break; \ | ||
| 52 | pr_info("%pISpc - %pISpc, " fmt_trail, \ | ||
| 53 | addr1, addr2, args_trail); \ | ||
| 54 | } while (0) | ||
| 55 | |||
| 47 | /* max. connections per adapter */ | 56 | /* max. connections per adapter */ |
| 48 | #define CXGBI_MAX_CONN 16384 | 57 | #define CXGBI_MAX_CONN 16384 |
| 49 | 58 | ||
| @@ -202,8 +211,15 @@ struct cxgbi_sock { | |||
| 202 | spinlock_t lock; | 211 | spinlock_t lock; |
| 203 | struct kref refcnt; | 212 | struct kref refcnt; |
| 204 | unsigned int state; | 213 | unsigned int state; |
| 205 | struct sockaddr_in saddr; | 214 | unsigned int csk_family; |
| 206 | struct sockaddr_in daddr; | 215 | union { |
| 216 | struct sockaddr_in saddr; | ||
| 217 | struct sockaddr_in6 saddr6; | ||
| 218 | }; | ||
| 219 | union { | ||
| 220 | struct sockaddr_in daddr; | ||
| 221 | struct sockaddr_in6 daddr6; | ||
| 222 | }; | ||
| 207 | struct dst_entry *dst; | 223 | struct dst_entry *dst; |
| 208 | struct sk_buff_head receive_queue; | 224 | struct sk_buff_head receive_queue; |
| 209 | struct sk_buff_head write_queue; | 225 | struct sk_buff_head write_queue; |
| @@ -692,7 +708,8 @@ struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int); | |||
| 692 | void cxgbi_device_unregister(struct cxgbi_device *); | 708 | void cxgbi_device_unregister(struct cxgbi_device *); |
| 693 | void cxgbi_device_unregister_all(unsigned int flag); | 709 | void cxgbi_device_unregister_all(unsigned int flag); |
| 694 | struct cxgbi_device *cxgbi_device_find_by_lldev(void *); | 710 | struct cxgbi_device *cxgbi_device_find_by_lldev(void *); |
| 695 | int cxgbi_hbas_add(struct cxgbi_device *, unsigned int, unsigned int, | 711 | struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *); |
| 712 | int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int, | ||
| 696 | struct scsi_host_template *, | 713 | struct scsi_host_template *, |
| 697 | struct scsi_transport_template *); | 714 | struct scsi_transport_template *); |
| 698 | void cxgbi_hbas_remove(struct cxgbi_device *); | 715 | void cxgbi_hbas_remove(struct cxgbi_device *); |
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 83d9bf6fa6ca..0c6be0a17f53 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c | |||
| @@ -519,9 +519,7 @@ static struct ParameterData cfg_data[] = { | |||
| 519 | CFG_PARAM_UNSET, | 519 | CFG_PARAM_UNSET, |
| 520 | 0, | 520 | 0, |
| 521 | 0x2f, | 521 | 0x2f, |
| 522 | #ifdef CONFIG_SCSI_MULTI_LUN | 522 | NAC_SCANLUN | |
| 523 | NAC_SCANLUN | | ||
| 524 | #endif | ||
| 525 | NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 523 | NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET |
| 526 | /*| NAC_ACTIVE_NEG*/, | 524 | /*| NAC_ACTIVE_NEG*/, |
| 527 | NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08 | 525 | NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08 |
| @@ -1089,7 +1087,7 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s | |||
| 1089 | struct AdapterCtlBlk *acb = | 1087 | struct AdapterCtlBlk *acb = |
| 1090 | (struct AdapterCtlBlk *)cmd->device->host->hostdata; | 1088 | (struct AdapterCtlBlk *)cmd->device->host->hostdata; |
| 1091 | dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n", | 1089 | dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n", |
| 1092 | cmd, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); | 1090 | cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]); |
| 1093 | 1091 | ||
| 1094 | /* Assume BAD_TARGET; will be cleared later */ | 1092 | /* Assume BAD_TARGET; will be cleared later */ |
| 1095 | cmd->result = DID_BAD_TARGET << 16; | 1093 | cmd->result = DID_BAD_TARGET << 16; |
| @@ -1104,7 +1102,7 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s | |||
| 1104 | /* does the specified lun on the specified device exist */ | 1102 | /* does the specified lun on the specified device exist */ |
| 1105 | if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) { | 1103 | if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) { |
| 1106 | dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n", | 1104 | dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n", |
| 1107 | cmd->device->id, cmd->device->lun); | 1105 | cmd->device->id, (u8)cmd->device->lun); |
| 1108 | goto complete; | 1106 | goto complete; |
| 1109 | } | 1107 | } |
| 1110 | 1108 | ||
| @@ -1113,7 +1111,7 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s | |||
| 1113 | if (!dcb) { | 1111 | if (!dcb) { |
| 1114 | /* should never happen */ | 1112 | /* should never happen */ |
| 1115 | dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>", | 1113 | dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>", |
| 1116 | cmd->device->id, cmd->device->lun); | 1114 | cmd->device->id, (u8)cmd->device->lun); |
| 1117 | goto complete; | 1115 | goto complete; |
| 1118 | } | 1116 | } |
| 1119 | 1117 | ||
| @@ -1209,7 +1207,7 @@ static void dump_register_info(struct AdapterCtlBlk *acb, | |||
| 1209 | "cmnd=0x%02x <%02i-%i>\n", | 1207 | "cmnd=0x%02x <%02i-%i>\n", |
| 1210 | srb, srb->cmd, | 1208 | srb, srb->cmd, |
| 1211 | srb->cmd->cmnd[0], srb->cmd->device->id, | 1209 | srb->cmd->cmnd[0], srb->cmd->device->id, |
| 1212 | srb->cmd->device->lun); | 1210 | (u8)srb->cmd->device->lun); |
| 1213 | printk(" sglist=%p cnt=%i idx=%i len=%zu\n", | 1211 | printk(" sglist=%p cnt=%i idx=%i len=%zu\n", |
| 1214 | srb->segment_x, srb->sg_count, srb->sg_index, | 1212 | srb->segment_x, srb->sg_count, srb->sg_index, |
| 1215 | srb->total_xfer_length); | 1213 | srb->total_xfer_length); |
| @@ -1304,7 +1302,7 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd) | |||
| 1304 | (struct AdapterCtlBlk *)cmd->device->host->hostdata; | 1302 | (struct AdapterCtlBlk *)cmd->device->host->hostdata; |
| 1305 | dprintkl(KERN_INFO, | 1303 | dprintkl(KERN_INFO, |
| 1306 | "eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n", | 1304 | "eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n", |
| 1307 | cmd, cmd->device->id, cmd->device->lun, cmd); | 1305 | cmd, cmd->device->id, (u8)cmd->device->lun, cmd); |
| 1308 | 1306 | ||
| 1309 | if (timer_pending(&acb->waiting_timer)) | 1307 | if (timer_pending(&acb->waiting_timer)) |
| 1310 | del_timer(&acb->waiting_timer); | 1308 | del_timer(&acb->waiting_timer); |
| @@ -1371,7 +1369,7 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd) | |||
| 1371 | struct DeviceCtlBlk *dcb; | 1369 | struct DeviceCtlBlk *dcb; |
| 1372 | struct ScsiReqBlk *srb; | 1370 | struct ScsiReqBlk *srb; |
| 1373 | dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n", | 1371 | dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n", |
| 1374 | cmd, cmd->device->id, cmd->device->lun, cmd); | 1372 | cmd, cmd->device->id, (u8)cmd->device->lun, cmd); |
| 1375 | 1373 | ||
| 1376 | dcb = find_dcb(acb, cmd->device->id, cmd->device->lun); | 1374 | dcb = find_dcb(acb, cmd->device->id, cmd->device->lun); |
| 1377 | if (!dcb) { | 1375 | if (!dcb) { |
| @@ -1607,7 +1605,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, | |||
| 1607 | dprintkl(KERN_WARNING, "start_scsi: (0x%p) " | 1605 | dprintkl(KERN_WARNING, "start_scsi: (0x%p) " |
| 1608 | "Out of tags target=<%02i-%i>)\n", | 1606 | "Out of tags target=<%02i-%i>)\n", |
| 1609 | srb->cmd, srb->cmd->device->id, | 1607 | srb->cmd, srb->cmd->device->id, |
| 1610 | srb->cmd->device->lun); | 1608 | (u8)srb->cmd->device->lun); |
| 1611 | srb->state = SRB_READY; | 1609 | srb->state = SRB_READY; |
| 1612 | DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, | 1610 | DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, |
| 1613 | DO_HWRESELECT); | 1611 | DO_HWRESELECT); |
| @@ -1625,7 +1623,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, | |||
| 1625 | /*polling:*/ | 1623 | /*polling:*/ |
| 1626 | /* Send CDB ..command block ......... */ | 1624 | /* Send CDB ..command block ......... */ |
| 1627 | dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n", | 1625 | dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n", |
| 1628 | srb->cmd, srb->cmd->device->id, srb->cmd->device->lun, | 1626 | srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun, |
| 1629 | srb->cmd->cmnd[0], srb->tag_number); | 1627 | srb->cmd->cmnd[0], srb->tag_number); |
| 1630 | if (srb->flag & AUTO_REQSENSE) { | 1628 | if (srb->flag & AUTO_REQSENSE) { |
| 1631 | DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE); | 1629 | DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE); |
| @@ -2043,7 +2041,7 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, | |||
| 2043 | u16 scsi_status = *pscsi_status; | 2041 | u16 scsi_status = *pscsi_status; |
| 2044 | u32 d_left_counter = 0; | 2042 | u32 d_left_counter = 0; |
| 2045 | dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n", | 2043 | dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n", |
| 2046 | srb->cmd, srb->cmd->device->id, srb->cmd->device->lun); | 2044 | srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); |
| 2047 | 2045 | ||
| 2048 | /* | 2046 | /* |
| 2049 | * KG: We need to drain the buffers before we draw any conclusions! | 2047 | * KG: We need to drain the buffers before we draw any conclusions! |
| @@ -2173,7 +2171,7 @@ static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, | |||
| 2173 | u16 *pscsi_status) | 2171 | u16 *pscsi_status) |
| 2174 | { | 2172 | { |
| 2175 | dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n", | 2173 | dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n", |
| 2176 | srb->cmd, srb->cmd->device->id, srb->cmd->device->lun); | 2174 | srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); |
| 2177 | clear_fifo(acb, "data_out_phase1"); | 2175 | clear_fifo(acb, "data_out_phase1"); |
| 2178 | /* do prepare before transfer when data out phase */ | 2176 | /* do prepare before transfer when data out phase */ |
| 2179 | data_io_transfer(acb, srb, XFERDATAOUT); | 2177 | data_io_transfer(acb, srb, XFERDATAOUT); |
| @@ -2185,7 +2183,7 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, | |||
| 2185 | u16 scsi_status = *pscsi_status; | 2183 | u16 scsi_status = *pscsi_status; |
| 2186 | 2184 | ||
| 2187 | dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n", | 2185 | dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n", |
| 2188 | srb->cmd, srb->cmd->device->id, srb->cmd->device->lun); | 2186 | srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); |
| 2189 | 2187 | ||
| 2190 | /* | 2188 | /* |
| 2191 | * KG: DataIn is much more tricky than DataOut. When the device is finished | 2189 | * KG: DataIn is much more tricky than DataOut. When the device is finished |
| @@ -2396,7 +2394,7 @@ static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, | |||
| 2396 | u16 *pscsi_status) | 2394 | u16 *pscsi_status) |
| 2397 | { | 2395 | { |
| 2398 | dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n", | 2396 | dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n", |
| 2399 | srb->cmd, srb->cmd->device->id, srb->cmd->device->lun); | 2397 | srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); |
| 2400 | data_io_transfer(acb, srb, XFERDATAIN); | 2398 | data_io_transfer(acb, srb, XFERDATAIN); |
| 2401 | } | 2399 | } |
| 2402 | 2400 | ||
| @@ -2408,7 +2406,7 @@ static void data_io_transfer(struct AdapterCtlBlk *acb, | |||
| 2408 | u8 bval; | 2406 | u8 bval; |
| 2409 | dprintkdbg(DBG_0, | 2407 | dprintkdbg(DBG_0, |
| 2410 | "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n", | 2408 | "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n", |
| 2411 | srb->cmd, srb->cmd->device->id, srb->cmd->device->lun, | 2409 | srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun, |
| 2412 | ((io_dir & DMACMD_DIR) ? 'r' : 'w'), | 2410 | ((io_dir & DMACMD_DIR) ? 'r' : 'w'), |
| 2413 | srb->total_xfer_length, srb->sg_index, srb->sg_count); | 2411 | srb->total_xfer_length, srb->sg_index, srb->sg_count); |
| 2414 | if (srb == acb->tmp_srb) | 2412 | if (srb == acb->tmp_srb) |
| @@ -2581,7 +2579,7 @@ static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, | |||
| 2581 | u16 *pscsi_status) | 2579 | u16 *pscsi_status) |
| 2582 | { | 2580 | { |
| 2583 | dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n", | 2581 | dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n", |
| 2584 | srb->cmd, srb->cmd->device->id, srb->cmd->device->lun); | 2582 | srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); |
| 2585 | srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); | 2583 | srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); |
| 2586 | srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */ | 2584 | srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */ |
| 2587 | srb->state = SRB_COMPLETED; | 2585 | srb->state = SRB_COMPLETED; |
| @@ -2595,7 +2593,7 @@ static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, | |||
| 2595 | u16 *pscsi_status) | 2593 | u16 *pscsi_status) |
| 2596 | { | 2594 | { |
| 2597 | dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n", | 2595 | dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n", |
| 2598 | srb->cmd, srb->cmd->device->id, srb->cmd->device->lun); | 2596 | srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); |
| 2599 | srb->state = SRB_STATUS; | 2597 | srb->state = SRB_STATUS; |
| 2600 | DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ | 2598 | DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ |
| 2601 | DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP); | 2599 | DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP); |
| @@ -3320,7 +3318,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, | |||
| 3320 | int ckc_only = 1; | 3318 | int ckc_only = 1; |
| 3321 | 3319 | ||
| 3322 | dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd, | 3320 | dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd, |
| 3323 | srb->cmd->device->id, srb->cmd->device->lun); | 3321 | srb->cmd->device->id, (u8)srb->cmd->device->lun); |
| 3324 | dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n", | 3322 | dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n", |
| 3325 | srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count, | 3323 | srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count, |
| 3326 | scsi_sgtalbe(cmd)); | 3324 | scsi_sgtalbe(cmd)); |
| @@ -3500,7 +3498,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, | |||
| 3500 | if (srb->total_xfer_length) | 3498 | if (srb->total_xfer_length) |
| 3501 | dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> " | 3499 | dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> " |
| 3502 | "cmnd=0x%02x Missed %i bytes\n", | 3500 | "cmnd=0x%02x Missed %i bytes\n", |
| 3503 | cmd, cmd->device->id, cmd->device->lun, | 3501 | cmd, cmd->device->id, (u8)cmd->device->lun, |
| 3504 | cmd->cmnd[0], srb->total_xfer_length); | 3502 | cmd->cmnd[0], srb->total_xfer_length); |
| 3505 | } | 3503 | } |
| 3506 | 3504 | ||
| @@ -3540,7 +3538,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag, | |||
| 3540 | dir = p->sc_data_direction; | 3538 | dir = p->sc_data_direction; |
| 3541 | result = MK_RES(0, did_flag, 0, 0); | 3539 | result = MK_RES(0, did_flag, 0, 0); |
| 3542 | printk("G:%p(%02i-%i) ", p, | 3540 | printk("G:%p(%02i-%i) ", p, |
| 3543 | p->device->id, p->device->lun); | 3541 | p->device->id, (u8)p->device->lun); |
| 3544 | srb_going_remove(dcb, srb); | 3542 | srb_going_remove(dcb, srb); |
| 3545 | free_tag(dcb, srb); | 3543 | free_tag(dcb, srb); |
| 3546 | srb_free_insert(acb, srb); | 3544 | srb_free_insert(acb, srb); |
| @@ -3570,7 +3568,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag, | |||
| 3570 | 3568 | ||
| 3571 | result = MK_RES(0, did_flag, 0, 0); | 3569 | result = MK_RES(0, did_flag, 0, 0); |
| 3572 | printk("W:%p<%02i-%i>", p, p->device->id, | 3570 | printk("W:%p<%02i-%i>", p, p->device->id, |
| 3573 | p->device->lun); | 3571 | (u8)p->device->lun); |
| 3574 | srb_waiting_remove(dcb, srb); | 3572 | srb_waiting_remove(dcb, srb); |
| 3575 | srb_free_insert(acb, srb); | 3573 | srb_free_insert(acb, srb); |
| 3576 | p->result = result; | 3574 | p->result = result; |
| @@ -3679,7 +3677,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, | |||
| 3679 | { | 3677 | { |
| 3680 | struct scsi_cmnd *cmd = srb->cmd; | 3678 | struct scsi_cmnd *cmd = srb->cmd; |
| 3681 | dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n", | 3679 | dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n", |
| 3682 | cmd, cmd->device->id, cmd->device->lun); | 3680 | cmd, cmd->device->id, (u8)cmd->device->lun); |
| 3683 | 3681 | ||
| 3684 | srb->flag |= AUTO_REQSENSE; | 3682 | srb->flag |= AUTO_REQSENSE; |
| 3685 | srb->adapter_status = 0; | 3683 | srb->adapter_status = 0; |
| @@ -4434,15 +4432,10 @@ static void adapter_init_scsi_host(struct Scsi_Host *host) | |||
| 4434 | if (host->max_id - 1 == eeprom->scsi_id) | 4432 | if (host->max_id - 1 == eeprom->scsi_id) |
| 4435 | host->max_id--; | 4433 | host->max_id--; |
| 4436 | 4434 | ||
| 4437 | #ifdef CONFIG_SCSI_MULTI_LUN | ||
| 4438 | if (eeprom->channel_cfg & NAC_SCANLUN) | 4435 | if (eeprom->channel_cfg & NAC_SCANLUN) |
| 4439 | host->max_lun = 8; | 4436 | host->max_lun = 8; |
| 4440 | else | 4437 | else |
| 4441 | host->max_lun = 1; | 4438 | host->max_lun = 1; |
| 4442 | #else | ||
| 4443 | host->max_lun = 1; | ||
| 4444 | #endif | ||
| 4445 | |||
| 4446 | } | 4439 | } |
| 4447 | 4440 | ||
| 4448 | 4441 | ||
| @@ -4645,7 +4638,7 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
| 4645 | SPRINTF("irq_level 0x%04x, ", acb->irq_level); | 4638 | SPRINTF("irq_level 0x%04x, ", acb->irq_level); |
| 4646 | SPRINTF(" SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000); | 4639 | SPRINTF(" SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000); |
| 4647 | 4640 | ||
| 4648 | SPRINTF("MaxID %i, MaxLUN %i, ", host->max_id, host->max_lun); | 4641 | SPRINTF("MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun); |
| 4649 | SPRINTF("AdapterID %i\n", host->this_id); | 4642 | SPRINTF("AdapterID %i\n", host->this_id); |
| 4650 | 4643 | ||
| 4651 | SPRINTF("tag_max_num %i", acb->tag_max_num); | 4644 | SPRINTF("tag_max_num %i", acb->tag_max_num); |
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index c0ae8fa57a3b..67283ef418ac 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c | |||
| @@ -459,7 +459,7 @@ static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd | |||
| 459 | * to the device structure. This should be a TEST_UNIT_READY | 459 | * to the device structure. This should be a TEST_UNIT_READY |
| 460 | * command from scan_scsis_single. | 460 | * command from scan_scsis_single. |
| 461 | */ | 461 | */ |
| 462 | if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) { | 462 | if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) { |
| 463 | // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response | 463 | // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response |
| 464 | // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue. | 464 | // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue. |
| 465 | cmd->result = (DID_NO_CONNECT << 16); | 465 | cmd->result = (DID_NO_CONNECT << 16); |
| @@ -579,8 +579,8 @@ static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
| 579 | seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev); | 579 | seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev); |
| 580 | 580 | ||
| 581 | unit = d->pI2o_dev->lct_data.tid; | 581 | unit = d->pI2o_dev->lct_data.tid; |
| 582 | seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n", | 582 | seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n", |
| 583 | unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun, | 583 | unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun, |
| 584 | scsi_device_online(d->pScsi_dev)? "online":"offline"); | 584 | scsi_device_online(d->pScsi_dev)? "online":"offline"); |
| 585 | d = d->next_lun; | 585 | d = d->next_lun; |
| 586 | } | 586 | } |
| @@ -1162,7 +1162,7 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba) | |||
| 1162 | } | 1162 | } |
| 1163 | } | 1163 | } |
| 1164 | 1164 | ||
| 1165 | static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun) | 1165 | static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun) |
| 1166 | { | 1166 | { |
| 1167 | struct adpt_device* d; | 1167 | struct adpt_device* d; |
| 1168 | 1168 | ||
| @@ -1462,7 +1462,7 @@ static int adpt_i2o_parse_lct(adpt_hba* pHba) | |||
| 1462 | i2o_lct *lct = pHba->lct; | 1462 | i2o_lct *lct = pHba->lct; |
| 1463 | u8 bus_no = 0; | 1463 | u8 bus_no = 0; |
| 1464 | s16 scsi_id; | 1464 | s16 scsi_id; |
| 1465 | s16 scsi_lun; | 1465 | u64 scsi_lun; |
| 1466 | u32 buf[10]; // larger than 7, or 8 ... | 1466 | u32 buf[10]; // larger than 7, or 8 ... |
| 1467 | struct adpt_device* pDev; | 1467 | struct adpt_device* pDev; |
| 1468 | 1468 | ||
| @@ -1496,7 +1496,7 @@ static int adpt_i2o_parse_lct(adpt_hba* pHba) | |||
| 1496 | } | 1496 | } |
| 1497 | bus_no = buf[0]>>16; | 1497 | bus_no = buf[0]>>16; |
| 1498 | scsi_id = buf[1]; | 1498 | scsi_id = buf[1]; |
| 1499 | scsi_lun = (buf[2]>>8 )&0xff; | 1499 | scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]); |
| 1500 | if(bus_no >= MAX_CHANNEL) { // Something wrong skip it | 1500 | if(bus_no >= MAX_CHANNEL) { // Something wrong skip it |
| 1501 | printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no); | 1501 | printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no); |
| 1502 | continue; | 1502 | continue; |
| @@ -1571,7 +1571,7 @@ static int adpt_i2o_parse_lct(adpt_hba* pHba) | |||
| 1571 | if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) { | 1571 | if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) { |
| 1572 | bus_no = buf[0]>>16; | 1572 | bus_no = buf[0]>>16; |
| 1573 | scsi_id = buf[1]; | 1573 | scsi_id = buf[1]; |
| 1574 | scsi_lun = (buf[2]>>8 )&0xff; | 1574 | scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]); |
| 1575 | if(bus_no >= MAX_CHANNEL) { // Something wrong skip it | 1575 | if(bus_no >= MAX_CHANNEL) { // Something wrong skip it |
| 1576 | continue; | 1576 | continue; |
| 1577 | } | 1577 | } |
| @@ -2407,8 +2407,8 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd) | |||
| 2407 | case I2O_SCSI_DSC_COMMAND_TIMEOUT: | 2407 | case I2O_SCSI_DSC_COMMAND_TIMEOUT: |
| 2408 | case I2O_SCSI_DSC_NO_ADAPTER: | 2408 | case I2O_SCSI_DSC_NO_ADAPTER: |
| 2409 | case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE: | 2409 | case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE: |
| 2410 | printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n", | 2410 | printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n", |
| 2411 | pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]); | 2411 | pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]); |
| 2412 | cmd->result = (DID_TIME_OUT << 16); | 2412 | cmd->result = (DID_TIME_OUT << 16); |
| 2413 | break; | 2413 | break; |
| 2414 | case I2O_SCSI_DSC_ADAPTER_BUSY: | 2414 | case I2O_SCSI_DSC_ADAPTER_BUSY: |
| @@ -2447,8 +2447,8 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd) | |||
| 2447 | case I2O_SCSI_DSC_QUEUE_FROZEN: | 2447 | case I2O_SCSI_DSC_QUEUE_FROZEN: |
| 2448 | case I2O_SCSI_DSC_REQUEST_INVALID: | 2448 | case I2O_SCSI_DSC_REQUEST_INVALID: |
| 2449 | default: | 2449 | default: |
| 2450 | printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n", | 2450 | printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n", |
| 2451 | pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, | 2451 | pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, |
| 2452 | hba_status, dev_status, cmd->cmnd[0]); | 2452 | hba_status, dev_status, cmd->cmnd[0]); |
| 2453 | cmd->result = (DID_ERROR << 16); | 2453 | cmd->result = (DID_ERROR << 16); |
| 2454 | break; | 2454 | break; |
| @@ -2464,8 +2464,8 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd) | |||
| 2464 | cmd->sense_buffer[2] == DATA_PROTECT ){ | 2464 | cmd->sense_buffer[2] == DATA_PROTECT ){ |
| 2465 | /* This is to handle an array failed */ | 2465 | /* This is to handle an array failed */ |
| 2466 | cmd->result = (DID_TIME_OUT << 16); | 2466 | cmd->result = (DID_TIME_OUT << 16); |
| 2467 | printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n", | 2467 | printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n", |
| 2468 | pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, | 2468 | pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, |
| 2469 | hba_status, dev_status, cmd->cmnd[0]); | 2469 | hba_status, dev_status, cmd->cmnd[0]); |
| 2470 | 2470 | ||
| 2471 | } | 2471 | } |
| @@ -2476,8 +2476,8 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd) | |||
| 2476 | * for a limitted number of retries. | 2476 | * for a limitted number of retries. |
| 2477 | */ | 2477 | */ |
| 2478 | cmd->result = (DID_TIME_OUT << 16); | 2478 | cmd->result = (DID_TIME_OUT << 16); |
| 2479 | printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n", | 2479 | printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n", |
| 2480 | pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, | 2480 | pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, |
| 2481 | ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]); | 2481 | ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]); |
| 2482 | } | 2482 | } |
| 2483 | 2483 | ||
| @@ -2517,7 +2517,7 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba) | |||
| 2517 | i2o_lct *lct = pHba->lct; | 2517 | i2o_lct *lct = pHba->lct; |
| 2518 | u8 bus_no = 0; | 2518 | u8 bus_no = 0; |
| 2519 | s16 scsi_id; | 2519 | s16 scsi_id; |
| 2520 | s16 scsi_lun; | 2520 | u64 scsi_lun; |
| 2521 | u32 buf[10]; // at least 8 u32's | 2521 | u32 buf[10]; // at least 8 u32's |
| 2522 | struct adpt_device* pDev = NULL; | 2522 | struct adpt_device* pDev = NULL; |
| 2523 | struct i2o_device* pI2o_dev = NULL; | 2523 | struct i2o_device* pI2o_dev = NULL; |
| @@ -2564,7 +2564,7 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba) | |||
| 2564 | } | 2564 | } |
| 2565 | 2565 | ||
| 2566 | scsi_id = buf[1]; | 2566 | scsi_id = buf[1]; |
| 2567 | scsi_lun = (buf[2]>>8 )&0xff; | 2567 | scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]); |
| 2568 | pDev = pHba->channel[bus_no].device[scsi_id]; | 2568 | pDev = pHba->channel[bus_no].device[scsi_id]; |
| 2569 | /* da lun */ | 2569 | /* da lun */ |
| 2570 | while(pDev) { | 2570 | while(pDev) { |
| @@ -2633,7 +2633,7 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba) | |||
| 2633 | while(pDev) { | 2633 | while(pDev) { |
| 2634 | if(pDev->scsi_lun == scsi_lun) { | 2634 | if(pDev->scsi_lun == scsi_lun) { |
| 2635 | if(!scsi_device_online(pDev->pScsi_dev)) { | 2635 | if(!scsi_device_online(pDev->pScsi_dev)) { |
| 2636 | printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n", | 2636 | printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n", |
| 2637 | pHba->name,bus_no,scsi_id,scsi_lun); | 2637 | pHba->name,bus_no,scsi_id,scsi_lun); |
| 2638 | if (pDev->pScsi_dev) { | 2638 | if (pDev->pScsi_dev) { |
| 2639 | scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING); | 2639 | scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING); |
| @@ -2665,7 +2665,7 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba) | |||
| 2665 | // in the LCT table | 2665 | // in the LCT table |
| 2666 | if (pDev->state & DPTI_DEV_UNSCANNED){ | 2666 | if (pDev->state & DPTI_DEV_UNSCANNED){ |
| 2667 | pDev->state = DPTI_DEV_OFFLINE; | 2667 | pDev->state = DPTI_DEV_OFFLINE; |
| 2668 | printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun); | 2668 | printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun); |
| 2669 | if (pDev->pScsi_dev) { | 2669 | if (pDev->pScsi_dev) { |
| 2670 | scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE); | 2670 | scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE); |
| 2671 | } | 2671 | } |
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h index aeb046186c84..1fa345ab8ecb 100644 --- a/drivers/scsi/dpti.h +++ b/drivers/scsi/dpti.h | |||
| @@ -184,7 +184,7 @@ struct adpt_device { | |||
| 184 | u32 block_size; | 184 | u32 block_size; |
| 185 | u8 scsi_channel; | 185 | u8 scsi_channel; |
| 186 | u8 scsi_id; | 186 | u8 scsi_id; |
| 187 | u8 scsi_lun; | 187 | u64 scsi_lun; |
| 188 | u8 state; | 188 | u8 state; |
| 189 | u16 tid; | 189 | u16 tid; |
| 190 | struct i2o_device* pI2o_dev; | 190 | struct i2o_device* pI2o_dev; |
| @@ -231,7 +231,7 @@ typedef struct _adpt_hba { | |||
| 231 | u32 sg_tablesize; // Scatter/Gather List Size. | 231 | u32 sg_tablesize; // Scatter/Gather List Size. |
| 232 | u8 top_scsi_channel; | 232 | u8 top_scsi_channel; |
| 233 | u8 top_scsi_id; | 233 | u8 top_scsi_id; |
| 234 | u8 top_scsi_lun; | 234 | u64 top_scsi_lun; |
| 235 | u8 dma64; | 235 | u8 dma64; |
| 236 | 236 | ||
| 237 | i2o_status_block* status_block; | 237 | i2o_status_block* status_block; |
| @@ -300,7 +300,7 @@ static s32 adpt_send_nop(adpt_hba*pHba,u32 m); | |||
| 300 | static void adpt_i2o_delete_hba(adpt_hba* pHba); | 300 | static void adpt_i2o_delete_hba(adpt_hba* pHba); |
| 301 | static void adpt_inquiry(adpt_hba* pHba); | 301 | static void adpt_inquiry(adpt_hba* pHba); |
| 302 | static void adpt_fail_posted_scbs(adpt_hba* pHba); | 302 | static void adpt_fail_posted_scbs(adpt_hba* pHba); |
| 303 | static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun); | 303 | static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun); |
| 304 | static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) ; | 304 | static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) ; |
| 305 | static int adpt_i2o_online_hba(adpt_hba* pHba); | 305 | static int adpt_i2o_online_hba(adpt_hba* pHba); |
| 306 | static void adpt_i2o_post_wait_complete(u32, int); | 306 | static void adpt_i2o_post_wait_complete(u32, int); |
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c index ebf57364df91..813dd5c998e4 100644 --- a/drivers/scsi/eata.c +++ b/drivers/scsi/eata.c | |||
| @@ -1238,8 +1238,8 @@ static int port_detect(unsigned long port_base, unsigned int j, | |||
| 1238 | struct eata_config *cf; | 1238 | struct eata_config *cf; |
| 1239 | dma_addr_t cf_dma_addr; | 1239 | dma_addr_t cf_dma_addr; |
| 1240 | 1240 | ||
| 1241 | cf = pci_alloc_consistent(pdev, sizeof(struct eata_config), | 1241 | cf = pci_zalloc_consistent(pdev, sizeof(struct eata_config), |
| 1242 | &cf_dma_addr); | 1242 | &cf_dma_addr); |
| 1243 | 1243 | ||
| 1244 | if (!cf) { | 1244 | if (!cf) { |
| 1245 | printk | 1245 | printk |
| @@ -1249,7 +1249,6 @@ static int port_detect(unsigned long port_base, unsigned int j, | |||
| 1249 | } | 1249 | } |
| 1250 | 1250 | ||
| 1251 | /* Set board configuration */ | 1251 | /* Set board configuration */ |
| 1252 | memset((char *)cf, 0, sizeof(struct eata_config)); | ||
| 1253 | cf->len = (ushort) H2DEV16((ushort) 510); | 1252 | cf->len = (ushort) H2DEV16((ushort) 510); |
| 1254 | cf->ocena = 1; | 1253 | cf->ocena = 1; |
| 1255 | 1254 | ||
| @@ -1399,7 +1398,7 @@ static int port_detect(unsigned long port_base, unsigned int j, | |||
| 1399 | 1398 | ||
| 1400 | if (shost->max_id > 8 || shost->max_lun > 8) | 1399 | if (shost->max_id > 8 || shost->max_lun > 8) |
| 1401 | printk | 1400 | printk |
| 1402 | ("%s: wide SCSI support enabled, max_id %u, max_lun %u.\n", | 1401 | ("%s: wide SCSI support enabled, max_id %u, max_lun %llu.\n", |
| 1403 | ha->board_name, shost->max_id, shost->max_lun); | 1402 | ha->board_name, shost->max_id, shost->max_lun); |
| 1404 | 1403 | ||
| 1405 | for (i = 0; i <= shost->max_channel; i++) | 1404 | for (i = 0; i <= shost->max_channel; i++) |
| @@ -2449,7 +2448,7 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost) | |||
| 2449 | "target_status 0x%x, sense key 0x%x.\n", | 2448 | "target_status 0x%x, sense key 0x%x.\n", |
| 2450 | ha->board_name, | 2449 | ha->board_name, |
| 2451 | SCpnt->device->channel, SCpnt->device->id, | 2450 | SCpnt->device->channel, SCpnt->device->id, |
| 2452 | SCpnt->device->lun, | 2451 | (u8)SCpnt->device->lun, |
| 2453 | spp->target_status, SCpnt->sense_buffer[2]); | 2452 | spp->target_status, SCpnt->sense_buffer[2]); |
| 2454 | 2453 | ||
| 2455 | ha->target_to[SCpnt->device->id][SCpnt->device->channel] = 0; | 2454 | ha->target_to[SCpnt->device->id][SCpnt->device->channel] = 0; |
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c index 7d9b54ae7f62..a0dd1b67a467 100644 --- a/drivers/scsi/fnic/fnic_isr.c +++ b/drivers/scsi/fnic/fnic_isr.c | |||
| @@ -257,8 +257,8 @@ int fnic_set_intr_mode(struct fnic *fnic) | |||
| 257 | fnic->raw_wq_count >= m && | 257 | fnic->raw_wq_count >= m && |
| 258 | fnic->wq_copy_count >= o && | 258 | fnic->wq_copy_count >= o && |
| 259 | fnic->cq_count >= n + m + o) { | 259 | fnic->cq_count >= n + m + o) { |
| 260 | if (!pci_enable_msix(fnic->pdev, fnic->msix_entry, | 260 | if (!pci_enable_msix_exact(fnic->pdev, fnic->msix_entry, |
| 261 | n + m + o + 1)) { | 261 | n + m + o + 1)) { |
| 262 | fnic->rq_count = n; | 262 | fnic->rq_count = n; |
| 263 | fnic->raw_wq_count = m; | 263 | fnic->raw_wq_count = m; |
| 264 | fnic->wq_copy_count = o; | 264 | fnic->wq_copy_count = o; |
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index ea28b5ca4c73..961bdf5d31cd 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c | |||
| @@ -1753,7 +1753,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) | |||
| 1753 | tag = sc->request->tag; | 1753 | tag = sc->request->tag; |
| 1754 | FNIC_SCSI_DBG(KERN_DEBUG, | 1754 | FNIC_SCSI_DBG(KERN_DEBUG, |
| 1755 | fnic->lport->host, | 1755 | fnic->lport->host, |
| 1756 | "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %x flags %x\n", | 1756 | "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n", |
| 1757 | rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc)); | 1757 | rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc)); |
| 1758 | 1758 | ||
| 1759 | CMD_FLAGS(sc) = FNIC_NO_FLAGS; | 1759 | CMD_FLAGS(sc) = FNIC_NO_FLAGS; |
| @@ -2207,7 +2207,7 @@ int fnic_device_reset(struct scsi_cmnd *sc) | |||
| 2207 | 2207 | ||
| 2208 | rport = starget_to_rport(scsi_target(sc->device)); | 2208 | rport = starget_to_rport(scsi_target(sc->device)); |
| 2209 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | 2209 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
| 2210 | "Device reset called FCID 0x%x, LUN 0x%x sc 0x%p\n", | 2210 | "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n", |
| 2211 | rport->port_id, sc->device->lun, sc); | 2211 | rport->port_id, sc->device->lun, sc); |
| 2212 | 2212 | ||
| 2213 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) | 2213 | if (lp->state != LPORT_ST_READY || !(lp->link_up)) |
| @@ -2224,6 +2224,22 @@ int fnic_device_reset(struct scsi_cmnd *sc) | |||
| 2224 | 2224 | ||
| 2225 | tag = sc->request->tag; | 2225 | tag = sc->request->tag; |
| 2226 | if (unlikely(tag < 0)) { | 2226 | if (unlikely(tag < 0)) { |
| 2227 | /* | ||
| 2228 | * XXX(hch): current the midlayer fakes up a struct | ||
| 2229 | * request for the explicit reset ioctls, and those | ||
| 2230 | * don't have a tag allocated to them. The below | ||
| 2231 | * code pokes into midlayer structures to paper over | ||
| 2232 | * this design issue, but that won't work for blk-mq. | ||
| 2233 | * | ||
| 2234 | * Either someone who can actually test the hardware | ||
| 2235 | * will have to come up with a similar hack for the | ||
| 2236 | * blk-mq case, or we'll have to bite the bullet and | ||
| 2237 | * fix the way the EH ioctls work for real, but until | ||
| 2238 | * that happens we fail these explicit requests here. | ||
| 2239 | */ | ||
| 2240 | if (shost_use_blk_mq(sc->device->host)) | ||
| 2241 | goto fnic_device_reset_end; | ||
| 2242 | |||
| 2227 | tag = fnic_scsi_host_start_tag(fnic, sc); | 2243 | tag = fnic_scsi_host_start_tag(fnic, sc); |
| 2228 | if (unlikely(tag == SCSI_NO_TAG)) | 2244 | if (unlikely(tag == SCSI_NO_TAG)) |
| 2229 | goto fnic_device_reset_end; | 2245 | goto fnic_device_reset_end; |
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index a1bc8ca958e1..b331272e93bc 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c | |||
| @@ -768,7 +768,7 @@ static void sprint_command(struct seq_file *m, unsigned char *command) | |||
| 768 | 768 | ||
| 769 | static void sprint_Scsi_Cmnd(struct seq_file *m, Scsi_Cmnd * cmd) | 769 | static void sprint_Scsi_Cmnd(struct seq_file *m, Scsi_Cmnd * cmd) |
| 770 | { | 770 | { |
| 771 | PRINTP("host number %d destination target %d, lun %d\n" ANDP cmd->device->host->host_no ANDP cmd->device->id ANDP cmd->device->lun); | 771 | PRINTP("host number %d destination target %d, lun %llu\n" ANDP cmd->device->host->host_no ANDP cmd->device->id ANDP cmd->device->lun); |
| 772 | PRINTP(" command = "); | 772 | PRINTP(" command = "); |
| 773 | sprint_command(m, cmd->cmnd); | 773 | sprint_command(m, cmd->cmnd); |
| 774 | } | 774 | } |
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 3cbb57a8b846..6de80e352871 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c | |||
| @@ -204,18 +204,33 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, | |||
| 204 | struct scsi_host_template *sht = shost->hostt; | 204 | struct scsi_host_template *sht = shost->hostt; |
| 205 | int error = -EINVAL; | 205 | int error = -EINVAL; |
| 206 | 206 | ||
| 207 | printk(KERN_INFO "scsi%d : %s\n", shost->host_no, | 207 | shost_printk(KERN_INFO, shost, "%s\n", |
| 208 | sht->info ? sht->info(shost) : sht->name); | 208 | sht->info ? sht->info(shost) : sht->name); |
| 209 | 209 | ||
| 210 | if (!shost->can_queue) { | 210 | if (!shost->can_queue) { |
| 211 | printk(KERN_ERR "%s: can_queue = 0 no longer supported\n", | 211 | shost_printk(KERN_ERR, shost, |
| 212 | sht->name); | 212 | "can_queue = 0 no longer supported\n"); |
| 213 | goto fail; | 213 | goto fail; |
| 214 | } | 214 | } |
| 215 | 215 | ||
| 216 | if (shost_use_blk_mq(shost)) { | ||
| 217 | error = scsi_mq_setup_tags(shost); | ||
| 218 | if (error) | ||
| 219 | goto fail; | ||
| 220 | } | ||
| 221 | |||
| 222 | /* | ||
| 223 | * Note that we allocate the freelist even for the MQ case for now, | ||
| 224 | * as we need a command set aside for scsi_reset_provider. Having | ||
| 225 | * the full host freelist and one command available for that is a | ||
| 226 | * little heavy-handed, but avoids introducing a special allocator | ||
| 227 | * just for this. Eventually the structure of scsi_reset_provider | ||
| 228 | * will need a major overhaul. | ||
| 229 | */ | ||
| 216 | error = scsi_setup_command_freelist(shost); | 230 | error = scsi_setup_command_freelist(shost); |
| 217 | if (error) | 231 | if (error) |
| 218 | goto fail; | 232 | goto out_destroy_tags; |
| 233 | |||
| 219 | 234 | ||
| 220 | if (!shost->shost_gendev.parent) | 235 | if (!shost->shost_gendev.parent) |
| 221 | shost->shost_gendev.parent = dev ? dev : &platform_bus; | 236 | shost->shost_gendev.parent = dev ? dev : &platform_bus; |
| @@ -226,7 +241,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, | |||
| 226 | 241 | ||
| 227 | error = device_add(&shost->shost_gendev); | 242 | error = device_add(&shost->shost_gendev); |
| 228 | if (error) | 243 | if (error) |
| 229 | goto out; | 244 | goto out_destroy_freelist; |
| 230 | 245 | ||
| 231 | pm_runtime_set_active(&shost->shost_gendev); | 246 | pm_runtime_set_active(&shost->shost_gendev); |
| 232 | pm_runtime_enable(&shost->shost_gendev); | 247 | pm_runtime_enable(&shost->shost_gendev); |
| @@ -279,8 +294,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, | |||
| 279 | device_del(&shost->shost_dev); | 294 | device_del(&shost->shost_dev); |
| 280 | out_del_gendev: | 295 | out_del_gendev: |
| 281 | device_del(&shost->shost_gendev); | 296 | device_del(&shost->shost_gendev); |
| 282 | out: | 297 | out_destroy_freelist: |
| 283 | scsi_destroy_command_freelist(shost); | 298 | scsi_destroy_command_freelist(shost); |
| 299 | out_destroy_tags: | ||
| 300 | if (shost_use_blk_mq(shost)) | ||
| 301 | scsi_mq_destroy_tags(shost); | ||
| 284 | fail: | 302 | fail: |
| 285 | return error; | 303 | return error; |
| 286 | } | 304 | } |
| @@ -309,8 +327,13 @@ static void scsi_host_dev_release(struct device *dev) | |||
| 309 | } | 327 | } |
| 310 | 328 | ||
| 311 | scsi_destroy_command_freelist(shost); | 329 | scsi_destroy_command_freelist(shost); |
| 312 | if (shost->bqt) | 330 | if (shost_use_blk_mq(shost)) { |
| 313 | blk_free_tags(shost->bqt); | 331 | if (shost->tag_set.tags) |
| 332 | scsi_mq_destroy_tags(shost); | ||
| 333 | } else { | ||
| 334 | if (shost->bqt) | ||
| 335 | blk_free_tags(shost->bqt); | ||
| 336 | } | ||
| 314 | 337 | ||
| 315 | kfree(shost->shost_data); | 338 | kfree(shost->shost_data); |
| 316 | 339 | ||
| @@ -436,6 +459,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) | |||
| 436 | else | 459 | else |
| 437 | shost->dma_boundary = 0xffffffff; | 460 | shost->dma_boundary = 0xffffffff; |
| 438 | 461 | ||
| 462 | shost->use_blk_mq = scsi_use_blk_mq && !shost->hostt->disable_blk_mq; | ||
| 463 | |||
| 439 | device_initialize(&shost->shost_gendev); | 464 | device_initialize(&shost->shost_gendev); |
| 440 | dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); | 465 | dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); |
| 441 | shost->shost_gendev.bus = &scsi_bus_type; | 466 | shost->shost_gendev.bus = &scsi_bus_type; |
| @@ -450,8 +475,9 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) | |||
| 450 | shost->ehandler = kthread_run(scsi_error_handler, shost, | 475 | shost->ehandler = kthread_run(scsi_error_handler, shost, |
| 451 | "scsi_eh_%d", shost->host_no); | 476 | "scsi_eh_%d", shost->host_no); |
| 452 | if (IS_ERR(shost->ehandler)) { | 477 | if (IS_ERR(shost->ehandler)) { |
| 453 | printk(KERN_WARNING "scsi%d: error handler thread failed to spawn, error = %ld\n", | 478 | shost_printk(KERN_WARNING, shost, |
| 454 | shost->host_no, PTR_ERR(shost->ehandler)); | 479 | "error handler thread failed to spawn, error = %ld\n", |
| 480 | PTR_ERR(shost->ehandler)); | ||
| 455 | goto fail_kfree; | 481 | goto fail_kfree; |
| 456 | } | 482 | } |
| 457 | 483 | ||
| @@ -584,7 +610,7 @@ EXPORT_SYMBOL(scsi_is_host_device); | |||
| 584 | int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work) | 610 | int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work) |
| 585 | { | 611 | { |
| 586 | if (unlikely(!shost->work_q)) { | 612 | if (unlikely(!shost->work_q)) { |
| 587 | printk(KERN_ERR | 613 | shost_printk(KERN_ERR, shost, |
| 588 | "ERROR: Scsi host '%s' attempted to queue scsi-work, " | 614 | "ERROR: Scsi host '%s' attempted to queue scsi-work, " |
| 589 | "when no workqueue created.\n", shost->hostt->name); | 615 | "when no workqueue created.\n", shost->hostt->name); |
| 590 | dump_stack(); | 616 | dump_stack(); |
| @@ -603,7 +629,7 @@ EXPORT_SYMBOL_GPL(scsi_queue_work); | |||
| 603 | void scsi_flush_work(struct Scsi_Host *shost) | 629 | void scsi_flush_work(struct Scsi_Host *shost) |
| 604 | { | 630 | { |
| 605 | if (!shost->work_q) { | 631 | if (!shost->work_q) { |
| 606 | printk(KERN_ERR | 632 | shost_printk(KERN_ERR, shost, |
| 607 | "ERROR: Scsi host '%s' attempted to flush scsi-work, " | 633 | "ERROR: Scsi host '%s' attempted to flush scsi-work, " |
| 608 | "when no workqueue created.\n", shost->hostt->name); | 634 | "when no workqueue created.\n", shost->hostt->name); |
| 609 | dump_stack(); | 635 | dump_stack(); |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 31184b35370f..6b35d0dfe64c 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
| @@ -1708,7 +1708,14 @@ static void complete_scsi_command(struct CommandList *cp) | |||
| 1708 | 1708 | ||
| 1709 | cmd->result |= ei->ScsiStatus; | 1709 | cmd->result |= ei->ScsiStatus; |
| 1710 | 1710 | ||
| 1711 | /* copy the sense data whether we need to or not. */ | 1711 | scsi_set_resid(cmd, ei->ResidualCnt); |
| 1712 | if (ei->CommandStatus == 0) { | ||
| 1713 | cmd_free(h, cp); | ||
| 1714 | cmd->scsi_done(cmd); | ||
| 1715 | return; | ||
| 1716 | } | ||
| 1717 | |||
| 1718 | /* copy the sense data */ | ||
| 1712 | if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) | 1719 | if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) |
| 1713 | sense_data_size = SCSI_SENSE_BUFFERSIZE; | 1720 | sense_data_size = SCSI_SENSE_BUFFERSIZE; |
| 1714 | else | 1721 | else |
| @@ -1717,13 +1724,6 @@ static void complete_scsi_command(struct CommandList *cp) | |||
| 1717 | sense_data_size = ei->SenseLen; | 1724 | sense_data_size = ei->SenseLen; |
| 1718 | 1725 | ||
| 1719 | memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); | 1726 | memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); |
| 1720 | scsi_set_resid(cmd, ei->ResidualCnt); | ||
| 1721 | |||
| 1722 | if (ei->CommandStatus == 0) { | ||
| 1723 | cmd_free(h, cp); | ||
| 1724 | cmd->scsi_done(cmd); | ||
| 1725 | return; | ||
| 1726 | } | ||
| 1727 | 1727 | ||
| 1728 | /* For I/O accelerator commands, copy over some fields to the normal | 1728 | /* For I/O accelerator commands, copy over some fields to the normal |
| 1729 | * CISS header used below for error handling. | 1729 | * CISS header used below for error handling. |
| @@ -3686,6 +3686,8 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
| 3686 | (((u64) cmd->cmnd[2]) << 8) | | 3686 | (((u64) cmd->cmnd[2]) << 8) | |
| 3687 | cmd->cmnd[3]; | 3687 | cmd->cmnd[3]; |
| 3688 | block_cnt = cmd->cmnd[4]; | 3688 | block_cnt = cmd->cmnd[4]; |
| 3689 | if (block_cnt == 0) | ||
| 3690 | block_cnt = 256; | ||
| 3689 | break; | 3691 | break; |
| 3690 | case WRITE_10: | 3692 | case WRITE_10: |
| 3691 | is_write = 1; | 3693 | is_write = 1; |
| @@ -3734,7 +3736,6 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
| 3734 | default: | 3736 | default: |
| 3735 | return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ | 3737 | return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ |
| 3736 | } | 3738 | } |
| 3737 | BUG_ON(block_cnt == 0); | ||
| 3738 | last_block = first_block + block_cnt - 1; | 3739 | last_block = first_block + block_cnt - 1; |
| 3739 | 3740 | ||
| 3740 | /* check for write to non-RAID-0 */ | 3741 | /* check for write to non-RAID-0 */ |
| @@ -4590,7 +4591,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) | |||
| 4590 | return FAILED; | 4591 | return FAILED; |
| 4591 | 4592 | ||
| 4592 | memset(msg, 0, sizeof(msg)); | 4593 | memset(msg, 0, sizeof(msg)); |
| 4593 | ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ", | 4594 | ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ", |
| 4594 | h->scsi_host->host_no, sc->device->channel, | 4595 | h->scsi_host->host_no, sc->device->channel, |
| 4595 | sc->device->id, sc->device->lun); | 4596 | sc->device->id, sc->device->lun); |
| 4596 | 4597 | ||
| @@ -4731,23 +4732,21 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h) | |||
| 4731 | union u64bit temp64; | 4732 | union u64bit temp64; |
| 4732 | dma_addr_t cmd_dma_handle, err_dma_handle; | 4733 | dma_addr_t cmd_dma_handle, err_dma_handle; |
| 4733 | 4734 | ||
| 4734 | c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); | 4735 | c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); |
| 4735 | if (c == NULL) | 4736 | if (c == NULL) |
| 4736 | return NULL; | 4737 | return NULL; |
| 4737 | memset(c, 0, sizeof(*c)); | ||
| 4738 | 4738 | ||
| 4739 | c->cmd_type = CMD_SCSI; | 4739 | c->cmd_type = CMD_SCSI; |
| 4740 | c->cmdindex = -1; | 4740 | c->cmdindex = -1; |
| 4741 | 4741 | ||
| 4742 | c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), | 4742 | c->err_info = pci_zalloc_consistent(h->pdev, sizeof(*c->err_info), |
| 4743 | &err_dma_handle); | 4743 | &err_dma_handle); |
| 4744 | 4744 | ||
| 4745 | if (c->err_info == NULL) { | 4745 | if (c->err_info == NULL) { |
| 4746 | pci_free_consistent(h->pdev, | 4746 | pci_free_consistent(h->pdev, |
| 4747 | sizeof(*c), c, cmd_dma_handle); | 4747 | sizeof(*c), c, cmd_dma_handle); |
| 4748 | return NULL; | 4748 | return NULL; |
| 4749 | } | 4749 | } |
| 4750 | memset(c->err_info, 0, sizeof(*c->err_info)); | ||
| 4751 | 4750 | ||
| 4752 | INIT_LIST_HEAD(&c->list); | 4751 | INIT_LIST_HEAD(&c->list); |
| 4753 | c->busaddr = (u32) cmd_dma_handle; | 4752 | c->busaddr = (u32) cmd_dma_handle; |
| @@ -5092,7 +5091,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
| 5092 | } | 5091 | } |
| 5093 | if (ioc->Request.Type.Direction & XFER_WRITE) { | 5092 | if (ioc->Request.Type.Direction & XFER_WRITE) { |
| 5094 | if (copy_from_user(buff[sg_used], data_ptr, sz)) { | 5093 | if (copy_from_user(buff[sg_used], data_ptr, sz)) { |
| 5095 | status = -ENOMEM; | 5094 | status = -EFAULT; |
| 5096 | goto cleanup1; | 5095 | goto cleanup1; |
| 5097 | } | 5096 | } |
| 5098 | } else | 5097 | } else |
| @@ -6365,9 +6364,9 @@ static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) | |||
| 6365 | { | 6364 | { |
| 6366 | u32 driver_support; | 6365 | u32 driver_support; |
| 6367 | 6366 | ||
| 6368 | #ifdef CONFIG_X86 | ||
| 6369 | /* Need to enable prefetch in the SCSI core for 6400 in x86 */ | ||
| 6370 | driver_support = readl(&(h->cfgtable->driver_support)); | 6367 | driver_support = readl(&(h->cfgtable->driver_support)); |
| 6368 | /* Need to enable prefetch in the SCSI core for 6400 in x86 */ | ||
| 6369 | #ifdef CONFIG_X86 | ||
| 6371 | driver_support |= ENABLE_SCSI_PREFETCH; | 6370 | driver_support |= ENABLE_SCSI_PREFETCH; |
| 6372 | #endif | 6371 | #endif |
| 6373 | driver_support |= ENABLE_UNIT_ATTN; | 6372 | driver_support |= ENABLE_UNIT_ATTN; |
| @@ -6913,8 +6912,12 @@ static int hpsa_offline_devices_ready(struct ctlr_info *h) | |||
| 6913 | d = list_entry(this, struct offline_device_entry, | 6912 | d = list_entry(this, struct offline_device_entry, |
| 6914 | offline_list); | 6913 | offline_list); |
| 6915 | spin_unlock_irqrestore(&h->offline_device_lock, flags); | 6914 | spin_unlock_irqrestore(&h->offline_device_lock, flags); |
| 6916 | if (!hpsa_volume_offline(h, d->scsi3addr)) | 6915 | if (!hpsa_volume_offline(h, d->scsi3addr)) { |
| 6916 | spin_lock_irqsave(&h->offline_device_lock, flags); | ||
| 6917 | list_del(&d->offline_list); | ||
| 6918 | spin_unlock_irqrestore(&h->offline_device_lock, flags); | ||
| 6917 | return 1; | 6919 | return 1; |
| 6920 | } | ||
| 6918 | spin_lock_irqsave(&h->offline_device_lock, flags); | 6921 | spin_lock_irqsave(&h->offline_device_lock, flags); |
| 6919 | } | 6922 | } |
| 6920 | spin_unlock_irqrestore(&h->offline_device_lock, flags); | 6923 | spin_unlock_irqrestore(&h->offline_device_lock, flags); |
| @@ -6995,8 +6998,10 @@ reinit_after_soft_reset: | |||
| 6995 | 6998 | ||
| 6996 | /* Allocate and clear per-cpu variable lockup_detected */ | 6999 | /* Allocate and clear per-cpu variable lockup_detected */ |
| 6997 | h->lockup_detected = alloc_percpu(u32); | 7000 | h->lockup_detected = alloc_percpu(u32); |
| 6998 | if (!h->lockup_detected) | 7001 | if (!h->lockup_detected) { |
| 7002 | rc = -ENOMEM; | ||
| 6999 | goto clean1; | 7003 | goto clean1; |
| 7004 | } | ||
| 7000 | set_lockup_detected_for_all_cpus(h, 0); | 7005 | set_lockup_detected_for_all_cpus(h, 0); |
| 7001 | 7006 | ||
| 7002 | rc = hpsa_pci_init(h); | 7007 | rc = hpsa_pci_init(h); |
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index ee196b363d81..dedb62c21b29 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c | |||
| @@ -1024,7 +1024,7 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp, | |||
| 1024 | 1024 | ||
| 1025 | _req->scp = scp; | 1025 | _req->scp = scp; |
| 1026 | 1026 | ||
| 1027 | dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%08x-%08x-%08x-%08x) " | 1027 | dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%llu cdb=(%08x-%08x-%08x-%08x) " |
| 1028 | "req_index=%d, req=%p\n", | 1028 | "req_index=%d, req=%p\n", |
| 1029 | scp, | 1029 | scp, |
| 1030 | host->host_no, scp->device->channel, | 1030 | host->host_no, scp->device->channel, |
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile index cb150d1e5850..3840c64f2966 100644 --- a/drivers/scsi/ibmvscsi/Makefile +++ b/drivers/scsi/ibmvscsi/Makefile | |||
| @@ -1,3 +1,2 @@ | |||
| 1 | obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi.o | 1 | obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi.o |
| 2 | obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o | ||
| 3 | obj-$(CONFIG_SCSI_IBMVFC) += ibmvfc.o | 2 | obj-$(CONFIG_SCSI_IBMVFC) += ibmvfc.o |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 8dd47689d584..598c42cba5a8 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
| @@ -46,7 +46,7 @@ | |||
| 46 | 46 | ||
| 47 | static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT; | 47 | static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT; |
| 48 | static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT; | 48 | static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT; |
| 49 | static unsigned int max_lun = IBMVFC_MAX_LUN; | 49 | static u64 max_lun = IBMVFC_MAX_LUN; |
| 50 | static unsigned int max_targets = IBMVFC_MAX_TARGETS; | 50 | static unsigned int max_targets = IBMVFC_MAX_TARGETS; |
| 51 | static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT; | 51 | static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT; |
| 52 | static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS; | 52 | static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS; |
| @@ -71,7 +71,7 @@ MODULE_PARM_DESC(default_timeout, | |||
| 71 | module_param_named(max_requests, max_requests, uint, S_IRUGO); | 71 | module_param_named(max_requests, max_requests, uint, S_IRUGO); |
| 72 | MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. " | 72 | MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. " |
| 73 | "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]"); | 73 | "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]"); |
| 74 | module_param_named(max_lun, max_lun, uint, S_IRUGO); | 74 | module_param_named(max_lun, max_lun, ullong, S_IRUGO); |
| 75 | MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. " | 75 | MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. " |
| 76 | "[Default=" __stringify(IBMVFC_MAX_LUN) "]"); | 76 | "[Default=" __stringify(IBMVFC_MAX_LUN) "]"); |
| 77 | module_param_named(max_targets, max_targets, uint, S_IRUGO); | 77 | module_param_named(max_targets, max_targets, uint, S_IRUGO); |
| @@ -166,13 +166,13 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt) | |||
| 166 | switch (entry->fmt) { | 166 | switch (entry->fmt) { |
| 167 | case IBMVFC_CMD_FORMAT: | 167 | case IBMVFC_CMD_FORMAT: |
| 168 | entry->op_code = vfc_cmd->iu.cdb[0]; | 168 | entry->op_code = vfc_cmd->iu.cdb[0]; |
| 169 | entry->scsi_id = vfc_cmd->tgt_scsi_id; | 169 | entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id); |
| 170 | entry->lun = scsilun_to_int(&vfc_cmd->iu.lun); | 170 | entry->lun = scsilun_to_int(&vfc_cmd->iu.lun); |
| 171 | entry->tmf_flags = vfc_cmd->iu.tmf_flags; | 171 | entry->tmf_flags = vfc_cmd->iu.tmf_flags; |
| 172 | entry->u.start.xfer_len = vfc_cmd->iu.xfer_len; | 172 | entry->u.start.xfer_len = be32_to_cpu(vfc_cmd->iu.xfer_len); |
| 173 | break; | 173 | break; |
| 174 | case IBMVFC_MAD_FORMAT: | 174 | case IBMVFC_MAD_FORMAT: |
| 175 | entry->op_code = mad->opcode; | 175 | entry->op_code = be32_to_cpu(mad->opcode); |
| 176 | break; | 176 | break; |
| 177 | default: | 177 | default: |
| 178 | break; | 178 | break; |
| @@ -199,18 +199,18 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt) | |||
| 199 | switch (entry->fmt) { | 199 | switch (entry->fmt) { |
| 200 | case IBMVFC_CMD_FORMAT: | 200 | case IBMVFC_CMD_FORMAT: |
| 201 | entry->op_code = vfc_cmd->iu.cdb[0]; | 201 | entry->op_code = vfc_cmd->iu.cdb[0]; |
| 202 | entry->scsi_id = vfc_cmd->tgt_scsi_id; | 202 | entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id); |
| 203 | entry->lun = scsilun_to_int(&vfc_cmd->iu.lun); | 203 | entry->lun = scsilun_to_int(&vfc_cmd->iu.lun); |
| 204 | entry->tmf_flags = vfc_cmd->iu.tmf_flags; | 204 | entry->tmf_flags = vfc_cmd->iu.tmf_flags; |
| 205 | entry->u.end.status = vfc_cmd->status; | 205 | entry->u.end.status = be16_to_cpu(vfc_cmd->status); |
| 206 | entry->u.end.error = vfc_cmd->error; | 206 | entry->u.end.error = be16_to_cpu(vfc_cmd->error); |
| 207 | entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags; | 207 | entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags; |
| 208 | entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code; | 208 | entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code; |
| 209 | entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status; | 209 | entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status; |
| 210 | break; | 210 | break; |
| 211 | case IBMVFC_MAD_FORMAT: | 211 | case IBMVFC_MAD_FORMAT: |
| 212 | entry->op_code = mad->opcode; | 212 | entry->op_code = be32_to_cpu(mad->opcode); |
| 213 | entry->u.end.status = mad->status; | 213 | entry->u.end.status = be16_to_cpu(mad->status); |
| 214 | break; | 214 | break; |
| 215 | default: | 215 | default: |
| 216 | break; | 216 | break; |
| @@ -270,14 +270,14 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd) | |||
| 270 | { | 270 | { |
| 271 | int err; | 271 | int err; |
| 272 | struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp; | 272 | struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp; |
| 273 | int fc_rsp_len = rsp->fcp_rsp_len; | 273 | int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len); |
| 274 | 274 | ||
| 275 | if ((rsp->flags & FCP_RSP_LEN_VALID) && | 275 | if ((rsp->flags & FCP_RSP_LEN_VALID) && |
| 276 | ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || | 276 | ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || |
| 277 | rsp->data.info.rsp_code)) | 277 | rsp->data.info.rsp_code)) |
| 278 | return DID_ERROR << 16; | 278 | return DID_ERROR << 16; |
| 279 | 279 | ||
| 280 | err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error); | 280 | err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error)); |
| 281 | if (err >= 0) | 281 | if (err >= 0) |
| 282 | return rsp->scsi_status | (cmd_status[err].result << 16); | 282 | return rsp->scsi_status | (cmd_status[err].result << 16); |
| 283 | return rsp->scsi_status | (DID_ERROR << 16); | 283 | return rsp->scsi_status | (DID_ERROR << 16); |
| @@ -807,7 +807,7 @@ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code) | |||
| 807 | evt->cmnd->result = (error_code << 16); | 807 | evt->cmnd->result = (error_code << 16); |
| 808 | evt->done = ibmvfc_scsi_eh_done; | 808 | evt->done = ibmvfc_scsi_eh_done; |
| 809 | } else | 809 | } else |
| 810 | evt->xfer_iu->mad_common.status = IBMVFC_MAD_DRIVER_FAILED; | 810 | evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED); |
| 811 | 811 | ||
| 812 | list_del(&evt->queue); | 812 | list_del(&evt->queue); |
| 813 | del_timer(&evt->timer); | 813 | del_timer(&evt->timer); |
| @@ -955,7 +955,7 @@ static void ibmvfc_get_host_speed(struct Scsi_Host *shost) | |||
| 955 | 955 | ||
| 956 | spin_lock_irqsave(shost->host_lock, flags); | 956 | spin_lock_irqsave(shost->host_lock, flags); |
| 957 | if (vhost->state == IBMVFC_ACTIVE) { | 957 | if (vhost->state == IBMVFC_ACTIVE) { |
| 958 | switch (vhost->login_buf->resp.link_speed / 100) { | 958 | switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) { |
| 959 | case 1: | 959 | case 1: |
| 960 | fc_host_speed(shost) = FC_PORTSPEED_1GBIT; | 960 | fc_host_speed(shost) = FC_PORTSPEED_1GBIT; |
| 961 | break; | 961 | break; |
| @@ -976,7 +976,7 @@ static void ibmvfc_get_host_speed(struct Scsi_Host *shost) | |||
| 976 | break; | 976 | break; |
| 977 | default: | 977 | default: |
| 978 | ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n", | 978 | ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n", |
| 979 | vhost->login_buf->resp.link_speed / 100); | 979 | be64_to_cpu(vhost->login_buf->resp.link_speed) / 100); |
| 980 | fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; | 980 | fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; |
| 981 | break; | 981 | break; |
| 982 | } | 982 | } |
| @@ -1171,21 +1171,21 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost) | |||
| 1171 | 1171 | ||
| 1172 | memset(login_info, 0, sizeof(*login_info)); | 1172 | memset(login_info, 0, sizeof(*login_info)); |
| 1173 | 1173 | ||
| 1174 | login_info->ostype = IBMVFC_OS_LINUX; | 1174 | login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX); |
| 1175 | login_info->max_dma_len = IBMVFC_MAX_SECTORS << 9; | 1175 | login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9); |
| 1176 | login_info->max_payload = sizeof(struct ibmvfc_fcp_cmd_iu); | 1176 | login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu)); |
| 1177 | login_info->max_response = sizeof(struct ibmvfc_fcp_rsp); | 1177 | login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp)); |
| 1178 | login_info->partition_num = vhost->partition_number; | 1178 | login_info->partition_num = cpu_to_be32(vhost->partition_number); |
| 1179 | login_info->vfc_frame_version = 1; | 1179 | login_info->vfc_frame_version = cpu_to_be32(1); |
| 1180 | login_info->fcp_version = 3; | 1180 | login_info->fcp_version = cpu_to_be16(3); |
| 1181 | login_info->flags = IBMVFC_FLUSH_ON_HALT; | 1181 | login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT); |
| 1182 | if (vhost->client_migrated) | 1182 | if (vhost->client_migrated) |
| 1183 | login_info->flags |= IBMVFC_CLIENT_MIGRATED; | 1183 | login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED); |
| 1184 | 1184 | ||
| 1185 | login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; | 1185 | login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ); |
| 1186 | login_info->capabilities = IBMVFC_CAN_MIGRATE; | 1186 | login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE); |
| 1187 | login_info->async.va = vhost->async_crq.msg_token; | 1187 | login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token); |
| 1188 | login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs); | 1188 | login_info->async.len = cpu_to_be32(vhost->async_crq.size * sizeof(*vhost->async_crq.msgs)); |
| 1189 | strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME); | 1189 | strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME); |
| 1190 | strncpy(login_info->device_name, | 1190 | strncpy(login_info->device_name, |
| 1191 | dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME); | 1191 | dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME); |
| @@ -1225,7 +1225,7 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost) | |||
| 1225 | struct ibmvfc_event *evt = &pool->events[i]; | 1225 | struct ibmvfc_event *evt = &pool->events[i]; |
| 1226 | atomic_set(&evt->free, 1); | 1226 | atomic_set(&evt->free, 1); |
| 1227 | evt->crq.valid = 0x80; | 1227 | evt->crq.valid = 0x80; |
| 1228 | evt->crq.ioba = pool->iu_token + (sizeof(*evt->xfer_iu) * i); | 1228 | evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i)); |
| 1229 | evt->xfer_iu = pool->iu_storage + i; | 1229 | evt->xfer_iu = pool->iu_storage + i; |
| 1230 | evt->vhost = vhost; | 1230 | evt->vhost = vhost; |
| 1231 | evt->ext_list = NULL; | 1231 | evt->ext_list = NULL; |
| @@ -1310,8 +1310,8 @@ static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg, | |||
| 1310 | struct scatterlist *sg; | 1310 | struct scatterlist *sg; |
| 1311 | 1311 | ||
| 1312 | scsi_for_each_sg(scmd, sg, nseg, i) { | 1312 | scsi_for_each_sg(scmd, sg, nseg, i) { |
| 1313 | md[i].va = sg_dma_address(sg); | 1313 | md[i].va = cpu_to_be64(sg_dma_address(sg)); |
| 1314 | md[i].len = sg_dma_len(sg); | 1314 | md[i].len = cpu_to_be32(sg_dma_len(sg)); |
| 1315 | md[i].key = 0; | 1315 | md[i].key = 0; |
| 1316 | } | 1316 | } |
| 1317 | } | 1317 | } |
| @@ -1337,7 +1337,7 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd, | |||
| 1337 | 1337 | ||
| 1338 | sg_mapped = scsi_dma_map(scmd); | 1338 | sg_mapped = scsi_dma_map(scmd); |
| 1339 | if (!sg_mapped) { | 1339 | if (!sg_mapped) { |
| 1340 | vfc_cmd->flags |= IBMVFC_NO_MEM_DESC; | 1340 | vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC); |
| 1341 | return 0; | 1341 | return 0; |
| 1342 | } else if (unlikely(sg_mapped < 0)) { | 1342 | } else if (unlikely(sg_mapped < 0)) { |
| 1343 | if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) | 1343 | if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) |
| @@ -1346,10 +1346,10 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd, | |||
| 1346 | } | 1346 | } |
| 1347 | 1347 | ||
| 1348 | if (scmd->sc_data_direction == DMA_TO_DEVICE) { | 1348 | if (scmd->sc_data_direction == DMA_TO_DEVICE) { |
| 1349 | vfc_cmd->flags |= IBMVFC_WRITE; | 1349 | vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE); |
| 1350 | vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA; | 1350 | vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA; |
| 1351 | } else { | 1351 | } else { |
| 1352 | vfc_cmd->flags |= IBMVFC_READ; | 1352 | vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ); |
| 1353 | vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA; | 1353 | vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA; |
| 1354 | } | 1354 | } |
| 1355 | 1355 | ||
| @@ -1358,7 +1358,7 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd, | |||
| 1358 | return 0; | 1358 | return 0; |
| 1359 | } | 1359 | } |
| 1360 | 1360 | ||
| 1361 | vfc_cmd->flags |= IBMVFC_SCATTERLIST; | 1361 | vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST); |
| 1362 | 1362 | ||
| 1363 | if (!evt->ext_list) { | 1363 | if (!evt->ext_list) { |
| 1364 | evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC, | 1364 | evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC, |
| @@ -1374,8 +1374,8 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd, | |||
| 1374 | 1374 | ||
| 1375 | ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list); | 1375 | ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list); |
| 1376 | 1376 | ||
| 1377 | data->va = evt->ext_list_token; | 1377 | data->va = cpu_to_be64(evt->ext_list_token); |
| 1378 | data->len = sg_mapped * sizeof(struct srp_direct_buf); | 1378 | data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf)); |
| 1379 | data->key = 0; | 1379 | data->key = 0; |
| 1380 | return 0; | 1380 | return 0; |
| 1381 | } | 1381 | } |
| @@ -1404,15 +1404,15 @@ static void ibmvfc_timeout(struct ibmvfc_event *evt) | |||
| 1404 | static int ibmvfc_send_event(struct ibmvfc_event *evt, | 1404 | static int ibmvfc_send_event(struct ibmvfc_event *evt, |
| 1405 | struct ibmvfc_host *vhost, unsigned long timeout) | 1405 | struct ibmvfc_host *vhost, unsigned long timeout) |
| 1406 | { | 1406 | { |
| 1407 | u64 *crq_as_u64 = (u64 *) &evt->crq; | 1407 | __be64 *crq_as_u64 = (__be64 *) &evt->crq; |
| 1408 | int rc; | 1408 | int rc; |
| 1409 | 1409 | ||
| 1410 | /* Copy the IU into the transfer area */ | 1410 | /* Copy the IU into the transfer area */ |
| 1411 | *evt->xfer_iu = evt->iu; | 1411 | *evt->xfer_iu = evt->iu; |
| 1412 | if (evt->crq.format == IBMVFC_CMD_FORMAT) | 1412 | if (evt->crq.format == IBMVFC_CMD_FORMAT) |
| 1413 | evt->xfer_iu->cmd.tag = (u64)evt; | 1413 | evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt); |
| 1414 | else if (evt->crq.format == IBMVFC_MAD_FORMAT) | 1414 | else if (evt->crq.format == IBMVFC_MAD_FORMAT) |
| 1415 | evt->xfer_iu->mad_common.tag = (u64)evt; | 1415 | evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt); |
| 1416 | else | 1416 | else |
| 1417 | BUG(); | 1417 | BUG(); |
| 1418 | 1418 | ||
| @@ -1428,7 +1428,8 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt, | |||
| 1428 | 1428 | ||
| 1429 | mb(); | 1429 | mb(); |
| 1430 | 1430 | ||
| 1431 | if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) { | 1431 | if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]), |
| 1432 | be64_to_cpu(crq_as_u64[1])))) { | ||
| 1432 | list_del(&evt->queue); | 1433 | list_del(&evt->queue); |
| 1433 | del_timer(&evt->timer); | 1434 | del_timer(&evt->timer); |
| 1434 | 1435 | ||
| @@ -1451,7 +1452,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt, | |||
| 1451 | evt->cmnd->result = DID_ERROR << 16; | 1452 | evt->cmnd->result = DID_ERROR << 16; |
| 1452 | evt->done = ibmvfc_scsi_eh_done; | 1453 | evt->done = ibmvfc_scsi_eh_done; |
| 1453 | } else | 1454 | } else |
| 1454 | evt->xfer_iu->mad_common.status = IBMVFC_MAD_CRQ_ERROR; | 1455 | evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR); |
| 1455 | 1456 | ||
| 1456 | evt->done(evt); | 1457 | evt->done(evt); |
| 1457 | } else | 1458 | } else |
| @@ -1472,7 +1473,7 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt) | |||
| 1472 | struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp; | 1473 | struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp; |
| 1473 | struct scsi_cmnd *cmnd = evt->cmnd; | 1474 | struct scsi_cmnd *cmnd = evt->cmnd; |
| 1474 | const char *err = unknown_error; | 1475 | const char *err = unknown_error; |
| 1475 | int index = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error); | 1476 | int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error)); |
| 1476 | int logerr = 0; | 1477 | int logerr = 0; |
| 1477 | int rsp_code = 0; | 1478 | int rsp_code = 0; |
| 1478 | 1479 | ||
| @@ -1526,13 +1527,13 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt) | |||
| 1526 | struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp; | 1527 | struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp; |
| 1527 | struct scsi_cmnd *cmnd = evt->cmnd; | 1528 | struct scsi_cmnd *cmnd = evt->cmnd; |
| 1528 | u32 rsp_len = 0; | 1529 | u32 rsp_len = 0; |
| 1529 | u32 sense_len = rsp->fcp_sense_len; | 1530 | u32 sense_len = be32_to_cpu(rsp->fcp_sense_len); |
| 1530 | 1531 | ||
| 1531 | if (cmnd) { | 1532 | if (cmnd) { |
| 1532 | if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID) | 1533 | if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID) |
| 1533 | scsi_set_resid(cmnd, vfc_cmd->adapter_resid); | 1534 | scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid)); |
| 1534 | else if (rsp->flags & FCP_RESID_UNDER) | 1535 | else if (rsp->flags & FCP_RESID_UNDER) |
| 1535 | scsi_set_resid(cmnd, rsp->fcp_resid); | 1536 | scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid)); |
| 1536 | else | 1537 | else |
| 1537 | scsi_set_resid(cmnd, 0); | 1538 | scsi_set_resid(cmnd, 0); |
| 1538 | 1539 | ||
| @@ -1540,12 +1541,13 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt) | |||
| 1540 | cmnd->result = ibmvfc_get_err_result(vfc_cmd); | 1541 | cmnd->result = ibmvfc_get_err_result(vfc_cmd); |
| 1541 | 1542 | ||
| 1542 | if (rsp->flags & FCP_RSP_LEN_VALID) | 1543 | if (rsp->flags & FCP_RSP_LEN_VALID) |
| 1543 | rsp_len = rsp->fcp_rsp_len; | 1544 | rsp_len = be32_to_cpu(rsp->fcp_rsp_len); |
| 1544 | if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE) | 1545 | if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE) |
| 1545 | sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len; | 1546 | sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len; |
| 1546 | if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) | 1547 | if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) |
| 1547 | memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); | 1548 | memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); |
| 1548 | if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED)) | 1549 | if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) && |
| 1550 | (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED)) | ||
| 1549 | ibmvfc_relogin(cmnd->device); | 1551 | ibmvfc_relogin(cmnd->device); |
| 1550 | 1552 | ||
| 1551 | if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) | 1553 | if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) |
| @@ -1630,19 +1632,19 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd, | |||
| 1630 | cmnd->scsi_done = done; | 1632 | cmnd->scsi_done = done; |
| 1631 | vfc_cmd = &evt->iu.cmd; | 1633 | vfc_cmd = &evt->iu.cmd; |
| 1632 | memset(vfc_cmd, 0, sizeof(*vfc_cmd)); | 1634 | memset(vfc_cmd, 0, sizeof(*vfc_cmd)); |
| 1633 | vfc_cmd->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp); | 1635 | vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp)); |
| 1634 | vfc_cmd->resp.len = sizeof(vfc_cmd->rsp); | 1636 | vfc_cmd->resp.len = cpu_to_be32(sizeof(vfc_cmd->rsp)); |
| 1635 | vfc_cmd->frame_type = IBMVFC_SCSI_FCP_TYPE; | 1637 | vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE); |
| 1636 | vfc_cmd->payload_len = sizeof(vfc_cmd->iu); | 1638 | vfc_cmd->payload_len = cpu_to_be32(sizeof(vfc_cmd->iu)); |
| 1637 | vfc_cmd->resp_len = sizeof(vfc_cmd->rsp); | 1639 | vfc_cmd->resp_len = cpu_to_be32(sizeof(vfc_cmd->rsp)); |
| 1638 | vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata; | 1640 | vfc_cmd->cancel_key = cpu_to_be32((unsigned long)cmnd->device->hostdata); |
| 1639 | vfc_cmd->tgt_scsi_id = rport->port_id; | 1641 | vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id); |
| 1640 | vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd); | 1642 | vfc_cmd->iu.xfer_len = cpu_to_be32(scsi_bufflen(cmnd)); |
| 1641 | int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun); | 1643 | int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun); |
| 1642 | memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len); | 1644 | memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len); |
| 1643 | 1645 | ||
| 1644 | if (scsi_populate_tag_msg(cmnd, tag)) { | 1646 | if (scsi_populate_tag_msg(cmnd, tag)) { |
| 1645 | vfc_cmd->task_tag = tag[1]; | 1647 | vfc_cmd->task_tag = cpu_to_be64(tag[1]); |
| 1646 | switch (tag[0]) { | 1648 | switch (tag[0]) { |
| 1647 | case MSG_SIMPLE_TAG: | 1649 | case MSG_SIMPLE_TAG: |
| 1648 | vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK; | 1650 | vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK; |
| @@ -1732,12 +1734,12 @@ static int ibmvfc_bsg_timeout(struct fc_bsg_job *job) | |||
| 1732 | 1734 | ||
| 1733 | tmf = &evt->iu.tmf; | 1735 | tmf = &evt->iu.tmf; |
| 1734 | memset(tmf, 0, sizeof(*tmf)); | 1736 | memset(tmf, 0, sizeof(*tmf)); |
| 1735 | tmf->common.version = 1; | 1737 | tmf->common.version = cpu_to_be32(1); |
| 1736 | tmf->common.opcode = IBMVFC_TMF_MAD; | 1738 | tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD); |
| 1737 | tmf->common.length = sizeof(*tmf); | 1739 | tmf->common.length = cpu_to_be16(sizeof(*tmf)); |
| 1738 | tmf->scsi_id = port_id; | 1740 | tmf->scsi_id = cpu_to_be64(port_id); |
| 1739 | tmf->cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY; | 1741 | tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY); |
| 1740 | tmf->my_cancel_key = IBMVFC_INTERNAL_CANCEL_KEY; | 1742 | tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY); |
| 1741 | rc = ibmvfc_send_event(evt, vhost, default_timeout); | 1743 | rc = ibmvfc_send_event(evt, vhost, default_timeout); |
| 1742 | 1744 | ||
| 1743 | if (rc != 0) { | 1745 | if (rc != 0) { |
| @@ -1789,10 +1791,10 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id) | |||
| 1789 | ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); | 1791 | ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); |
| 1790 | plogi = &evt->iu.plogi; | 1792 | plogi = &evt->iu.plogi; |
| 1791 | memset(plogi, 0, sizeof(*plogi)); | 1793 | memset(plogi, 0, sizeof(*plogi)); |
| 1792 | plogi->common.version = 1; | 1794 | plogi->common.version = cpu_to_be32(1); |
| 1793 | plogi->common.opcode = IBMVFC_PORT_LOGIN; | 1795 | plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN); |
| 1794 | plogi->common.length = sizeof(*plogi); | 1796 | plogi->common.length = cpu_to_be16(sizeof(*plogi)); |
| 1795 | plogi->scsi_id = port_id; | 1797 | plogi->scsi_id = cpu_to_be64(port_id); |
| 1796 | evt->sync_iu = &rsp_iu; | 1798 | evt->sync_iu = &rsp_iu; |
| 1797 | init_completion(&evt->comp); | 1799 | init_completion(&evt->comp); |
| 1798 | 1800 | ||
| @@ -1904,26 +1906,26 @@ static int ibmvfc_bsg_request(struct fc_bsg_job *job) | |||
| 1904 | mad = &evt->iu.passthru; | 1906 | mad = &evt->iu.passthru; |
| 1905 | 1907 | ||
| 1906 | memset(mad, 0, sizeof(*mad)); | 1908 | memset(mad, 0, sizeof(*mad)); |
| 1907 | mad->common.version = 1; | 1909 | mad->common.version = cpu_to_be32(1); |
| 1908 | mad->common.opcode = IBMVFC_PASSTHRU; | 1910 | mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU); |
| 1909 | mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu); | 1911 | mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu)); |
| 1910 | 1912 | ||
| 1911 | mad->cmd_ioba.va = (u64)evt->crq.ioba + | 1913 | mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + |
| 1912 | offsetof(struct ibmvfc_passthru_mad, iu); | 1914 | offsetof(struct ibmvfc_passthru_mad, iu)); |
| 1913 | mad->cmd_ioba.len = sizeof(mad->iu); | 1915 | mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu)); |
| 1914 | 1916 | ||
| 1915 | mad->iu.cmd_len = job->request_payload.payload_len; | 1917 | mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len); |
| 1916 | mad->iu.rsp_len = job->reply_payload.payload_len; | 1918 | mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len); |
| 1917 | mad->iu.flags = fc_flags; | 1919 | mad->iu.flags = cpu_to_be32(fc_flags); |
| 1918 | mad->iu.cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY; | 1920 | mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY); |
| 1919 | 1921 | ||
| 1920 | mad->iu.cmd.va = sg_dma_address(job->request_payload.sg_list); | 1922 | mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list)); |
| 1921 | mad->iu.cmd.len = sg_dma_len(job->request_payload.sg_list); | 1923 | mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list)); |
| 1922 | mad->iu.rsp.va = sg_dma_address(job->reply_payload.sg_list); | 1924 | mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list)); |
| 1923 | mad->iu.rsp.len = sg_dma_len(job->reply_payload.sg_list); | 1925 | mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list)); |
| 1924 | mad->iu.scsi_id = port_id; | 1926 | mad->iu.scsi_id = cpu_to_be64(port_id); |
| 1925 | mad->iu.tag = (u64)evt; | 1927 | mad->iu.tag = cpu_to_be64((u64)evt); |
| 1926 | rsp_len = mad->iu.rsp.len; | 1928 | rsp_len = be32_to_cpu(mad->iu.rsp.len); |
| 1927 | 1929 | ||
| 1928 | evt->sync_iu = &rsp_iu; | 1930 | evt->sync_iu = &rsp_iu; |
| 1929 | init_completion(&evt->comp); | 1931 | init_completion(&evt->comp); |
| @@ -1986,15 +1988,15 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc) | |||
| 1986 | 1988 | ||
| 1987 | tmf = &evt->iu.cmd; | 1989 | tmf = &evt->iu.cmd; |
| 1988 | memset(tmf, 0, sizeof(*tmf)); | 1990 | memset(tmf, 0, sizeof(*tmf)); |
| 1989 | tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp); | 1991 | tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp)); |
| 1990 | tmf->resp.len = sizeof(tmf->rsp); | 1992 | tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp)); |
| 1991 | tmf->frame_type = IBMVFC_SCSI_FCP_TYPE; | 1993 | tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE); |
| 1992 | tmf->payload_len = sizeof(tmf->iu); | 1994 | tmf->payload_len = cpu_to_be32(sizeof(tmf->iu)); |
| 1993 | tmf->resp_len = sizeof(tmf->rsp); | 1995 | tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp)); |
| 1994 | tmf->cancel_key = (unsigned long)sdev->hostdata; | 1996 | tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata); |
| 1995 | tmf->tgt_scsi_id = rport->port_id; | 1997 | tmf->tgt_scsi_id = cpu_to_be64(rport->port_id); |
| 1996 | int_to_scsilun(sdev->lun, &tmf->iu.lun); | 1998 | int_to_scsilun(sdev->lun, &tmf->iu.lun); |
| 1997 | tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF); | 1999 | tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF)); |
| 1998 | tmf->iu.tmf_flags = type; | 2000 | tmf->iu.tmf_flags = type; |
| 1999 | evt->sync_iu = &rsp_iu; | 2001 | evt->sync_iu = &rsp_iu; |
| 2000 | 2002 | ||
| @@ -2020,8 +2022,8 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc) | |||
| 2020 | rsp_code = fc_rsp->data.info.rsp_code; | 2022 | rsp_code = fc_rsp->data.info.rsp_code; |
| 2021 | 2023 | ||
| 2022 | sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) " | 2024 | sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) " |
| 2023 | "flags: %x fcp_rsp: %x, scsi_status: %x\n", | 2025 | "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc, |
| 2024 | desc, ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error), | 2026 | ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), |
| 2025 | rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, | 2027 | rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, |
| 2026 | fc_rsp->scsi_status); | 2028 | fc_rsp->scsi_status); |
| 2027 | rsp_rc = -EIO; | 2029 | rsp_rc = -EIO; |
| @@ -2185,19 +2187,19 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) | |||
| 2185 | 2187 | ||
| 2186 | tmf = &evt->iu.tmf; | 2188 | tmf = &evt->iu.tmf; |
| 2187 | memset(tmf, 0, sizeof(*tmf)); | 2189 | memset(tmf, 0, sizeof(*tmf)); |
| 2188 | tmf->common.version = 1; | 2190 | tmf->common.version = cpu_to_be32(1); |
| 2189 | tmf->common.opcode = IBMVFC_TMF_MAD; | 2191 | tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD); |
| 2190 | tmf->common.length = sizeof(*tmf); | 2192 | tmf->common.length = cpu_to_be16(sizeof(*tmf)); |
| 2191 | tmf->scsi_id = rport->port_id; | 2193 | tmf->scsi_id = cpu_to_be64(rport->port_id); |
| 2192 | int_to_scsilun(sdev->lun, &tmf->lun); | 2194 | int_to_scsilun(sdev->lun, &tmf->lun); |
| 2193 | if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS)) | 2195 | if (!(be64_to_cpu(vhost->login_buf->resp.capabilities) & IBMVFC_CAN_SUPPRESS_ABTS)) |
| 2194 | type &= ~IBMVFC_TMF_SUPPRESS_ABTS; | 2196 | type &= ~IBMVFC_TMF_SUPPRESS_ABTS; |
| 2195 | if (vhost->state == IBMVFC_ACTIVE) | 2197 | if (vhost->state == IBMVFC_ACTIVE) |
| 2196 | tmf->flags = (type | IBMVFC_TMF_LUA_VALID); | 2198 | tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID)); |
| 2197 | else | 2199 | else |
| 2198 | tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID); | 2200 | tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID)); |
| 2199 | tmf->cancel_key = (unsigned long)sdev->hostdata; | 2201 | tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata); |
| 2200 | tmf->my_cancel_key = (unsigned long)starget->hostdata; | 2202 | tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata); |
| 2201 | 2203 | ||
| 2202 | evt->sync_iu = &rsp; | 2204 | evt->sync_iu = &rsp; |
| 2203 | init_completion(&evt->comp); | 2205 | init_completion(&evt->comp); |
| @@ -2217,7 +2219,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) | |||
| 2217 | sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n"); | 2219 | sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n"); |
| 2218 | 2220 | ||
| 2219 | wait_for_completion(&evt->comp); | 2221 | wait_for_completion(&evt->comp); |
| 2220 | status = rsp.mad_common.status; | 2222 | status = be16_to_cpu(rsp.mad_common.status); |
| 2221 | spin_lock_irqsave(vhost->host->host_lock, flags); | 2223 | spin_lock_irqsave(vhost->host->host_lock, flags); |
| 2222 | ibmvfc_free_event(evt); | 2224 | ibmvfc_free_event(evt); |
| 2223 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | 2225 | spin_unlock_irqrestore(vhost->host->host_lock, flags); |
| @@ -2252,7 +2254,7 @@ static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key) | |||
| 2252 | unsigned long cancel_key = (unsigned long)key; | 2254 | unsigned long cancel_key = (unsigned long)key; |
| 2253 | 2255 | ||
| 2254 | if (evt->crq.format == IBMVFC_CMD_FORMAT && | 2256 | if (evt->crq.format == IBMVFC_CMD_FORMAT && |
| 2255 | evt->iu.cmd.cancel_key == cancel_key) | 2257 | be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key) |
| 2256 | return 1; | 2258 | return 1; |
| 2257 | return 0; | 2259 | return 0; |
| 2258 | } | 2260 | } |
| @@ -2316,15 +2318,15 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) | |||
| 2316 | 2318 | ||
| 2317 | tmf = &evt->iu.cmd; | 2319 | tmf = &evt->iu.cmd; |
| 2318 | memset(tmf, 0, sizeof(*tmf)); | 2320 | memset(tmf, 0, sizeof(*tmf)); |
| 2319 | tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp); | 2321 | tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp)); |
| 2320 | tmf->resp.len = sizeof(tmf->rsp); | 2322 | tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp)); |
| 2321 | tmf->frame_type = IBMVFC_SCSI_FCP_TYPE; | 2323 | tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE); |
| 2322 | tmf->payload_len = sizeof(tmf->iu); | 2324 | tmf->payload_len = cpu_to_be32(sizeof(tmf->iu)); |
| 2323 | tmf->resp_len = sizeof(tmf->rsp); | 2325 | tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp)); |
| 2324 | tmf->cancel_key = (unsigned long)sdev->hostdata; | 2326 | tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata); |
| 2325 | tmf->tgt_scsi_id = rport->port_id; | 2327 | tmf->tgt_scsi_id = cpu_to_be64(rport->port_id); |
| 2326 | int_to_scsilun(sdev->lun, &tmf->iu.lun); | 2328 | int_to_scsilun(sdev->lun, &tmf->iu.lun); |
| 2327 | tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF); | 2329 | tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF)); |
| 2328 | tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET; | 2330 | tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET; |
| 2329 | evt->sync_iu = &rsp_iu; | 2331 | evt->sync_iu = &rsp_iu; |
| 2330 | 2332 | ||
| @@ -2380,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) | |||
| 2380 | 2382 | ||
| 2381 | sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) " | 2383 | sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) " |
| 2382 | "flags: %x fcp_rsp: %x, scsi_status: %x\n", | 2384 | "flags: %x fcp_rsp: %x, scsi_status: %x\n", |
| 2383 | ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error), | 2385 | ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), |
| 2384 | rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, | 2386 | rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, |
| 2385 | fc_rsp->scsi_status); | 2387 | fc_rsp->scsi_status); |
| 2386 | rsp_rc = -EIO; | 2388 | rsp_rc = -EIO; |
| @@ -2641,14 +2643,14 @@ static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state) | |||
| 2641 | static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, | 2643 | static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, |
| 2642 | struct ibmvfc_host *vhost) | 2644 | struct ibmvfc_host *vhost) |
| 2643 | { | 2645 | { |
| 2644 | const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(crq->event); | 2646 | const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event)); |
| 2645 | struct ibmvfc_target *tgt; | 2647 | struct ibmvfc_target *tgt; |
| 2646 | 2648 | ||
| 2647 | ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx," | 2649 | ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx," |
| 2648 | " node_name: %llx%s\n", desc->desc, crq->scsi_id, crq->wwpn, crq->node_name, | 2650 | " node_name: %llx%s\n", desc->desc, crq->scsi_id, crq->wwpn, crq->node_name, |
| 2649 | ibmvfc_get_link_state(crq->link_state)); | 2651 | ibmvfc_get_link_state(crq->link_state)); |
| 2650 | 2652 | ||
| 2651 | switch (crq->event) { | 2653 | switch (be64_to_cpu(crq->event)) { |
| 2652 | case IBMVFC_AE_RESUME: | 2654 | case IBMVFC_AE_RESUME: |
| 2653 | switch (crq->link_state) { | 2655 | switch (crq->link_state) { |
| 2654 | case IBMVFC_AE_LS_LINK_DOWN: | 2656 | case IBMVFC_AE_LS_LINK_DOWN: |
| @@ -2691,15 +2693,15 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, | |||
| 2691 | list_for_each_entry(tgt, &vhost->targets, queue) { | 2693 | list_for_each_entry(tgt, &vhost->targets, queue) { |
| 2692 | if (!crq->scsi_id && !crq->wwpn && !crq->node_name) | 2694 | if (!crq->scsi_id && !crq->wwpn && !crq->node_name) |
| 2693 | break; | 2695 | break; |
| 2694 | if (crq->scsi_id && tgt->scsi_id != crq->scsi_id) | 2696 | if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id) |
| 2695 | continue; | 2697 | continue; |
| 2696 | if (crq->wwpn && tgt->ids.port_name != crq->wwpn) | 2698 | if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn) |
| 2697 | continue; | 2699 | continue; |
| 2698 | if (crq->node_name && tgt->ids.node_name != crq->node_name) | 2700 | if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name) |
| 2699 | continue; | 2701 | continue; |
| 2700 | if (tgt->need_login && crq->event == IBMVFC_AE_ELS_LOGO) | 2702 | if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO) |
| 2701 | tgt->logo_rcvd = 1; | 2703 | tgt->logo_rcvd = 1; |
| 2702 | if (!tgt->need_login || crq->event == IBMVFC_AE_ELS_PLOGI) { | 2704 | if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) { |
| 2703 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 2705 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
| 2704 | ibmvfc_reinit_host(vhost); | 2706 | ibmvfc_reinit_host(vhost); |
| 2705 | } | 2707 | } |
| @@ -2730,7 +2732,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, | |||
| 2730 | static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) | 2732 | static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) |
| 2731 | { | 2733 | { |
| 2732 | long rc; | 2734 | long rc; |
| 2733 | struct ibmvfc_event *evt = (struct ibmvfc_event *)crq->ioba; | 2735 | struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba); |
| 2734 | 2736 | ||
| 2735 | switch (crq->valid) { | 2737 | switch (crq->valid) { |
| 2736 | case IBMVFC_CRQ_INIT_RSP: | 2738 | case IBMVFC_CRQ_INIT_RSP: |
| @@ -3336,7 +3338,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) | |||
| 3336 | struct ibmvfc_host *vhost = evt->vhost; | 3338 | struct ibmvfc_host *vhost = evt->vhost; |
| 3337 | struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; | 3339 | struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; |
| 3338 | struct ibmvfc_prli_svc_parms *parms = &rsp->parms; | 3340 | struct ibmvfc_prli_svc_parms *parms = &rsp->parms; |
| 3339 | u32 status = rsp->common.status; | 3341 | u32 status = be16_to_cpu(rsp->common.status); |
| 3340 | int index, level = IBMVFC_DEFAULT_LOG_LEVEL; | 3342 | int index, level = IBMVFC_DEFAULT_LOG_LEVEL; |
| 3341 | 3343 | ||
| 3342 | vhost->discovery_threads--; | 3344 | vhost->discovery_threads--; |
| @@ -3347,14 +3349,14 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) | |||
| 3347 | parms->type, parms->flags, parms->service_parms); | 3349 | parms->type, parms->flags, parms->service_parms); |
| 3348 | 3350 | ||
| 3349 | if (parms->type == IBMVFC_SCSI_FCP_TYPE) { | 3351 | if (parms->type == IBMVFC_SCSI_FCP_TYPE) { |
| 3350 | index = ibmvfc_get_prli_rsp(parms->flags); | 3352 | index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags)); |
| 3351 | if (prli_rsp[index].logged_in) { | 3353 | if (prli_rsp[index].logged_in) { |
| 3352 | if (parms->flags & IBMVFC_PRLI_EST_IMG_PAIR) { | 3354 | if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) { |
| 3353 | tgt->need_login = 0; | 3355 | tgt->need_login = 0; |
| 3354 | tgt->ids.roles = 0; | 3356 | tgt->ids.roles = 0; |
| 3355 | if (parms->service_parms & IBMVFC_PRLI_TARGET_FUNC) | 3357 | if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC) |
| 3356 | tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET; | 3358 | tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET; |
| 3357 | if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC) | 3359 | if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC) |
| 3358 | tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; | 3360 | tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; |
| 3359 | tgt->add_rport = 1; | 3361 | tgt->add_rport = 1; |
| 3360 | } else | 3362 | } else |
| @@ -3373,17 +3375,18 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) | |||
| 3373 | break; | 3375 | break; |
| 3374 | case IBMVFC_MAD_FAILED: | 3376 | case IBMVFC_MAD_FAILED: |
| 3375 | default: | 3377 | default: |
| 3376 | if ((rsp->status & IBMVFC_VIOS_FAILURE) && rsp->error == IBMVFC_PLOGI_REQUIRED) | 3378 | if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) && |
| 3379 | be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED) | ||
| 3377 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); | 3380 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); |
| 3378 | else if (tgt->logo_rcvd) | 3381 | else if (tgt->logo_rcvd) |
| 3379 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); | 3382 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); |
| 3380 | else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) | 3383 | else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) |
| 3381 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); | 3384 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); |
| 3382 | else | 3385 | else |
| 3383 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 3386 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
| 3384 | 3387 | ||
| 3385 | tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", | 3388 | tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", |
| 3386 | ibmvfc_get_cmd_error(rsp->status, rsp->error), | 3389 | ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), |
| 3387 | rsp->status, rsp->error, status); | 3390 | rsp->status, rsp->error, status); |
| 3388 | break; | 3391 | break; |
| 3389 | }; | 3392 | }; |
| @@ -3414,14 +3417,14 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt) | |||
| 3414 | evt->tgt = tgt; | 3417 | evt->tgt = tgt; |
| 3415 | prli = &evt->iu.prli; | 3418 | prli = &evt->iu.prli; |
| 3416 | memset(prli, 0, sizeof(*prli)); | 3419 | memset(prli, 0, sizeof(*prli)); |
| 3417 | prli->common.version = 1; | 3420 | prli->common.version = cpu_to_be32(1); |
| 3418 | prli->common.opcode = IBMVFC_PROCESS_LOGIN; | 3421 | prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN); |
| 3419 | prli->common.length = sizeof(*prli); | 3422 | prli->common.length = cpu_to_be16(sizeof(*prli)); |
| 3420 | prli->scsi_id = tgt->scsi_id; | 3423 | prli->scsi_id = cpu_to_be64(tgt->scsi_id); |
| 3421 | 3424 | ||
| 3422 | prli->parms.type = IBMVFC_SCSI_FCP_TYPE; | 3425 | prli->parms.type = IBMVFC_SCSI_FCP_TYPE; |
| 3423 | prli->parms.flags = IBMVFC_PRLI_EST_IMG_PAIR; | 3426 | prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR); |
| 3424 | prli->parms.service_parms = IBMVFC_PRLI_INITIATOR_FUNC; | 3427 | prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC); |
| 3425 | 3428 | ||
| 3426 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); | 3429 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); |
| 3427 | if (ibmvfc_send_event(evt, vhost, default_timeout)) { | 3430 | if (ibmvfc_send_event(evt, vhost, default_timeout)) { |
| @@ -3442,7 +3445,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) | |||
| 3442 | struct ibmvfc_target *tgt = evt->tgt; | 3445 | struct ibmvfc_target *tgt = evt->tgt; |
| 3443 | struct ibmvfc_host *vhost = evt->vhost; | 3446 | struct ibmvfc_host *vhost = evt->vhost; |
| 3444 | struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; | 3447 | struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; |
| 3445 | u32 status = rsp->common.status; | 3448 | u32 status = be16_to_cpu(rsp->common.status); |
| 3446 | int level = IBMVFC_DEFAULT_LOG_LEVEL; | 3449 | int level = IBMVFC_DEFAULT_LOG_LEVEL; |
| 3447 | 3450 | ||
| 3448 | vhost->discovery_threads--; | 3451 | vhost->discovery_threads--; |
| @@ -3472,15 +3475,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) | |||
| 3472 | break; | 3475 | break; |
| 3473 | case IBMVFC_MAD_FAILED: | 3476 | case IBMVFC_MAD_FAILED: |
| 3474 | default: | 3477 | default: |
| 3475 | if (ibmvfc_retry_cmd(rsp->status, rsp->error)) | 3478 | if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) |
| 3476 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); | 3479 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); |
| 3477 | else | 3480 | else |
| 3478 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 3481 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
| 3479 | 3482 | ||
| 3480 | tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", | 3483 | tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", |
| 3481 | ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, | 3484 | ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error, |
| 3482 | ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, | 3485 | ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type, |
| 3483 | ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status); | 3486 | ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status); |
| 3484 | break; | 3487 | break; |
| 3485 | }; | 3488 | }; |
| 3486 | 3489 | ||
| @@ -3512,10 +3515,10 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt) | |||
| 3512 | evt->tgt = tgt; | 3515 | evt->tgt = tgt; |
| 3513 | plogi = &evt->iu.plogi; | 3516 | plogi = &evt->iu.plogi; |
| 3514 | memset(plogi, 0, sizeof(*plogi)); | 3517 | memset(plogi, 0, sizeof(*plogi)); |
| 3515 | plogi->common.version = 1; | 3518 | plogi->common.version = cpu_to_be32(1); |
| 3516 | plogi->common.opcode = IBMVFC_PORT_LOGIN; | 3519 | plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN); |
| 3517 | plogi->common.length = sizeof(*plogi); | 3520 | plogi->common.length = cpu_to_be16(sizeof(*plogi)); |
| 3518 | plogi->scsi_id = tgt->scsi_id; | 3521 | plogi->scsi_id = cpu_to_be64(tgt->scsi_id); |
| 3519 | 3522 | ||
| 3520 | if (ibmvfc_send_event(evt, vhost, default_timeout)) { | 3523 | if (ibmvfc_send_event(evt, vhost, default_timeout)) { |
| 3521 | vhost->discovery_threads--; | 3524 | vhost->discovery_threads--; |
| @@ -3535,7 +3538,7 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt) | |||
| 3535 | struct ibmvfc_target *tgt = evt->tgt; | 3538 | struct ibmvfc_target *tgt = evt->tgt; |
| 3536 | struct ibmvfc_host *vhost = evt->vhost; | 3539 | struct ibmvfc_host *vhost = evt->vhost; |
| 3537 | struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout; | 3540 | struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout; |
| 3538 | u32 status = rsp->common.status; | 3541 | u32 status = be16_to_cpu(rsp->common.status); |
| 3539 | 3542 | ||
| 3540 | vhost->discovery_threads--; | 3543 | vhost->discovery_threads--; |
| 3541 | ibmvfc_free_event(evt); | 3544 | ibmvfc_free_event(evt); |
| @@ -3585,10 +3588,10 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt) | |||
| 3585 | evt->tgt = tgt; | 3588 | evt->tgt = tgt; |
| 3586 | mad = &evt->iu.implicit_logout; | 3589 | mad = &evt->iu.implicit_logout; |
| 3587 | memset(mad, 0, sizeof(*mad)); | 3590 | memset(mad, 0, sizeof(*mad)); |
| 3588 | mad->common.version = 1; | 3591 | mad->common.version = cpu_to_be32(1); |
| 3589 | mad->common.opcode = IBMVFC_IMPLICIT_LOGOUT; | 3592 | mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT); |
| 3590 | mad->common.length = sizeof(*mad); | 3593 | mad->common.length = cpu_to_be16(sizeof(*mad)); |
| 3591 | mad->old_scsi_id = tgt->scsi_id; | 3594 | mad->old_scsi_id = cpu_to_be64(tgt->scsi_id); |
| 3592 | 3595 | ||
| 3593 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); | 3596 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); |
| 3594 | if (ibmvfc_send_event(evt, vhost, default_timeout)) { | 3597 | if (ibmvfc_send_event(evt, vhost, default_timeout)) { |
| @@ -3616,7 +3619,7 @@ static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad, | |||
| 3616 | if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name, | 3619 | if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name, |
| 3617 | sizeof(tgt->ids.node_name))) | 3620 | sizeof(tgt->ids.node_name))) |
| 3618 | return 1; | 3621 | return 1; |
| 3619 | if (mad->fc_iu.response[6] != tgt->scsi_id) | 3622 | if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id) |
| 3620 | return 1; | 3623 | return 1; |
| 3621 | return 0; | 3624 | return 0; |
| 3622 | } | 3625 | } |
| @@ -3631,7 +3634,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt) | |||
| 3631 | struct ibmvfc_target *tgt = evt->tgt; | 3634 | struct ibmvfc_target *tgt = evt->tgt; |
| 3632 | struct ibmvfc_host *vhost = evt->vhost; | 3635 | struct ibmvfc_host *vhost = evt->vhost; |
| 3633 | struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru; | 3636 | struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru; |
| 3634 | u32 status = mad->common.status; | 3637 | u32 status = be16_to_cpu(mad->common.status); |
| 3635 | u8 fc_reason, fc_explain; | 3638 | u8 fc_reason, fc_explain; |
| 3636 | 3639 | ||
| 3637 | vhost->discovery_threads--; | 3640 | vhost->discovery_threads--; |
| @@ -3649,10 +3652,10 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt) | |||
| 3649 | case IBMVFC_MAD_FAILED: | 3652 | case IBMVFC_MAD_FAILED: |
| 3650 | default: | 3653 | default: |
| 3651 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 3654 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
| 3652 | fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16; | 3655 | fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16; |
| 3653 | fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8; | 3656 | fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8; |
| 3654 | tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", | 3657 | tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", |
| 3655 | ibmvfc_get_cmd_error(mad->iu.status, mad->iu.error), | 3658 | ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)), |
| 3656 | mad->iu.status, mad->iu.error, | 3659 | mad->iu.status, mad->iu.error, |
| 3657 | ibmvfc_get_fc_type(fc_reason), fc_reason, | 3660 | ibmvfc_get_fc_type(fc_reason), fc_reason, |
| 3658 | ibmvfc_get_ls_explain(fc_explain), fc_explain, status); | 3661 | ibmvfc_get_ls_explain(fc_explain), fc_explain, status); |
| @@ -3674,22 +3677,22 @@ static void ibmvfc_init_passthru(struct ibmvfc_event *evt) | |||
| 3674 | struct ibmvfc_passthru_mad *mad = &evt->iu.passthru; | 3677 | struct ibmvfc_passthru_mad *mad = &evt->iu.passthru; |
| 3675 | 3678 | ||
| 3676 | memset(mad, 0, sizeof(*mad)); | 3679 | memset(mad, 0, sizeof(*mad)); |
| 3677 | mad->common.version = 1; | 3680 | mad->common.version = cpu_to_be32(1); |
| 3678 | mad->common.opcode = IBMVFC_PASSTHRU; | 3681 | mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU); |
| 3679 | mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu); | 3682 | mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu)); |
| 3680 | mad->cmd_ioba.va = (u64)evt->crq.ioba + | 3683 | mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + |
| 3681 | offsetof(struct ibmvfc_passthru_mad, iu); | 3684 | offsetof(struct ibmvfc_passthru_mad, iu)); |
| 3682 | mad->cmd_ioba.len = sizeof(mad->iu); | 3685 | mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu)); |
| 3683 | mad->iu.cmd_len = sizeof(mad->fc_iu.payload); | 3686 | mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload)); |
| 3684 | mad->iu.rsp_len = sizeof(mad->fc_iu.response); | 3687 | mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response)); |
| 3685 | mad->iu.cmd.va = (u64)evt->crq.ioba + | 3688 | mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + |
| 3686 | offsetof(struct ibmvfc_passthru_mad, fc_iu) + | 3689 | offsetof(struct ibmvfc_passthru_mad, fc_iu) + |
| 3687 | offsetof(struct ibmvfc_passthru_fc_iu, payload); | 3690 | offsetof(struct ibmvfc_passthru_fc_iu, payload)); |
| 3688 | mad->iu.cmd.len = sizeof(mad->fc_iu.payload); | 3691 | mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload)); |
| 3689 | mad->iu.rsp.va = (u64)evt->crq.ioba + | 3692 | mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + |
| 3690 | offsetof(struct ibmvfc_passthru_mad, fc_iu) + | 3693 | offsetof(struct ibmvfc_passthru_mad, fc_iu) + |
| 3691 | offsetof(struct ibmvfc_passthru_fc_iu, response); | 3694 | offsetof(struct ibmvfc_passthru_fc_iu, response)); |
| 3692 | mad->iu.rsp.len = sizeof(mad->fc_iu.response); | 3695 | mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response)); |
| 3693 | } | 3696 | } |
| 3694 | 3697 | ||
| 3695 | /** | 3698 | /** |
| @@ -3748,11 +3751,11 @@ static void ibmvfc_adisc_timeout(struct ibmvfc_target *tgt) | |||
| 3748 | evt->tgt = tgt; | 3751 | evt->tgt = tgt; |
| 3749 | tmf = &evt->iu.tmf; | 3752 | tmf = &evt->iu.tmf; |
| 3750 | memset(tmf, 0, sizeof(*tmf)); | 3753 | memset(tmf, 0, sizeof(*tmf)); |
| 3751 | tmf->common.version = 1; | 3754 | tmf->common.version = cpu_to_be32(1); |
| 3752 | tmf->common.opcode = IBMVFC_TMF_MAD; | 3755 | tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD); |
| 3753 | tmf->common.length = sizeof(*tmf); | 3756 | tmf->common.length = cpu_to_be16(sizeof(*tmf)); |
| 3754 | tmf->scsi_id = tgt->scsi_id; | 3757 | tmf->scsi_id = cpu_to_be64(tgt->scsi_id); |
| 3755 | tmf->cancel_key = tgt->cancel_key; | 3758 | tmf->cancel_key = cpu_to_be32(tgt->cancel_key); |
| 3756 | 3759 | ||
| 3757 | rc = ibmvfc_send_event(evt, vhost, default_timeout); | 3760 | rc = ibmvfc_send_event(evt, vhost, default_timeout); |
| 3758 | 3761 | ||
| @@ -3794,16 +3797,16 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt) | |||
| 3794 | 3797 | ||
| 3795 | ibmvfc_init_passthru(evt); | 3798 | ibmvfc_init_passthru(evt); |
| 3796 | mad = &evt->iu.passthru; | 3799 | mad = &evt->iu.passthru; |
| 3797 | mad->iu.flags = IBMVFC_FC_ELS; | 3800 | mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS); |
| 3798 | mad->iu.scsi_id = tgt->scsi_id; | 3801 | mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id); |
| 3799 | mad->iu.cancel_key = tgt->cancel_key; | 3802 | mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key); |
| 3800 | 3803 | ||
| 3801 | mad->fc_iu.payload[0] = IBMVFC_ADISC; | 3804 | mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC); |
| 3802 | memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name, | 3805 | memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name, |
| 3803 | sizeof(vhost->login_buf->resp.port_name)); | 3806 | sizeof(vhost->login_buf->resp.port_name)); |
| 3804 | memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name, | 3807 | memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name, |
| 3805 | sizeof(vhost->login_buf->resp.node_name)); | 3808 | sizeof(vhost->login_buf->resp.node_name)); |
| 3806 | mad->fc_iu.payload[6] = vhost->login_buf->resp.scsi_id & 0x00ffffff; | 3809 | mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff); |
| 3807 | 3810 | ||
| 3808 | if (timer_pending(&tgt->timer)) | 3811 | if (timer_pending(&tgt->timer)) |
| 3809 | mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ)); | 3812 | mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ)); |
| @@ -3834,7 +3837,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) | |||
| 3834 | struct ibmvfc_target *tgt = evt->tgt; | 3837 | struct ibmvfc_target *tgt = evt->tgt; |
| 3835 | struct ibmvfc_host *vhost = evt->vhost; | 3838 | struct ibmvfc_host *vhost = evt->vhost; |
| 3836 | struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; | 3839 | struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; |
| 3837 | u32 status = rsp->common.status; | 3840 | u32 status = be16_to_cpu(rsp->common.status); |
| 3838 | int level = IBMVFC_DEFAULT_LOG_LEVEL; | 3841 | int level = IBMVFC_DEFAULT_LOG_LEVEL; |
| 3839 | 3842 | ||
| 3840 | vhost->discovery_threads--; | 3843 | vhost->discovery_threads--; |
| @@ -3842,8 +3845,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) | |||
| 3842 | switch (status) { | 3845 | switch (status) { |
| 3843 | case IBMVFC_MAD_SUCCESS: | 3846 | case IBMVFC_MAD_SUCCESS: |
| 3844 | tgt_dbg(tgt, "Query Target succeeded\n"); | 3847 | tgt_dbg(tgt, "Query Target succeeded\n"); |
| 3845 | tgt->new_scsi_id = rsp->scsi_id; | 3848 | tgt->new_scsi_id = be64_to_cpu(rsp->scsi_id); |
| 3846 | if (rsp->scsi_id != tgt->scsi_id) | 3849 | if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id) |
| 3847 | ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); | 3850 | ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); |
| 3848 | else | 3851 | else |
| 3849 | ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc); | 3852 | ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc); |
| @@ -3855,19 +3858,20 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) | |||
| 3855 | break; | 3858 | break; |
| 3856 | case IBMVFC_MAD_FAILED: | 3859 | case IBMVFC_MAD_FAILED: |
| 3857 | default: | 3860 | default: |
| 3858 | if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && | 3861 | if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && |
| 3859 | rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ && | 3862 | be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ && |
| 3860 | rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG) | 3863 | be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG) |
| 3861 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 3864 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
| 3862 | else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) | 3865 | else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) |
| 3863 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); | 3866 | level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); |
| 3864 | else | 3867 | else |
| 3865 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); | 3868 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); |
| 3866 | 3869 | ||
| 3867 | tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", | 3870 | tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", |
| 3868 | ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, | 3871 | ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), |
| 3869 | ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, | 3872 | rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), |
| 3870 | ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status); | 3873 | rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), |
| 3874 | rsp->fc_explain, status); | ||
| 3871 | break; | 3875 | break; |
| 3872 | }; | 3876 | }; |
| 3873 | 3877 | ||
| @@ -3897,10 +3901,10 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt) | |||
| 3897 | ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT); | 3901 | ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT); |
| 3898 | query_tgt = &evt->iu.query_tgt; | 3902 | query_tgt = &evt->iu.query_tgt; |
| 3899 | memset(query_tgt, 0, sizeof(*query_tgt)); | 3903 | memset(query_tgt, 0, sizeof(*query_tgt)); |
| 3900 | query_tgt->common.version = 1; | 3904 | query_tgt->common.version = cpu_to_be32(1); |
| 3901 | query_tgt->common.opcode = IBMVFC_QUERY_TARGET; | 3905 | query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET); |
| 3902 | query_tgt->common.length = sizeof(*query_tgt); | 3906 | query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt)); |
| 3903 | query_tgt->wwpn = tgt->ids.port_name; | 3907 | query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name); |
| 3904 | 3908 | ||
| 3905 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); | 3909 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); |
| 3906 | if (ibmvfc_send_event(evt, vhost, default_timeout)) { | 3910 | if (ibmvfc_send_event(evt, vhost, default_timeout)) { |
| @@ -3971,7 +3975,8 @@ static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost) | |||
| 3971 | 3975 | ||
| 3972 | for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++) | 3976 | for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++) |
| 3973 | rc = ibmvfc_alloc_target(vhost, | 3977 | rc = ibmvfc_alloc_target(vhost, |
| 3974 | vhost->disc_buf->scsi_id[i] & IBMVFC_DISC_TGT_SCSI_ID_MASK); | 3978 | be32_to_cpu(vhost->disc_buf->scsi_id[i]) & |
| 3979 | IBMVFC_DISC_TGT_SCSI_ID_MASK); | ||
| 3975 | 3980 | ||
| 3976 | return rc; | 3981 | return rc; |
| 3977 | } | 3982 | } |
| @@ -3985,19 +3990,20 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) | |||
| 3985 | { | 3990 | { |
| 3986 | struct ibmvfc_host *vhost = evt->vhost; | 3991 | struct ibmvfc_host *vhost = evt->vhost; |
| 3987 | struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; | 3992 | struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; |
| 3988 | u32 mad_status = rsp->common.status; | 3993 | u32 mad_status = be16_to_cpu(rsp->common.status); |
| 3989 | int level = IBMVFC_DEFAULT_LOG_LEVEL; | 3994 | int level = IBMVFC_DEFAULT_LOG_LEVEL; |
| 3990 | 3995 | ||
| 3991 | switch (mad_status) { | 3996 | switch (mad_status) { |
| 3992 | case IBMVFC_MAD_SUCCESS: | 3997 | case IBMVFC_MAD_SUCCESS: |
| 3993 | ibmvfc_dbg(vhost, "Discover Targets succeeded\n"); | 3998 | ibmvfc_dbg(vhost, "Discover Targets succeeded\n"); |
| 3994 | vhost->num_targets = rsp->num_written; | 3999 | vhost->num_targets = be32_to_cpu(rsp->num_written); |
| 3995 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); | 4000 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); |
| 3996 | break; | 4001 | break; |
| 3997 | case IBMVFC_MAD_FAILED: | 4002 | case IBMVFC_MAD_FAILED: |
| 3998 | level += ibmvfc_retry_host_init(vhost); | 4003 | level += ibmvfc_retry_host_init(vhost); |
| 3999 | ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", | 4004 | ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", |
| 4000 | ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); | 4005 | ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), |
| 4006 | rsp->status, rsp->error); | ||
| 4001 | break; | 4007 | break; |
| 4002 | case IBMVFC_MAD_DRIVER_FAILED: | 4008 | case IBMVFC_MAD_DRIVER_FAILED: |
| 4003 | break; | 4009 | break; |
| @@ -4024,12 +4030,12 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost) | |||
| 4024 | ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT); | 4030 | ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT); |
| 4025 | mad = &evt->iu.discover_targets; | 4031 | mad = &evt->iu.discover_targets; |
| 4026 | memset(mad, 0, sizeof(*mad)); | 4032 | memset(mad, 0, sizeof(*mad)); |
| 4027 | mad->common.version = 1; | 4033 | mad->common.version = cpu_to_be32(1); |
| 4028 | mad->common.opcode = IBMVFC_DISC_TARGETS; | 4034 | mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS); |
| 4029 | mad->common.length = sizeof(*mad); | 4035 | mad->common.length = cpu_to_be16(sizeof(*mad)); |
| 4030 | mad->bufflen = vhost->disc_buf_sz; | 4036 | mad->bufflen = cpu_to_be32(vhost->disc_buf_sz); |
| 4031 | mad->buffer.va = vhost->disc_buf_dma; | 4037 | mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma); |
| 4032 | mad->buffer.len = vhost->disc_buf_sz; | 4038 | mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz); |
| 4033 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); | 4039 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); |
| 4034 | 4040 | ||
| 4035 | if (!ibmvfc_send_event(evt, vhost, default_timeout)) | 4041 | if (!ibmvfc_send_event(evt, vhost, default_timeout)) |
| @@ -4046,7 +4052,7 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost) | |||
| 4046 | static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) | 4052 | static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) |
| 4047 | { | 4053 | { |
| 4048 | struct ibmvfc_host *vhost = evt->vhost; | 4054 | struct ibmvfc_host *vhost = evt->vhost; |
| 4049 | u32 mad_status = evt->xfer_iu->npiv_login.common.status; | 4055 | u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status); |
| 4050 | struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; | 4056 | struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; |
| 4051 | unsigned int npiv_max_sectors; | 4057 | unsigned int npiv_max_sectors; |
| 4052 | int level = IBMVFC_DEFAULT_LOG_LEVEL; | 4058 | int level = IBMVFC_DEFAULT_LOG_LEVEL; |
| @@ -4056,12 +4062,13 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) | |||
| 4056 | ibmvfc_free_event(evt); | 4062 | ibmvfc_free_event(evt); |
| 4057 | break; | 4063 | break; |
| 4058 | case IBMVFC_MAD_FAILED: | 4064 | case IBMVFC_MAD_FAILED: |
| 4059 | if (ibmvfc_retry_cmd(rsp->status, rsp->error)) | 4065 | if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) |
| 4060 | level += ibmvfc_retry_host_init(vhost); | 4066 | level += ibmvfc_retry_host_init(vhost); |
| 4061 | else | 4067 | else |
| 4062 | ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); | 4068 | ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); |
| 4063 | ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", | 4069 | ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", |
| 4064 | ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); | 4070 | ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), |
| 4071 | rsp->status, rsp->error); | ||
| 4065 | ibmvfc_free_event(evt); | 4072 | ibmvfc_free_event(evt); |
| 4066 | return; | 4073 | return; |
| 4067 | case IBMVFC_MAD_CRQ_ERROR: | 4074 | case IBMVFC_MAD_CRQ_ERROR: |
| @@ -4078,7 +4085,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) | |||
| 4078 | 4085 | ||
| 4079 | vhost->client_migrated = 0; | 4086 | vhost->client_migrated = 0; |
| 4080 | 4087 | ||
| 4081 | if (!(rsp->flags & IBMVFC_NATIVE_FC)) { | 4088 | if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) { |
| 4082 | dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n", | 4089 | dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n", |
| 4083 | rsp->flags); | 4090 | rsp->flags); |
| 4084 | ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); | 4091 | ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); |
| @@ -4086,7 +4093,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) | |||
| 4086 | return; | 4093 | return; |
| 4087 | } | 4094 | } |
| 4088 | 4095 | ||
| 4089 | if (rsp->max_cmds <= IBMVFC_NUM_INTERNAL_REQ) { | 4096 | if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) { |
| 4090 | dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n", | 4097 | dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n", |
| 4091 | rsp->max_cmds); | 4098 | rsp->max_cmds); |
| 4092 | ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); | 4099 | ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); |
| @@ -4095,27 +4102,27 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) | |||
| 4095 | } | 4102 | } |
| 4096 | 4103 | ||
| 4097 | vhost->logged_in = 1; | 4104 | vhost->logged_in = 1; |
| 4098 | npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS); | 4105 | npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS); |
| 4099 | dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", | 4106 | dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", |
| 4100 | rsp->partition_name, rsp->device_name, rsp->port_loc_code, | 4107 | rsp->partition_name, rsp->device_name, rsp->port_loc_code, |
| 4101 | rsp->drc_name, npiv_max_sectors); | 4108 | rsp->drc_name, npiv_max_sectors); |
| 4102 | 4109 | ||
| 4103 | fc_host_fabric_name(vhost->host) = rsp->node_name; | 4110 | fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name); |
| 4104 | fc_host_node_name(vhost->host) = rsp->node_name; | 4111 | fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name); |
| 4105 | fc_host_port_name(vhost->host) = rsp->port_name; | 4112 | fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name); |
| 4106 | fc_host_port_id(vhost->host) = rsp->scsi_id; | 4113 | fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id); |
| 4107 | fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV; | 4114 | fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV; |
| 4108 | fc_host_supported_classes(vhost->host) = 0; | 4115 | fc_host_supported_classes(vhost->host) = 0; |
| 4109 | if (rsp->service_parms.class1_parms[0] & 0x80000000) | 4116 | if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000) |
| 4110 | fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1; | 4117 | fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1; |
| 4111 | if (rsp->service_parms.class2_parms[0] & 0x80000000) | 4118 | if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000) |
| 4112 | fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2; | 4119 | fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2; |
| 4113 | if (rsp->service_parms.class3_parms[0] & 0x80000000) | 4120 | if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000) |
| 4114 | fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3; | 4121 | fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3; |
| 4115 | fc_host_maxframe_size(vhost->host) = | 4122 | fc_host_maxframe_size(vhost->host) = |
| 4116 | rsp->service_parms.common.bb_rcv_sz & 0x0fff; | 4123 | be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff; |
| 4117 | 4124 | ||
| 4118 | vhost->host->can_queue = rsp->max_cmds - IBMVFC_NUM_INTERNAL_REQ; | 4125 | vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ; |
| 4119 | vhost->host->max_sectors = npiv_max_sectors; | 4126 | vhost->host->max_sectors = npiv_max_sectors; |
| 4120 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); | 4127 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); |
| 4121 | wake_up(&vhost->work_wait_q); | 4128 | wake_up(&vhost->work_wait_q); |
| @@ -4138,11 +4145,11 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost) | |||
| 4138 | memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info)); | 4145 | memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info)); |
| 4139 | mad = &evt->iu.npiv_login; | 4146 | mad = &evt->iu.npiv_login; |
| 4140 | memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad)); | 4147 | memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad)); |
| 4141 | mad->common.version = 1; | 4148 | mad->common.version = cpu_to_be32(1); |
| 4142 | mad->common.opcode = IBMVFC_NPIV_LOGIN; | 4149 | mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN); |
| 4143 | mad->common.length = sizeof(struct ibmvfc_npiv_login_mad); | 4150 | mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad)); |
| 4144 | mad->buffer.va = vhost->login_buf_dma; | 4151 | mad->buffer.va = cpu_to_be64(vhost->login_buf_dma); |
| 4145 | mad->buffer.len = sizeof(*vhost->login_buf); | 4152 | mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf)); |
| 4146 | 4153 | ||
| 4147 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); | 4154 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); |
| 4148 | 4155 | ||
| @@ -4160,7 +4167,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost) | |||
| 4160 | static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt) | 4167 | static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt) |
| 4161 | { | 4168 | { |
| 4162 | struct ibmvfc_host *vhost = evt->vhost; | 4169 | struct ibmvfc_host *vhost = evt->vhost; |
| 4163 | u32 mad_status = evt->xfer_iu->npiv_logout.common.status; | 4170 | u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status); |
| 4164 | 4171 | ||
| 4165 | ibmvfc_free_event(evt); | 4172 | ibmvfc_free_event(evt); |
| 4166 | 4173 | ||
| @@ -4199,9 +4206,9 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost) | |||
| 4199 | 4206 | ||
| 4200 | mad = &evt->iu.npiv_logout; | 4207 | mad = &evt->iu.npiv_logout; |
| 4201 | memset(mad, 0, sizeof(*mad)); | 4208 | memset(mad, 0, sizeof(*mad)); |
| 4202 | mad->common.version = 1; | 4209 | mad->common.version = cpu_to_be32(1); |
| 4203 | mad->common.opcode = IBMVFC_NPIV_LOGOUT; | 4210 | mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT); |
| 4204 | mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad); | 4211 | mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad)); |
| 4205 | 4212 | ||
| 4206 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT); | 4213 | ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT); |
| 4207 | 4214 | ||
| @@ -4343,14 +4350,14 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) | |||
| 4343 | if (rport) { | 4350 | if (rport) { |
| 4344 | tgt_dbg(tgt, "rport add succeeded\n"); | 4351 | tgt_dbg(tgt, "rport add succeeded\n"); |
| 4345 | tgt->rport = rport; | 4352 | tgt->rport = rport; |
| 4346 | rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; | 4353 | rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff; |
| 4347 | rport->supported_classes = 0; | 4354 | rport->supported_classes = 0; |
| 4348 | tgt->target_id = rport->scsi_target_id; | 4355 | tgt->target_id = rport->scsi_target_id; |
| 4349 | if (tgt->service_parms.class1_parms[0] & 0x80000000) | 4356 | if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000) |
| 4350 | rport->supported_classes |= FC_COS_CLASS1; | 4357 | rport->supported_classes |= FC_COS_CLASS1; |
| 4351 | if (tgt->service_parms.class2_parms[0] & 0x80000000) | 4358 | if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000) |
| 4352 | rport->supported_classes |= FC_COS_CLASS2; | 4359 | rport->supported_classes |= FC_COS_CLASS2; |
| 4353 | if (tgt->service_parms.class3_parms[0] & 0x80000000) | 4360 | if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000) |
| 4354 | rport->supported_classes |= FC_COS_CLASS3; | 4361 | rport->supported_classes |= FC_COS_CLASS3; |
| 4355 | if (rport->rqst_q) | 4362 | if (rport->rqst_q) |
| 4356 | blk_queue_max_segments(rport->rqst_q, 1); | 4363 | blk_queue_max_segments(rport->rqst_q, 1); |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 017a5290e8c1..8fae03215a85 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h | |||
| @@ -135,12 +135,12 @@ enum ibmvfc_mad_types { | |||
| 135 | }; | 135 | }; |
| 136 | 136 | ||
| 137 | struct ibmvfc_mad_common { | 137 | struct ibmvfc_mad_common { |
| 138 | u32 version; | 138 | __be32 version; |
| 139 | u32 reserved; | 139 | __be32 reserved; |
| 140 | u32 opcode; | 140 | __be32 opcode; |
| 141 | u16 status; | 141 | __be16 status; |
| 142 | u16 length; | 142 | __be16 length; |
| 143 | u64 tag; | 143 | __be64 tag; |
| 144 | }__attribute__((packed, aligned (8))); | 144 | }__attribute__((packed, aligned (8))); |
| 145 | 145 | ||
| 146 | struct ibmvfc_npiv_login_mad { | 146 | struct ibmvfc_npiv_login_mad { |
| @@ -155,76 +155,76 @@ struct ibmvfc_npiv_logout_mad { | |||
| 155 | #define IBMVFC_MAX_NAME 256 | 155 | #define IBMVFC_MAX_NAME 256 |
| 156 | 156 | ||
| 157 | struct ibmvfc_npiv_login { | 157 | struct ibmvfc_npiv_login { |
| 158 | u32 ostype; | 158 | __be32 ostype; |
| 159 | #define IBMVFC_OS_LINUX 0x02 | 159 | #define IBMVFC_OS_LINUX 0x02 |
| 160 | u32 pad; | 160 | __be32 pad; |
| 161 | u64 max_dma_len; | 161 | __be64 max_dma_len; |
| 162 | u32 max_payload; | 162 | __be32 max_payload; |
| 163 | u32 max_response; | 163 | __be32 max_response; |
| 164 | u32 partition_num; | 164 | __be32 partition_num; |
| 165 | u32 vfc_frame_version; | 165 | __be32 vfc_frame_version; |
| 166 | u16 fcp_version; | 166 | __be16 fcp_version; |
| 167 | u16 flags; | 167 | __be16 flags; |
| 168 | #define IBMVFC_CLIENT_MIGRATED 0x01 | 168 | #define IBMVFC_CLIENT_MIGRATED 0x01 |
| 169 | #define IBMVFC_FLUSH_ON_HALT 0x02 | 169 | #define IBMVFC_FLUSH_ON_HALT 0x02 |
| 170 | u32 max_cmds; | 170 | __be32 max_cmds; |
| 171 | u64 capabilities; | 171 | __be64 capabilities; |
| 172 | #define IBMVFC_CAN_MIGRATE 0x01 | 172 | #define IBMVFC_CAN_MIGRATE 0x01 |
| 173 | u64 node_name; | 173 | __be64 node_name; |
| 174 | struct srp_direct_buf async; | 174 | struct srp_direct_buf async; |
| 175 | u8 partition_name[IBMVFC_MAX_NAME]; | 175 | u8 partition_name[IBMVFC_MAX_NAME]; |
| 176 | u8 device_name[IBMVFC_MAX_NAME]; | 176 | u8 device_name[IBMVFC_MAX_NAME]; |
| 177 | u8 drc_name[IBMVFC_MAX_NAME]; | 177 | u8 drc_name[IBMVFC_MAX_NAME]; |
| 178 | u64 reserved2[2]; | 178 | __be64 reserved2[2]; |
| 179 | }__attribute__((packed, aligned (8))); | 179 | }__attribute__((packed, aligned (8))); |
| 180 | 180 | ||
| 181 | struct ibmvfc_common_svc_parms { | 181 | struct ibmvfc_common_svc_parms { |
| 182 | u16 fcph_version; | 182 | __be16 fcph_version; |
| 183 | u16 b2b_credit; | 183 | __be16 b2b_credit; |
| 184 | u16 features; | 184 | __be16 features; |
| 185 | u16 bb_rcv_sz; /* upper nibble is BB_SC_N */ | 185 | __be16 bb_rcv_sz; /* upper nibble is BB_SC_N */ |
| 186 | u32 ratov; | 186 | __be32 ratov; |
| 187 | u32 edtov; | 187 | __be32 edtov; |
| 188 | }__attribute__((packed, aligned (4))); | 188 | }__attribute__((packed, aligned (4))); |
| 189 | 189 | ||
| 190 | struct ibmvfc_service_parms { | 190 | struct ibmvfc_service_parms { |
| 191 | struct ibmvfc_common_svc_parms common; | 191 | struct ibmvfc_common_svc_parms common; |
| 192 | u8 port_name[8]; | 192 | u8 port_name[8]; |
| 193 | u8 node_name[8]; | 193 | u8 node_name[8]; |
| 194 | u32 class1_parms[4]; | 194 | __be32 class1_parms[4]; |
| 195 | u32 class2_parms[4]; | 195 | __be32 class2_parms[4]; |
| 196 | u32 class3_parms[4]; | 196 | __be32 class3_parms[4]; |
| 197 | u32 obsolete[4]; | 197 | __be32 obsolete[4]; |
| 198 | u32 vendor_version[4]; | 198 | __be32 vendor_version[4]; |
| 199 | u32 services_avail[2]; | 199 | __be32 services_avail[2]; |
| 200 | u32 ext_len; | 200 | __be32 ext_len; |
| 201 | u32 reserved[30]; | 201 | __be32 reserved[30]; |
| 202 | u32 clk_sync_qos[2]; | 202 | __be32 clk_sync_qos[2]; |
| 203 | }__attribute__((packed, aligned (4))); | 203 | }__attribute__((packed, aligned (4))); |
| 204 | 204 | ||
| 205 | struct ibmvfc_npiv_login_resp { | 205 | struct ibmvfc_npiv_login_resp { |
| 206 | u32 version; | 206 | __be32 version; |
| 207 | u16 status; | 207 | __be16 status; |
| 208 | u16 error; | 208 | __be16 error; |
| 209 | u32 flags; | 209 | __be32 flags; |
| 210 | #define IBMVFC_NATIVE_FC 0x01 | 210 | #define IBMVFC_NATIVE_FC 0x01 |
| 211 | u32 reserved; | 211 | __be32 reserved; |
| 212 | u64 capabilities; | 212 | __be64 capabilities; |
| 213 | #define IBMVFC_CAN_FLUSH_ON_HALT 0x08 | 213 | #define IBMVFC_CAN_FLUSH_ON_HALT 0x08 |
| 214 | #define IBMVFC_CAN_SUPPRESS_ABTS 0x10 | 214 | #define IBMVFC_CAN_SUPPRESS_ABTS 0x10 |
| 215 | u32 max_cmds; | 215 | __be32 max_cmds; |
| 216 | u32 scsi_id_sz; | 216 | __be32 scsi_id_sz; |
| 217 | u64 max_dma_len; | 217 | __be64 max_dma_len; |
| 218 | u64 scsi_id; | 218 | __be64 scsi_id; |
| 219 | u64 port_name; | 219 | __be64 port_name; |
| 220 | u64 node_name; | 220 | __be64 node_name; |
| 221 | u64 link_speed; | 221 | __be64 link_speed; |
| 222 | u8 partition_name[IBMVFC_MAX_NAME]; | 222 | u8 partition_name[IBMVFC_MAX_NAME]; |
| 223 | u8 device_name[IBMVFC_MAX_NAME]; | 223 | u8 device_name[IBMVFC_MAX_NAME]; |
| 224 | u8 port_loc_code[IBMVFC_MAX_NAME]; | 224 | u8 port_loc_code[IBMVFC_MAX_NAME]; |
| 225 | u8 drc_name[IBMVFC_MAX_NAME]; | 225 | u8 drc_name[IBMVFC_MAX_NAME]; |
| 226 | struct ibmvfc_service_parms service_parms; | 226 | struct ibmvfc_service_parms service_parms; |
| 227 | u64 reserved2; | 227 | __be64 reserved2; |
| 228 | }__attribute__((packed, aligned (8))); | 228 | }__attribute__((packed, aligned (8))); |
| 229 | 229 | ||
| 230 | union ibmvfc_npiv_login_data { | 230 | union ibmvfc_npiv_login_data { |
| @@ -233,20 +233,20 @@ union ibmvfc_npiv_login_data { | |||
| 233 | }__attribute__((packed, aligned (8))); | 233 | }__attribute__((packed, aligned (8))); |
| 234 | 234 | ||
| 235 | struct ibmvfc_discover_targets_buf { | 235 | struct ibmvfc_discover_targets_buf { |
| 236 | u32 scsi_id[1]; | 236 | __be32 scsi_id[1]; |
| 237 | #define IBMVFC_DISC_TGT_SCSI_ID_MASK 0x00ffffff | 237 | #define IBMVFC_DISC_TGT_SCSI_ID_MASK 0x00ffffff |
| 238 | }; | 238 | }; |
| 239 | 239 | ||
| 240 | struct ibmvfc_discover_targets { | 240 | struct ibmvfc_discover_targets { |
| 241 | struct ibmvfc_mad_common common; | 241 | struct ibmvfc_mad_common common; |
| 242 | struct srp_direct_buf buffer; | 242 | struct srp_direct_buf buffer; |
| 243 | u32 flags; | 243 | __be32 flags; |
| 244 | u16 status; | 244 | __be16 status; |
| 245 | u16 error; | 245 | __be16 error; |
| 246 | u32 bufflen; | 246 | __be32 bufflen; |
| 247 | u32 num_avail; | 247 | __be32 num_avail; |
| 248 | u32 num_written; | 248 | __be32 num_written; |
| 249 | u64 reserved[2]; | 249 | __be64 reserved[2]; |
| 250 | }__attribute__((packed, aligned (8))); | 250 | }__attribute__((packed, aligned (8))); |
| 251 | 251 | ||
| 252 | enum ibmvfc_fc_reason { | 252 | enum ibmvfc_fc_reason { |
| @@ -278,32 +278,32 @@ enum ibmvfc_gs_explain { | |||
| 278 | 278 | ||
| 279 | struct ibmvfc_port_login { | 279 | struct ibmvfc_port_login { |
| 280 | struct ibmvfc_mad_common common; | 280 | struct ibmvfc_mad_common common; |
| 281 | u64 scsi_id; | 281 | __be64 scsi_id; |
| 282 | u16 reserved; | 282 | __be16 reserved; |
| 283 | u16 fc_service_class; | 283 | __be16 fc_service_class; |
| 284 | u32 blksz; | 284 | __be32 blksz; |
| 285 | u32 hdr_per_blk; | 285 | __be32 hdr_per_blk; |
| 286 | u16 status; | 286 | __be16 status; |
| 287 | u16 error; /* also fc_reason */ | 287 | __be16 error; /* also fc_reason */ |
| 288 | u16 fc_explain; | 288 | __be16 fc_explain; |
| 289 | u16 fc_type; | 289 | __be16 fc_type; |
| 290 | u32 reserved2; | 290 | __be32 reserved2; |
| 291 | struct ibmvfc_service_parms service_parms; | 291 | struct ibmvfc_service_parms service_parms; |
| 292 | struct ibmvfc_service_parms service_parms_change; | 292 | struct ibmvfc_service_parms service_parms_change; |
| 293 | u64 reserved3[2]; | 293 | __be64 reserved3[2]; |
| 294 | }__attribute__((packed, aligned (8))); | 294 | }__attribute__((packed, aligned (8))); |
| 295 | 295 | ||
| 296 | struct ibmvfc_prli_svc_parms { | 296 | struct ibmvfc_prli_svc_parms { |
| 297 | u8 type; | 297 | u8 type; |
| 298 | #define IBMVFC_SCSI_FCP_TYPE 0x08 | 298 | #define IBMVFC_SCSI_FCP_TYPE 0x08 |
| 299 | u8 type_ext; | 299 | u8 type_ext; |
| 300 | u16 flags; | 300 | __be16 flags; |
| 301 | #define IBMVFC_PRLI_ORIG_PA_VALID 0x8000 | 301 | #define IBMVFC_PRLI_ORIG_PA_VALID 0x8000 |
| 302 | #define IBMVFC_PRLI_RESP_PA_VALID 0x4000 | 302 | #define IBMVFC_PRLI_RESP_PA_VALID 0x4000 |
| 303 | #define IBMVFC_PRLI_EST_IMG_PAIR 0x2000 | 303 | #define IBMVFC_PRLI_EST_IMG_PAIR 0x2000 |
| 304 | u32 orig_pa; | 304 | __be32 orig_pa; |
| 305 | u32 resp_pa; | 305 | __be32 resp_pa; |
| 306 | u32 service_parms; | 306 | __be32 service_parms; |
| 307 | #define IBMVFC_PRLI_TASK_RETRY 0x00000200 | 307 | #define IBMVFC_PRLI_TASK_RETRY 0x00000200 |
| 308 | #define IBMVFC_PRLI_RETRY 0x00000100 | 308 | #define IBMVFC_PRLI_RETRY 0x00000100 |
| 309 | #define IBMVFC_PRLI_DATA_OVERLAY 0x00000040 | 309 | #define IBMVFC_PRLI_DATA_OVERLAY 0x00000040 |
| @@ -315,47 +315,47 @@ struct ibmvfc_prli_svc_parms { | |||
| 315 | 315 | ||
| 316 | struct ibmvfc_process_login { | 316 | struct ibmvfc_process_login { |
| 317 | struct ibmvfc_mad_common common; | 317 | struct ibmvfc_mad_common common; |
| 318 | u64 scsi_id; | 318 | __be64 scsi_id; |
| 319 | struct ibmvfc_prli_svc_parms parms; | 319 | struct ibmvfc_prli_svc_parms parms; |
| 320 | u8 reserved[48]; | 320 | u8 reserved[48]; |
| 321 | u16 status; | 321 | __be16 status; |
| 322 | u16 error; /* also fc_reason */ | 322 | __be16 error; /* also fc_reason */ |
| 323 | u32 reserved2; | 323 | __be32 reserved2; |
| 324 | u64 reserved3[2]; | 324 | __be64 reserved3[2]; |
| 325 | }__attribute__((packed, aligned (8))); | 325 | }__attribute__((packed, aligned (8))); |
| 326 | 326 | ||
| 327 | struct ibmvfc_query_tgt { | 327 | struct ibmvfc_query_tgt { |
| 328 | struct ibmvfc_mad_common common; | 328 | struct ibmvfc_mad_common common; |
| 329 | u64 wwpn; | 329 | __be64 wwpn; |
| 330 | u64 scsi_id; | 330 | __be64 scsi_id; |
| 331 | u16 status; | 331 | __be16 status; |
| 332 | u16 error; | 332 | __be16 error; |
| 333 | u16 fc_explain; | 333 | __be16 fc_explain; |
| 334 | u16 fc_type; | 334 | __be16 fc_type; |
| 335 | u64 reserved[2]; | 335 | __be64 reserved[2]; |
| 336 | }__attribute__((packed, aligned (8))); | 336 | }__attribute__((packed, aligned (8))); |
| 337 | 337 | ||
| 338 | struct ibmvfc_implicit_logout { | 338 | struct ibmvfc_implicit_logout { |
| 339 | struct ibmvfc_mad_common common; | 339 | struct ibmvfc_mad_common common; |
| 340 | u64 old_scsi_id; | 340 | __be64 old_scsi_id; |
| 341 | u64 reserved[2]; | 341 | __be64 reserved[2]; |
| 342 | }__attribute__((packed, aligned (8))); | 342 | }__attribute__((packed, aligned (8))); |
| 343 | 343 | ||
| 344 | struct ibmvfc_tmf { | 344 | struct ibmvfc_tmf { |
| 345 | struct ibmvfc_mad_common common; | 345 | struct ibmvfc_mad_common common; |
| 346 | u64 scsi_id; | 346 | __be64 scsi_id; |
| 347 | struct scsi_lun lun; | 347 | struct scsi_lun lun; |
| 348 | u32 flags; | 348 | __be32 flags; |
| 349 | #define IBMVFC_TMF_ABORT_TASK 0x02 | 349 | #define IBMVFC_TMF_ABORT_TASK 0x02 |
| 350 | #define IBMVFC_TMF_ABORT_TASK_SET 0x04 | 350 | #define IBMVFC_TMF_ABORT_TASK_SET 0x04 |
| 351 | #define IBMVFC_TMF_LUN_RESET 0x10 | 351 | #define IBMVFC_TMF_LUN_RESET 0x10 |
| 352 | #define IBMVFC_TMF_TGT_RESET 0x20 | 352 | #define IBMVFC_TMF_TGT_RESET 0x20 |
| 353 | #define IBMVFC_TMF_LUA_VALID 0x40 | 353 | #define IBMVFC_TMF_LUA_VALID 0x40 |
| 354 | #define IBMVFC_TMF_SUPPRESS_ABTS 0x80 | 354 | #define IBMVFC_TMF_SUPPRESS_ABTS 0x80 |
| 355 | u32 cancel_key; | 355 | __be32 cancel_key; |
| 356 | u32 my_cancel_key; | 356 | __be32 my_cancel_key; |
| 357 | u32 pad; | 357 | __be32 pad; |
| 358 | u64 reserved[2]; | 358 | __be64 reserved[2]; |
| 359 | }__attribute__((packed, aligned (8))); | 359 | }__attribute__((packed, aligned (8))); |
| 360 | 360 | ||
| 361 | enum ibmvfc_fcp_rsp_info_codes { | 361 | enum ibmvfc_fcp_rsp_info_codes { |
| @@ -366,7 +366,7 @@ enum ibmvfc_fcp_rsp_info_codes { | |||
| 366 | }; | 366 | }; |
| 367 | 367 | ||
| 368 | struct ibmvfc_fcp_rsp_info { | 368 | struct ibmvfc_fcp_rsp_info { |
| 369 | u16 reserved; | 369 | __be16 reserved; |
| 370 | u8 rsp_code; | 370 | u8 rsp_code; |
| 371 | u8 reserved2[4]; | 371 | u8 reserved2[4]; |
| 372 | }__attribute__((packed, aligned (2))); | 372 | }__attribute__((packed, aligned (2))); |
| @@ -388,13 +388,13 @@ union ibmvfc_fcp_rsp_data { | |||
| 388 | }__attribute__((packed, aligned (8))); | 388 | }__attribute__((packed, aligned (8))); |
| 389 | 389 | ||
| 390 | struct ibmvfc_fcp_rsp { | 390 | struct ibmvfc_fcp_rsp { |
| 391 | u64 reserved; | 391 | __be64 reserved; |
| 392 | u16 retry_delay_timer; | 392 | __be16 retry_delay_timer; |
| 393 | u8 flags; | 393 | u8 flags; |
| 394 | u8 scsi_status; | 394 | u8 scsi_status; |
| 395 | u32 fcp_resid; | 395 | __be32 fcp_resid; |
| 396 | u32 fcp_sense_len; | 396 | __be32 fcp_sense_len; |
| 397 | u32 fcp_rsp_len; | 397 | __be32 fcp_rsp_len; |
| 398 | union ibmvfc_fcp_rsp_data data; | 398 | union ibmvfc_fcp_rsp_data data; |
| 399 | }__attribute__((packed, aligned (8))); | 399 | }__attribute__((packed, aligned (8))); |
| 400 | 400 | ||
| @@ -429,58 +429,58 @@ struct ibmvfc_fcp_cmd_iu { | |||
| 429 | #define IBMVFC_RDDATA 0x02 | 429 | #define IBMVFC_RDDATA 0x02 |
| 430 | #define IBMVFC_WRDATA 0x01 | 430 | #define IBMVFC_WRDATA 0x01 |
| 431 | u8 cdb[IBMVFC_MAX_CDB_LEN]; | 431 | u8 cdb[IBMVFC_MAX_CDB_LEN]; |
| 432 | u32 xfer_len; | 432 | __be32 xfer_len; |
| 433 | }__attribute__((packed, aligned (4))); | 433 | }__attribute__((packed, aligned (4))); |
| 434 | 434 | ||
| 435 | struct ibmvfc_cmd { | 435 | struct ibmvfc_cmd { |
| 436 | u64 task_tag; | 436 | __be64 task_tag; |
| 437 | u32 frame_type; | 437 | __be32 frame_type; |
| 438 | u32 payload_len; | 438 | __be32 payload_len; |
| 439 | u32 resp_len; | 439 | __be32 resp_len; |
| 440 | u32 adapter_resid; | 440 | __be32 adapter_resid; |
| 441 | u16 status; | 441 | __be16 status; |
| 442 | u16 error; | 442 | __be16 error; |
| 443 | u16 flags; | 443 | __be16 flags; |
| 444 | u16 response_flags; | 444 | __be16 response_flags; |
| 445 | #define IBMVFC_ADAPTER_RESID_VALID 0x01 | 445 | #define IBMVFC_ADAPTER_RESID_VALID 0x01 |
| 446 | u32 cancel_key; | 446 | __be32 cancel_key; |
| 447 | u32 exchange_id; | 447 | __be32 exchange_id; |
| 448 | struct srp_direct_buf ext_func; | 448 | struct srp_direct_buf ext_func; |
| 449 | struct srp_direct_buf ioba; | 449 | struct srp_direct_buf ioba; |
| 450 | struct srp_direct_buf resp; | 450 | struct srp_direct_buf resp; |
| 451 | u64 correlation; | 451 | __be64 correlation; |
| 452 | u64 tgt_scsi_id; | 452 | __be64 tgt_scsi_id; |
| 453 | u64 tag; | 453 | __be64 tag; |
| 454 | u64 reserved3[2]; | 454 | __be64 reserved3[2]; |
| 455 | struct ibmvfc_fcp_cmd_iu iu; | 455 | struct ibmvfc_fcp_cmd_iu iu; |
| 456 | struct ibmvfc_fcp_rsp rsp; | 456 | struct ibmvfc_fcp_rsp rsp; |
| 457 | }__attribute__((packed, aligned (8))); | 457 | }__attribute__((packed, aligned (8))); |
| 458 | 458 | ||
| 459 | struct ibmvfc_passthru_fc_iu { | 459 | struct ibmvfc_passthru_fc_iu { |
| 460 | u32 payload[7]; | 460 | __be32 payload[7]; |
| 461 | #define IBMVFC_ADISC 0x52000000 | 461 | #define IBMVFC_ADISC 0x52000000 |
| 462 | u32 response[7]; | 462 | __be32 response[7]; |
| 463 | }; | 463 | }; |
| 464 | 464 | ||
| 465 | struct ibmvfc_passthru_iu { | 465 | struct ibmvfc_passthru_iu { |
| 466 | u64 task_tag; | 466 | __be64 task_tag; |
| 467 | u32 cmd_len; | 467 | __be32 cmd_len; |
| 468 | u32 rsp_len; | 468 | __be32 rsp_len; |
| 469 | u16 status; | 469 | __be16 status; |
| 470 | u16 error; | 470 | __be16 error; |
| 471 | u32 flags; | 471 | __be32 flags; |
| 472 | #define IBMVFC_FC_ELS 0x01 | 472 | #define IBMVFC_FC_ELS 0x01 |
| 473 | #define IBMVFC_FC_CT_IU 0x02 | 473 | #define IBMVFC_FC_CT_IU 0x02 |
| 474 | u32 cancel_key; | 474 | __be32 cancel_key; |
| 475 | #define IBMVFC_PASSTHRU_CANCEL_KEY 0x80000000 | 475 | #define IBMVFC_PASSTHRU_CANCEL_KEY 0x80000000 |
| 476 | #define IBMVFC_INTERNAL_CANCEL_KEY 0x80000001 | 476 | #define IBMVFC_INTERNAL_CANCEL_KEY 0x80000001 |
| 477 | u32 reserved; | 477 | __be32 reserved; |
| 478 | struct srp_direct_buf cmd; | 478 | struct srp_direct_buf cmd; |
| 479 | struct srp_direct_buf rsp; | 479 | struct srp_direct_buf rsp; |
| 480 | u64 correlation; | 480 | __be64 correlation; |
| 481 | u64 scsi_id; | 481 | __be64 scsi_id; |
| 482 | u64 tag; | 482 | __be64 tag; |
| 483 | u64 reserved2[2]; | 483 | __be64 reserved2[2]; |
| 484 | }__attribute__((packed, aligned (8))); | 484 | }__attribute__((packed, aligned (8))); |
| 485 | 485 | ||
| 486 | struct ibmvfc_passthru_mad { | 486 | struct ibmvfc_passthru_mad { |
| @@ -552,7 +552,7 @@ struct ibmvfc_crq { | |||
| 552 | volatile u8 valid; | 552 | volatile u8 valid; |
| 553 | volatile u8 format; | 553 | volatile u8 format; |
| 554 | u8 reserved[6]; | 554 | u8 reserved[6]; |
| 555 | volatile u64 ioba; | 555 | volatile __be64 ioba; |
| 556 | }__attribute__((packed, aligned (8))); | 556 | }__attribute__((packed, aligned (8))); |
| 557 | 557 | ||
| 558 | struct ibmvfc_crq_queue { | 558 | struct ibmvfc_crq_queue { |
| @@ -572,12 +572,12 @@ struct ibmvfc_async_crq { | |||
| 572 | volatile u8 valid; | 572 | volatile u8 valid; |
| 573 | u8 link_state; | 573 | u8 link_state; |
| 574 | u8 pad[2]; | 574 | u8 pad[2]; |
| 575 | u32 pad2; | 575 | __be32 pad2; |
| 576 | volatile u64 event; | 576 | volatile __be64 event; |
| 577 | volatile u64 scsi_id; | 577 | volatile __be64 scsi_id; |
| 578 | volatile u64 wwpn; | 578 | volatile __be64 wwpn; |
| 579 | volatile u64 node_name; | 579 | volatile __be64 node_name; |
| 580 | u64 reserved; | 580 | __be64 reserved; |
| 581 | }__attribute__((packed, aligned (8))); | 581 | }__attribute__((packed, aligned (8))); |
| 582 | 582 | ||
| 583 | struct ibmvfc_async_crq_queue { | 583 | struct ibmvfc_async_crq_queue { |
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c deleted file mode 100644 index 56f8a861ed72..000000000000 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c +++ /dev/null | |||
| @@ -1,1001 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * IBM eServer i/pSeries Virtual SCSI Target Driver | ||
| 3 | * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp. | ||
| 4 | * Santiago Leon (santil@us.ibm.com) IBM Corp. | ||
| 5 | * Linda Xie (lxie@us.ibm.com) IBM Corp. | ||
| 6 | * | ||
| 7 | * Copyright (C) 2005-2006 FUJITA Tomonori <tomof@acm.org> | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU General Public License as published by | ||
| 11 | * the Free Software Foundation; either version 2 of the License, or | ||
| 12 | * (at your option) any later version. | ||
| 13 | * | ||
| 14 | * This program is distributed in the hope that it will be useful, | ||
| 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 17 | * GNU General Public License for more details. | ||
| 18 | * | ||
| 19 | * You should have received a copy of the GNU General Public License | ||
| 20 | * along with this program; if not, write to the Free Software | ||
| 21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
| 22 | * USA | ||
| 23 | */ | ||
| 24 | #include <linux/interrupt.h> | ||
| 25 | #include <linux/module.h> | ||
| 26 | #include <linux/slab.h> | ||
| 27 | #include <scsi/scsi.h> | ||
| 28 | #include <scsi/scsi_host.h> | ||
| 29 | #include <scsi/scsi_transport_srp.h> | ||
| 30 | #include <scsi/scsi_tgt.h> | ||
| 31 | #include <scsi/libsrp.h> | ||
| 32 | #include <asm/hvcall.h> | ||
| 33 | #include <asm/iommu.h> | ||
| 34 | #include <asm/prom.h> | ||
| 35 | #include <asm/vio.h> | ||
| 36 | |||
| 37 | #include "ibmvscsi.h" | ||
| 38 | |||
| 39 | #define INITIAL_SRP_LIMIT 16 | ||
| 40 | #define DEFAULT_MAX_SECTORS 256 | ||
| 41 | |||
| 42 | #define TGT_NAME "ibmvstgt" | ||
| 43 | |||
| 44 | /* | ||
| 45 | * Hypervisor calls. | ||
| 46 | */ | ||
| 47 | #define h_copy_rdma(l, sa, sb, da, db) \ | ||
| 48 | plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db) | ||
| 49 | #define h_send_crq(ua, l, h) \ | ||
| 50 | plpar_hcall_norets(H_SEND_CRQ, ua, l, h) | ||
| 51 | #define h_reg_crq(ua, tok, sz)\ | ||
| 52 | plpar_hcall_norets(H_REG_CRQ, ua, tok, sz); | ||
| 53 | #define h_free_crq(ua) \ | ||
| 54 | plpar_hcall_norets(H_FREE_CRQ, ua); | ||
| 55 | |||
| 56 | /* tmp - will replace with SCSI logging stuff */ | ||
| 57 | #define eprintk(fmt, args...) \ | ||
| 58 | do { \ | ||
| 59 | printk("%s(%d) " fmt, __func__, __LINE__, ##args); \ | ||
| 60 | } while (0) | ||
| 61 | /* #define dprintk eprintk */ | ||
| 62 | #define dprintk(fmt, args...) | ||
| 63 | |||
| 64 | struct vio_port { | ||
| 65 | struct vio_dev *dma_dev; | ||
| 66 | |||
| 67 | struct crq_queue crq_queue; | ||
| 68 | struct work_struct crq_work; | ||
| 69 | |||
| 70 | unsigned long liobn; | ||
| 71 | unsigned long riobn; | ||
| 72 | struct srp_target *target; | ||
| 73 | |||
| 74 | struct srp_rport *rport; | ||
| 75 | }; | ||
| 76 | |||
| 77 | static struct workqueue_struct *vtgtd; | ||
| 78 | static struct scsi_transport_template *ibmvstgt_transport_template; | ||
| 79 | |||
| 80 | /* | ||
| 81 | * These are fixed for the system and come from the Open Firmware device tree. | ||
| 82 | * We just store them here to save getting them every time. | ||
| 83 | */ | ||
| 84 | static char system_id[64] = ""; | ||
| 85 | static char partition_name[97] = "UNKNOWN"; | ||
| 86 | static unsigned int partition_number = -1; | ||
| 87 | |||
| 88 | static struct vio_port *target_to_port(struct srp_target *target) | ||
| 89 | { | ||
| 90 | return (struct vio_port *) target->ldata; | ||
| 91 | } | ||
| 92 | |||
| 93 | static inline union viosrp_iu *vio_iu(struct iu_entry *iue) | ||
| 94 | { | ||
| 95 | return (union viosrp_iu *) (iue->sbuf->buf); | ||
| 96 | } | ||
| 97 | |||
| 98 | static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format) | ||
| 99 | { | ||
| 100 | struct srp_target *target = iue->target; | ||
| 101 | struct vio_port *vport = target_to_port(target); | ||
| 102 | long rc, rc1; | ||
| 103 | union { | ||
| 104 | struct viosrp_crq cooked; | ||
| 105 | uint64_t raw[2]; | ||
| 106 | } crq; | ||
| 107 | |||
| 108 | /* First copy the SRP */ | ||
| 109 | rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma, | ||
| 110 | vport->riobn, iue->remote_token); | ||
| 111 | |||
| 112 | if (rc) | ||
| 113 | eprintk("Error %ld transferring data\n", rc); | ||
| 114 | |||
| 115 | crq.cooked.valid = 0x80; | ||
| 116 | crq.cooked.format = format; | ||
| 117 | crq.cooked.reserved = 0x00; | ||
| 118 | crq.cooked.timeout = 0x00; | ||
| 119 | crq.cooked.IU_length = length; | ||
| 120 | crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag; | ||
| 121 | |||
| 122 | if (rc == 0) | ||
| 123 | crq.cooked.status = 0x99; /* Just needs to be non-zero */ | ||
| 124 | else | ||
| 125 | crq.cooked.status = 0x00; | ||
| 126 | |||
| 127 | rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]); | ||
| 128 | |||
| 129 | if (rc1) { | ||
| 130 | eprintk("%ld sending response\n", rc1); | ||
| 131 | return rc1; | ||
| 132 | } | ||
| 133 | |||
| 134 | return rc; | ||
| 135 | } | ||
| 136 | |||
| 137 | #define SRP_RSP_SENSE_DATA_LEN 18 | ||
| 138 | |||
| 139 | static int send_rsp(struct iu_entry *iue, struct scsi_cmnd *sc, | ||
| 140 | unsigned char status, unsigned char asc) | ||
| 141 | { | ||
| 142 | union viosrp_iu *iu = vio_iu(iue); | ||
| 143 | uint64_t tag = iu->srp.rsp.tag; | ||
| 144 | |||
| 145 | /* If the linked bit is on and status is good */ | ||
| 146 | if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE)) | ||
| 147 | status = 0x10; | ||
| 148 | |||
| 149 | memset(iu, 0, sizeof(struct srp_rsp)); | ||
| 150 | iu->srp.rsp.opcode = SRP_RSP; | ||
| 151 | iu->srp.rsp.req_lim_delta = 1; | ||
| 152 | iu->srp.rsp.tag = tag; | ||
| 153 | |||
| 154 | if (test_bit(V_DIOVER, &iue->flags)) | ||
| 155 | iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER; | ||
| 156 | |||
| 157 | iu->srp.rsp.data_in_res_cnt = 0; | ||
| 158 | iu->srp.rsp.data_out_res_cnt = 0; | ||
| 159 | |||
| 160 | iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID; | ||
| 161 | |||
| 162 | iu->srp.rsp.resp_data_len = 0; | ||
| 163 | iu->srp.rsp.status = status; | ||
| 164 | if (status) { | ||
| 165 | uint8_t *sense = iu->srp.rsp.data; | ||
| 166 | |||
| 167 | if (sc) { | ||
| 168 | iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID; | ||
| 169 | iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE; | ||
| 170 | memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE); | ||
| 171 | } else { | ||
| 172 | iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION; | ||
| 173 | iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID; | ||
| 174 | iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN; | ||
| 175 | |||
| 176 | /* Valid bit and 'current errors' */ | ||
| 177 | sense[0] = (0x1 << 7 | 0x70); | ||
| 178 | /* Sense key */ | ||
| 179 | sense[2] = status; | ||
| 180 | /* Additional sense length */ | ||
| 181 | sense[7] = 0xa; /* 10 bytes */ | ||
| 182 | /* Additional sense code */ | ||
| 183 | sense[12] = asc; | ||
| 184 | } | ||
| 185 | } | ||
| 186 | |||
| 187 | send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN, | ||
| 188 | VIOSRP_SRP_FORMAT); | ||
| 189 | |||
| 190 | return 0; | ||
| 191 | } | ||
| 192 | |||
| 193 | static void handle_cmd_queue(struct srp_target *target) | ||
| 194 | { | ||
| 195 | struct Scsi_Host *shost = target->shost; | ||
| 196 | struct srp_rport *rport = target_to_port(target)->rport; | ||
| 197 | struct iu_entry *iue; | ||
| 198 | struct srp_cmd *cmd; | ||
| 199 | unsigned long flags; | ||
| 200 | int err; | ||
| 201 | |||
| 202 | retry: | ||
| 203 | spin_lock_irqsave(&target->lock, flags); | ||
| 204 | |||
| 205 | list_for_each_entry(iue, &target->cmd_queue, ilist) { | ||
| 206 | if (!test_and_set_bit(V_FLYING, &iue->flags)) { | ||
| 207 | spin_unlock_irqrestore(&target->lock, flags); | ||
| 208 | cmd = iue->sbuf->buf; | ||
| 209 | err = srp_cmd_queue(shost, cmd, iue, | ||
| 210 | (unsigned long)rport, 0); | ||
| 211 | if (err) { | ||
| 212 | eprintk("cannot queue cmd %p %d\n", cmd, err); | ||
| 213 | srp_iu_put(iue); | ||
| 214 | } | ||
| 215 | goto retry; | ||
| 216 | } | ||
| 217 | } | ||
| 218 | |||
| 219 | spin_unlock_irqrestore(&target->lock, flags); | ||
| 220 | } | ||
| 221 | |||
| 222 | static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg, | ||
| 223 | struct srp_direct_buf *md, int nmd, | ||
| 224 | enum dma_data_direction dir, unsigned int rest) | ||
| 225 | { | ||
| 226 | struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr; | ||
| 227 | struct srp_target *target = iue->target; | ||
| 228 | struct vio_port *vport = target_to_port(target); | ||
| 229 | dma_addr_t token; | ||
| 230 | long err; | ||
| 231 | unsigned int done = 0; | ||
| 232 | int i, sidx, soff; | ||
| 233 | |||
| 234 | sidx = soff = 0; | ||
| 235 | token = sg_dma_address(sg + sidx); | ||
| 236 | |||
| 237 | for (i = 0; i < nmd && rest; i++) { | ||
| 238 | unsigned int mdone, mlen; | ||
| 239 | |||
| 240 | mlen = min(rest, md[i].len); | ||
| 241 | for (mdone = 0; mlen;) { | ||
| 242 | int slen = min(sg_dma_len(sg + sidx) - soff, mlen); | ||
| 243 | |||
| 244 | if (dir == DMA_TO_DEVICE) | ||
| 245 | err = h_copy_rdma(slen, | ||
| 246 | vport->riobn, | ||
| 247 | md[i].va + mdone, | ||
| 248 | vport->liobn, | ||
| 249 | token + soff); | ||
| 250 | else | ||
| 251 | err = h_copy_rdma(slen, | ||
| 252 | vport->liobn, | ||
| 253 | token + soff, | ||
| 254 | vport->riobn, | ||
| 255 | md[i].va + mdone); | ||
| 256 | |||
| 257 | if (err != H_SUCCESS) { | ||
| 258 | eprintk("rdma error %d %d %ld\n", dir, slen, err); | ||
| 259 | return -EIO; | ||
| 260 | } | ||
| 261 | |||
| 262 | mlen -= slen; | ||
| 263 | mdone += slen; | ||
| 264 | soff += slen; | ||
| 265 | done += slen; | ||
| 266 | |||
| 267 | if (soff == sg_dma_len(sg + sidx)) { | ||
| 268 | sidx++; | ||
| 269 | soff = 0; | ||
| 270 | token = sg_dma_address(sg + sidx); | ||
| 271 | |||
| 272 | if (sidx > nsg) { | ||
| 273 | eprintk("out of sg %p %d %d\n", | ||
| 274 | iue, sidx, nsg); | ||
| 275 | return -EIO; | ||
| 276 | } | ||
| 277 | } | ||
| 278 | }; | ||
| 279 | |||
| 280 | rest -= mlen; | ||
| 281 | } | ||
| 282 | return 0; | ||
| 283 | } | ||
| 284 | |||
| 285 | static int ibmvstgt_cmd_done(struct scsi_cmnd *sc, | ||
| 286 | void (*done)(struct scsi_cmnd *)) | ||
| 287 | { | ||
| 288 | unsigned long flags; | ||
| 289 | struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr; | ||
| 290 | struct srp_target *target = iue->target; | ||
| 291 | int err = 0; | ||
| 292 | |||
| 293 | dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0], | ||
| 294 | scsi_sg_count(sc)); | ||
| 295 | |||
| 296 | if (scsi_sg_count(sc)) | ||
| 297 | err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1); | ||
| 298 | |||
| 299 | spin_lock_irqsave(&target->lock, flags); | ||
| 300 | list_del(&iue->ilist); | ||
| 301 | spin_unlock_irqrestore(&target->lock, flags); | ||
| 302 | |||
| 303 | if (err|| sc->result != SAM_STAT_GOOD) { | ||
| 304 | eprintk("operation failed %p %d %x\n", | ||
| 305 | iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]); | ||
| 306 | send_rsp(iue, sc, HARDWARE_ERROR, 0x00); | ||
| 307 | } else | ||
| 308 | send_rsp(iue, sc, NO_SENSE, 0x00); | ||
| 309 | |||
| 310 | done(sc); | ||
| 311 | srp_iu_put(iue); | ||
| 312 | return 0; | ||
| 313 | } | ||
| 314 | |||
| 315 | int send_adapter_info(struct iu_entry *iue, | ||
| 316 | dma_addr_t remote_buffer, uint16_t length) | ||
| 317 | { | ||
| 318 | struct srp_target *target = iue->target; | ||
| 319 | struct vio_port *vport = target_to_port(target); | ||
| 320 | struct Scsi_Host *shost = target->shost; | ||
| 321 | dma_addr_t data_token; | ||
| 322 | struct mad_adapter_info_data *info; | ||
| 323 | int err; | ||
| 324 | |||
| 325 | info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token, | ||
| 326 | GFP_KERNEL); | ||
| 327 | if (!info) { | ||
| 328 | eprintk("bad dma_alloc_coherent %p\n", target); | ||
| 329 | return 1; | ||
| 330 | } | ||
| 331 | |||
| 332 | /* Get remote info */ | ||
| 333 | err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer, | ||
| 334 | vport->liobn, data_token); | ||
| 335 | if (err == H_SUCCESS) { | ||
| 336 | dprintk("Client connect: %s (%d)\n", | ||
| 337 | info->partition_name, info->partition_number); | ||
| 338 | } | ||
| 339 | |||
| 340 | memset(info, 0, sizeof(*info)); | ||
| 341 | |||
| 342 | strcpy(info->srp_version, "16.a"); | ||
| 343 | strncpy(info->partition_name, partition_name, | ||
| 344 | sizeof(info->partition_name)); | ||
| 345 | info->partition_number = partition_number; | ||
| 346 | info->mad_version = 1; | ||
| 347 | info->os_type = 2; | ||
| 348 | info->port_max_txu[0] = shost->hostt->max_sectors << 9; | ||
| 349 | |||
| 350 | /* Send our info to remote */ | ||
| 351 | err = h_copy_rdma(sizeof(*info), vport->liobn, data_token, | ||
| 352 | vport->riobn, remote_buffer); | ||
| 353 | |||
| 354 | dma_free_coherent(target->dev, sizeof(*info), info, data_token); | ||
| 355 | |||
| 356 | if (err != H_SUCCESS) { | ||
| 357 | eprintk("Error sending adapter info %d\n", err); | ||
| 358 | return 1; | ||
| 359 | } | ||
| 360 | |||
| 361 | return 0; | ||
| 362 | } | ||
| 363 | |||
| 364 | static void process_login(struct iu_entry *iue) | ||
| 365 | { | ||
| 366 | union viosrp_iu *iu = vio_iu(iue); | ||
| 367 | struct srp_login_rsp *rsp = &iu->srp.login_rsp; | ||
| 368 | uint64_t tag = iu->srp.rsp.tag; | ||
| 369 | struct Scsi_Host *shost = iue->target->shost; | ||
| 370 | struct srp_target *target = host_to_srp_target(shost); | ||
| 371 | struct vio_port *vport = target_to_port(target); | ||
| 372 | struct srp_rport_identifiers ids; | ||
| 373 | |||
| 374 | memset(&ids, 0, sizeof(ids)); | ||
| 375 | sprintf(ids.port_id, "%x", vport->dma_dev->unit_address); | ||
| 376 | ids.roles = SRP_RPORT_ROLE_INITIATOR; | ||
| 377 | if (!vport->rport) | ||
| 378 | vport->rport = srp_rport_add(shost, &ids); | ||
| 379 | |||
| 380 | /* TODO handle case that requested size is wrong and | ||
| 381 | * buffer format is wrong | ||
| 382 | */ | ||
| 383 | memset(iu, 0, sizeof(struct srp_login_rsp)); | ||
| 384 | rsp->opcode = SRP_LOGIN_RSP; | ||
| 385 | rsp->req_lim_delta = INITIAL_SRP_LIMIT; | ||
| 386 | rsp->tag = tag; | ||
| 387 | rsp->max_it_iu_len = sizeof(union srp_iu); | ||
| 388 | rsp->max_ti_iu_len = sizeof(union srp_iu); | ||
| 389 | /* direct and indirect */ | ||
| 390 | rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; | ||
| 391 | |||
| 392 | send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT); | ||
| 393 | } | ||
| 394 | |||
| 395 | static inline void queue_cmd(struct iu_entry *iue) | ||
| 396 | { | ||
| 397 | struct srp_target *target = iue->target; | ||
| 398 | unsigned long flags; | ||
| 399 | |||
| 400 | spin_lock_irqsave(&target->lock, flags); | ||
| 401 | list_add_tail(&iue->ilist, &target->cmd_queue); | ||
| 402 | spin_unlock_irqrestore(&target->lock, flags); | ||
| 403 | } | ||
| 404 | |||
| 405 | static int process_tsk_mgmt(struct iu_entry *iue) | ||
| 406 | { | ||
| 407 | union viosrp_iu *iu = vio_iu(iue); | ||
| 408 | int fn; | ||
| 409 | |||
| 410 | dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func); | ||
| 411 | |||
| 412 | switch (iu->srp.tsk_mgmt.tsk_mgmt_func) { | ||
| 413 | case SRP_TSK_ABORT_TASK: | ||
| 414 | fn = ABORT_TASK; | ||
| 415 | break; | ||
| 416 | case SRP_TSK_ABORT_TASK_SET: | ||
| 417 | fn = ABORT_TASK_SET; | ||
| 418 | break; | ||
| 419 | case SRP_TSK_CLEAR_TASK_SET: | ||
| 420 | fn = CLEAR_TASK_SET; | ||
| 421 | break; | ||
| 422 | case SRP_TSK_LUN_RESET: | ||
| 423 | fn = LOGICAL_UNIT_RESET; | ||
| 424 | break; | ||
| 425 | case SRP_TSK_CLEAR_ACA: | ||
| 426 | fn = CLEAR_ACA; | ||
| 427 | break; | ||
| 428 | default: | ||
| 429 | fn = 0; | ||
| 430 | } | ||
| 431 | if (fn) | ||
| 432 | scsi_tgt_tsk_mgmt_request(iue->target->shost, | ||
| 433 | (unsigned long)iue->target->shost, | ||
| 434 | fn, | ||
| 435 | iu->srp.tsk_mgmt.task_tag, | ||
| 436 | (struct scsi_lun *) &iu->srp.tsk_mgmt.lun, | ||
| 437 | iue); | ||
| 438 | else | ||
| 439 | send_rsp(iue, NULL, ILLEGAL_REQUEST, 0x20); | ||
| 440 | |||
| 441 | return !fn; | ||
| 442 | } | ||
| 443 | |||
| 444 | static int process_mad_iu(struct iu_entry *iue) | ||
| 445 | { | ||
| 446 | union viosrp_iu *iu = vio_iu(iue); | ||
| 447 | struct viosrp_adapter_info *info; | ||
| 448 | struct viosrp_host_config *conf; | ||
| 449 | |||
| 450 | switch (iu->mad.empty_iu.common.type) { | ||
| 451 | case VIOSRP_EMPTY_IU_TYPE: | ||
| 452 | eprintk("%s\n", "Unsupported EMPTY MAD IU"); | ||
| 453 | break; | ||
| 454 | case VIOSRP_ERROR_LOG_TYPE: | ||
| 455 | eprintk("%s\n", "Unsupported ERROR LOG MAD IU"); | ||
| 456 | iu->mad.error_log.common.status = 1; | ||
| 457 | send_iu(iue, sizeof(iu->mad.error_log), VIOSRP_MAD_FORMAT); | ||
| 458 | break; | ||
| 459 | case VIOSRP_ADAPTER_INFO_TYPE: | ||
| 460 | info = &iu->mad.adapter_info; | ||
| 461 | info->common.status = send_adapter_info(iue, info->buffer, | ||
| 462 | info->common.length); | ||
| 463 | send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT); | ||
| 464 | break; | ||
| 465 | case VIOSRP_HOST_CONFIG_TYPE: | ||
| 466 | conf = &iu->mad.host_config; | ||
| 467 | conf->common.status = 1; | ||
| 468 | send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT); | ||
| 469 | break; | ||
| 470 | default: | ||
| 471 | eprintk("Unknown type %u\n", iu->srp.rsp.opcode); | ||
| 472 | } | ||
| 473 | |||
| 474 | return 1; | ||
| 475 | } | ||
| 476 | |||
| 477 | static int process_srp_iu(struct iu_entry *iue) | ||
| 478 | { | ||
| 479 | union viosrp_iu *iu = vio_iu(iue); | ||
| 480 | int done = 1; | ||
| 481 | u8 opcode = iu->srp.rsp.opcode; | ||
| 482 | |||
| 483 | switch (opcode) { | ||
| 484 | case SRP_LOGIN_REQ: | ||
| 485 | process_login(iue); | ||
| 486 | break; | ||
| 487 | case SRP_TSK_MGMT: | ||
| 488 | done = process_tsk_mgmt(iue); | ||
| 489 | break; | ||
| 490 | case SRP_CMD: | ||
| 491 | queue_cmd(iue); | ||
| 492 | done = 0; | ||
| 493 | break; | ||
| 494 | case SRP_LOGIN_RSP: | ||
| 495 | case SRP_I_LOGOUT: | ||
| 496 | case SRP_T_LOGOUT: | ||
| 497 | case SRP_RSP: | ||
| 498 | case SRP_CRED_REQ: | ||
| 499 | case SRP_CRED_RSP: | ||
| 500 | case SRP_AER_REQ: | ||
| 501 | case SRP_AER_RSP: | ||
| 502 | eprintk("Unsupported type %u\n", opcode); | ||
| 503 | break; | ||
| 504 | default: | ||
| 505 | eprintk("Unknown type %u\n", opcode); | ||
| 506 | } | ||
| 507 | |||
| 508 | return done; | ||
| 509 | } | ||
| 510 | |||
| 511 | static void process_iu(struct viosrp_crq *crq, struct srp_target *target) | ||
| 512 | { | ||
| 513 | struct vio_port *vport = target_to_port(target); | ||
| 514 | struct iu_entry *iue; | ||
| 515 | long err; | ||
| 516 | int done = 1; | ||
| 517 | |||
| 518 | iue = srp_iu_get(target); | ||
| 519 | if (!iue) { | ||
| 520 | eprintk("Error getting IU from pool, %p\n", target); | ||
| 521 | return; | ||
| 522 | } | ||
| 523 | |||
| 524 | iue->remote_token = crq->IU_data_ptr; | ||
| 525 | |||
| 526 | err = h_copy_rdma(crq->IU_length, vport->riobn, | ||
| 527 | iue->remote_token, vport->liobn, iue->sbuf->dma); | ||
| 528 | |||
| 529 | if (err != H_SUCCESS) { | ||
| 530 | eprintk("%ld transferring data error %p\n", err, iue); | ||
| 531 | goto out; | ||
| 532 | } | ||
| 533 | |||
| 534 | if (crq->format == VIOSRP_MAD_FORMAT) | ||
| 535 | done = process_mad_iu(iue); | ||
| 536 | else | ||
| 537 | done = process_srp_iu(iue); | ||
| 538 | out: | ||
| 539 | if (done) | ||
| 540 | srp_iu_put(iue); | ||
| 541 | } | ||
| 542 | |||
| 543 | static irqreturn_t ibmvstgt_interrupt(int dummy, void *data) | ||
| 544 | { | ||
| 545 | struct srp_target *target = data; | ||
| 546 | struct vio_port *vport = target_to_port(target); | ||
| 547 | |||
| 548 | vio_disable_interrupts(vport->dma_dev); | ||
| 549 | queue_work(vtgtd, &vport->crq_work); | ||
| 550 | |||
| 551 | return IRQ_HANDLED; | ||
| 552 | } | ||
| 553 | |||
| 554 | static int crq_queue_create(struct crq_queue *queue, struct srp_target *target) | ||
| 555 | { | ||
| 556 | int err; | ||
| 557 | struct vio_port *vport = target_to_port(target); | ||
| 558 | |||
| 559 | queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL); | ||
| 560 | if (!queue->msgs) | ||
| 561 | goto malloc_failed; | ||
| 562 | queue->size = PAGE_SIZE / sizeof(*queue->msgs); | ||
| 563 | |||
| 564 | queue->msg_token = dma_map_single(target->dev, queue->msgs, | ||
| 565 | queue->size * sizeof(*queue->msgs), | ||
| 566 | DMA_BIDIRECTIONAL); | ||
| 567 | |||
| 568 | if (dma_mapping_error(target->dev, queue->msg_token)) | ||
| 569 | goto map_failed; | ||
| 570 | |||
| 571 | err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token, | ||
| 572 | PAGE_SIZE); | ||
| 573 | |||
| 574 | /* If the adapter was left active for some reason (like kexec) | ||
| 575 | * try freeing and re-registering | ||
| 576 | */ | ||
| 577 | if (err == H_RESOURCE) { | ||
| 578 | do { | ||
| 579 | err = h_free_crq(vport->dma_dev->unit_address); | ||
| 580 | } while (err == H_BUSY || H_IS_LONG_BUSY(err)); | ||
| 581 | |||
| 582 | err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token, | ||
| 583 | PAGE_SIZE); | ||
| 584 | } | ||
| 585 | |||
| 586 | if (err != H_SUCCESS && err != 2) { | ||
| 587 | eprintk("Error 0x%x opening virtual adapter\n", err); | ||
| 588 | goto reg_crq_failed; | ||
| 589 | } | ||
| 590 | |||
| 591 | err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt, | ||
| 592 | 0, "ibmvstgt", target); | ||
| 593 | if (err) | ||
| 594 | goto req_irq_failed; | ||
| 595 | |||
| 596 | vio_enable_interrupts(vport->dma_dev); | ||
| 597 | |||
| 598 | h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0); | ||
| 599 | |||
| 600 | queue->cur = 0; | ||
| 601 | spin_lock_init(&queue->lock); | ||
| 602 | |||
| 603 | return 0; | ||
| 604 | |||
| 605 | req_irq_failed: | ||
| 606 | do { | ||
| 607 | err = h_free_crq(vport->dma_dev->unit_address); | ||
| 608 | } while (err == H_BUSY || H_IS_LONG_BUSY(err)); | ||
| 609 | |||
| 610 | reg_crq_failed: | ||
| 611 | dma_unmap_single(target->dev, queue->msg_token, | ||
| 612 | queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); | ||
| 613 | map_failed: | ||
| 614 | free_page((unsigned long) queue->msgs); | ||
| 615 | |||
| 616 | malloc_failed: | ||
| 617 | return -ENOMEM; | ||
| 618 | } | ||
| 619 | |||
| 620 | static void crq_queue_destroy(struct srp_target *target) | ||
| 621 | { | ||
| 622 | struct vio_port *vport = target_to_port(target); | ||
| 623 | struct crq_queue *queue = &vport->crq_queue; | ||
| 624 | int err; | ||
| 625 | |||
| 626 | free_irq(vport->dma_dev->irq, target); | ||
| 627 | do { | ||
| 628 | err = h_free_crq(vport->dma_dev->unit_address); | ||
| 629 | } while (err == H_BUSY || H_IS_LONG_BUSY(err)); | ||
| 630 | |||
| 631 | dma_unmap_single(target->dev, queue->msg_token, | ||
| 632 | queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); | ||
| 633 | |||
| 634 | free_page((unsigned long) queue->msgs); | ||
| 635 | } | ||
| 636 | |||
| 637 | static void process_crq(struct viosrp_crq *crq, struct srp_target *target) | ||
| 638 | { | ||
| 639 | struct vio_port *vport = target_to_port(target); | ||
| 640 | dprintk("%x %x\n", crq->valid, crq->format); | ||
| 641 | |||
| 642 | switch (crq->valid) { | ||
| 643 | case 0xC0: | ||
| 644 | /* initialization */ | ||
| 645 | switch (crq->format) { | ||
| 646 | case 0x01: | ||
| 647 | h_send_crq(vport->dma_dev->unit_address, | ||
| 648 | 0xC002000000000000, 0); | ||
| 649 | break; | ||
| 650 | case 0x02: | ||
| 651 | break; | ||
| 652 | default: | ||
| 653 | eprintk("Unknown format %u\n", crq->format); | ||
| 654 | } | ||
| 655 | break; | ||
| 656 | case 0xFF: | ||
| 657 | /* transport event */ | ||
| 658 | break; | ||
| 659 | case 0x80: | ||
| 660 | /* real payload */ | ||
| 661 | switch (crq->format) { | ||
| 662 | case VIOSRP_SRP_FORMAT: | ||
| 663 | case VIOSRP_MAD_FORMAT: | ||
| 664 | process_iu(crq, target); | ||
| 665 | break; | ||
| 666 | case VIOSRP_OS400_FORMAT: | ||
| 667 | case VIOSRP_AIX_FORMAT: | ||
| 668 | case VIOSRP_LINUX_FORMAT: | ||
| 669 | case VIOSRP_INLINE_FORMAT: | ||
| 670 | eprintk("Unsupported format %u\n", crq->format); | ||
| 671 | break; | ||
| 672 | default: | ||
| 673 | eprintk("Unknown format %u\n", crq->format); | ||
| 674 | } | ||
| 675 | break; | ||
| 676 | default: | ||
| 677 | eprintk("unknown message type 0x%02x!?\n", crq->valid); | ||
| 678 | } | ||
| 679 | } | ||
| 680 | |||
| 681 | static inline struct viosrp_crq *next_crq(struct crq_queue *queue) | ||
| 682 | { | ||
| 683 | struct viosrp_crq *crq; | ||
| 684 | unsigned long flags; | ||
| 685 | |||
| 686 | spin_lock_irqsave(&queue->lock, flags); | ||
| 687 | crq = &queue->msgs[queue->cur]; | ||
| 688 | if (crq->valid & 0x80) { | ||
| 689 | if (++queue->cur == queue->size) | ||
| 690 | queue->cur = 0; | ||
| 691 | } else | ||
| 692 | crq = NULL; | ||
| 693 | spin_unlock_irqrestore(&queue->lock, flags); | ||
| 694 | |||
| 695 | return crq; | ||
| 696 | } | ||
| 697 | |||
| 698 | static void handle_crq(struct work_struct *work) | ||
| 699 | { | ||
| 700 | struct vio_port *vport = container_of(work, struct vio_port, crq_work); | ||
| 701 | struct srp_target *target = vport->target; | ||
| 702 | struct viosrp_crq *crq; | ||
| 703 | int done = 0; | ||
| 704 | |||
| 705 | while (!done) { | ||
| 706 | while ((crq = next_crq(&vport->crq_queue)) != NULL) { | ||
| 707 | process_crq(crq, target); | ||
| 708 | crq->valid = 0x00; | ||
| 709 | } | ||
| 710 | |||
| 711 | vio_enable_interrupts(vport->dma_dev); | ||
| 712 | |||
| 713 | crq = next_crq(&vport->crq_queue); | ||
| 714 | if (crq) { | ||
| 715 | vio_disable_interrupts(vport->dma_dev); | ||
| 716 | process_crq(crq, target); | ||
| 717 | crq->valid = 0x00; | ||
| 718 | } else | ||
| 719 | done = 1; | ||
| 720 | } | ||
| 721 | |||
| 722 | handle_cmd_queue(target); | ||
| 723 | } | ||
| 724 | |||
| 725 | |||
| 726 | static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *sc) | ||
| 727 | { | ||
| 728 | unsigned long flags; | ||
| 729 | struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr; | ||
| 730 | struct srp_target *target = iue->target; | ||
| 731 | |||
| 732 | dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]); | ||
| 733 | |||
| 734 | spin_lock_irqsave(&target->lock, flags); | ||
| 735 | list_del(&iue->ilist); | ||
| 736 | spin_unlock_irqrestore(&target->lock, flags); | ||
| 737 | |||
| 738 | srp_iu_put(iue); | ||
| 739 | |||
| 740 | return 0; | ||
| 741 | } | ||
| 742 | |||
| 743 | static int ibmvstgt_tsk_mgmt_response(struct Scsi_Host *shost, | ||
| 744 | u64 itn_id, u64 mid, int result) | ||
| 745 | { | ||
| 746 | struct iu_entry *iue = (struct iu_entry *) ((void *) mid); | ||
| 747 | union viosrp_iu *iu = vio_iu(iue); | ||
| 748 | unsigned char status, asc; | ||
| 749 | |||
| 750 | eprintk("%p %d\n", iue, result); | ||
| 751 | status = NO_SENSE; | ||
| 752 | asc = 0; | ||
| 753 | |||
| 754 | switch (iu->srp.tsk_mgmt.tsk_mgmt_func) { | ||
| 755 | case SRP_TSK_ABORT_TASK: | ||
| 756 | asc = 0x14; | ||
| 757 | if (result) | ||
| 758 | status = ABORTED_COMMAND; | ||
| 759 | break; | ||
| 760 | default: | ||
| 761 | break; | ||
| 762 | } | ||
| 763 | |||
| 764 | send_rsp(iue, NULL, status, asc); | ||
| 765 | srp_iu_put(iue); | ||
| 766 | |||
| 767 | return 0; | ||
| 768 | } | ||
| 769 | |||
| 770 | static int ibmvstgt_it_nexus_response(struct Scsi_Host *shost, u64 itn_id, | ||
| 771 | int result) | ||
| 772 | { | ||
| 773 | struct srp_target *target = host_to_srp_target(shost); | ||
| 774 | struct vio_port *vport = target_to_port(target); | ||
| 775 | |||
| 776 | if (result) { | ||
| 777 | eprintk("%p %d\n", shost, result); | ||
| 778 | srp_rport_del(vport->rport); | ||
| 779 | vport->rport = NULL; | ||
| 780 | } | ||
| 781 | return 0; | ||
| 782 | } | ||
| 783 | |||
| 784 | static ssize_t system_id_show(struct device *dev, | ||
| 785 | struct device_attribute *attr, char *buf) | ||
| 786 | { | ||
| 787 | return snprintf(buf, PAGE_SIZE, "%s\n", system_id); | ||
| 788 | } | ||
| 789 | |||
| 790 | static ssize_t partition_number_show(struct device *dev, | ||
| 791 | struct device_attribute *attr, char *buf) | ||
| 792 | { | ||
| 793 | return snprintf(buf, PAGE_SIZE, "%x\n", partition_number); | ||
| 794 | } | ||
| 795 | |||
| 796 | static ssize_t unit_address_show(struct device *dev, | ||
| 797 | struct device_attribute *attr, char *buf) | ||
| 798 | { | ||
| 799 | struct Scsi_Host *shost = class_to_shost(dev); | ||
| 800 | struct srp_target *target = host_to_srp_target(shost); | ||
| 801 | struct vio_port *vport = target_to_port(target); | ||
| 802 | return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address); | ||
| 803 | } | ||
| 804 | |||
| 805 | static DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL); | ||
| 806 | static DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL); | ||
| 807 | static DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL); | ||
| 808 | |||
| 809 | static struct device_attribute *ibmvstgt_attrs[] = { | ||
| 810 | &dev_attr_system_id, | ||
| 811 | &dev_attr_partition_number, | ||
| 812 | &dev_attr_unit_address, | ||
| 813 | NULL, | ||
| 814 | }; | ||
| 815 | |||
| 816 | static struct scsi_host_template ibmvstgt_sht = { | ||
| 817 | .name = TGT_NAME, | ||
| 818 | .module = THIS_MODULE, | ||
| 819 | .can_queue = INITIAL_SRP_LIMIT, | ||
| 820 | .sg_tablesize = SG_ALL, | ||
| 821 | .use_clustering = DISABLE_CLUSTERING, | ||
| 822 | .max_sectors = DEFAULT_MAX_SECTORS, | ||
| 823 | .transfer_response = ibmvstgt_cmd_done, | ||
| 824 | .eh_abort_handler = ibmvstgt_eh_abort_handler, | ||
| 825 | .shost_attrs = ibmvstgt_attrs, | ||
| 826 | .proc_name = TGT_NAME, | ||
| 827 | .supported_mode = MODE_TARGET, | ||
| 828 | }; | ||
| 829 | |||
| 830 | static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id) | ||
| 831 | { | ||
| 832 | struct Scsi_Host *shost; | ||
| 833 | struct srp_target *target; | ||
| 834 | struct vio_port *vport; | ||
| 835 | unsigned int *dma, dma_size; | ||
| 836 | int err = -ENOMEM; | ||
| 837 | |||
| 838 | vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL); | ||
| 839 | if (!vport) | ||
| 840 | return err; | ||
| 841 | shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target)); | ||
| 842 | if (!shost) | ||
| 843 | goto free_vport; | ||
| 844 | shost->transportt = ibmvstgt_transport_template; | ||
| 845 | |||
| 846 | target = host_to_srp_target(shost); | ||
| 847 | target->shost = shost; | ||
| 848 | vport->dma_dev = dev; | ||
| 849 | target->ldata = vport; | ||
| 850 | vport->target = target; | ||
| 851 | err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT, | ||
| 852 | SRP_MAX_IU_LEN); | ||
| 853 | if (err) | ||
| 854 | goto put_host; | ||
| 855 | |||
| 856 | dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window", | ||
| 857 | &dma_size); | ||
| 858 | if (!dma || dma_size != 40) { | ||
| 859 | eprintk("Couldn't get window property %d\n", dma_size); | ||
| 860 | err = -EIO; | ||
| 861 | goto free_srp_target; | ||
| 862 | } | ||
| 863 | vport->liobn = dma[0]; | ||
| 864 | vport->riobn = dma[5]; | ||
| 865 | |||
| 866 | INIT_WORK(&vport->crq_work, handle_crq); | ||
| 867 | |||
| 868 | err = scsi_add_host(shost, target->dev); | ||
| 869 | if (err) | ||
| 870 | goto free_srp_target; | ||
| 871 | |||
| 872 | err = scsi_tgt_alloc_queue(shost); | ||
| 873 | if (err) | ||
| 874 | goto remove_host; | ||
| 875 | |||
| 876 | err = crq_queue_create(&vport->crq_queue, target); | ||
| 877 | if (err) | ||
| 878 | goto free_queue; | ||
| 879 | |||
| 880 | return 0; | ||
| 881 | free_queue: | ||
| 882 | scsi_tgt_free_queue(shost); | ||
| 883 | remove_host: | ||
| 884 | scsi_remove_host(shost); | ||
| 885 | free_srp_target: | ||
| 886 | srp_target_free(target); | ||
| 887 | put_host: | ||
| 888 | scsi_host_put(shost); | ||
| 889 | free_vport: | ||
| 890 | kfree(vport); | ||
| 891 | return err; | ||
| 892 | } | ||
| 893 | |||
| 894 | static int ibmvstgt_remove(struct vio_dev *dev) | ||
| 895 | { | ||
| 896 | struct srp_target *target = dev_get_drvdata(&dev->dev); | ||
| 897 | struct Scsi_Host *shost = target->shost; | ||
| 898 | struct vio_port *vport = target->ldata; | ||
| 899 | |||
| 900 | crq_queue_destroy(target); | ||
| 901 | srp_remove_host(shost); | ||
| 902 | scsi_remove_host(shost); | ||
| 903 | scsi_tgt_free_queue(shost); | ||
| 904 | srp_target_free(target); | ||
| 905 | kfree(vport); | ||
| 906 | scsi_host_put(shost); | ||
| 907 | return 0; | ||
| 908 | } | ||
| 909 | |||
| 910 | static struct vio_device_id ibmvstgt_device_table[] = { | ||
| 911 | {"v-scsi-host", "IBM,v-scsi-host"}, | ||
| 912 | {"",""} | ||
| 913 | }; | ||
| 914 | |||
| 915 | MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table); | ||
| 916 | |||
| 917 | static struct vio_driver ibmvstgt_driver = { | ||
| 918 | .id_table = ibmvstgt_device_table, | ||
| 919 | .probe = ibmvstgt_probe, | ||
| 920 | .remove = ibmvstgt_remove, | ||
| 921 | .name = "ibmvscsis", | ||
| 922 | }; | ||
| 923 | |||
| 924 | static int get_system_info(void) | ||
| 925 | { | ||
| 926 | struct device_node *rootdn; | ||
| 927 | const char *id, *model, *name; | ||
| 928 | const unsigned int *num; | ||
| 929 | |||
| 930 | rootdn = of_find_node_by_path("/"); | ||
| 931 | if (!rootdn) | ||
| 932 | return -ENOENT; | ||
| 933 | |||
| 934 | model = of_get_property(rootdn, "model", NULL); | ||
| 935 | id = of_get_property(rootdn, "system-id", NULL); | ||
| 936 | if (model && id) | ||
| 937 | snprintf(system_id, sizeof(system_id), "%s-%s", model, id); | ||
| 938 | |||
| 939 | name = of_get_property(rootdn, "ibm,partition-name", NULL); | ||
| 940 | if (name) | ||
| 941 | strncpy(partition_name, name, sizeof(partition_name)); | ||
| 942 | |||
| 943 | num = of_get_property(rootdn, "ibm,partition-no", NULL); | ||
| 944 | if (num) | ||
| 945 | partition_number = *num; | ||
| 946 | |||
| 947 | of_node_put(rootdn); | ||
| 948 | return 0; | ||
| 949 | } | ||
| 950 | |||
| 951 | static struct srp_function_template ibmvstgt_transport_functions = { | ||
| 952 | .tsk_mgmt_response = ibmvstgt_tsk_mgmt_response, | ||
| 953 | .it_nexus_response = ibmvstgt_it_nexus_response, | ||
| 954 | }; | ||
| 955 | |||
| 956 | static int __init ibmvstgt_init(void) | ||
| 957 | { | ||
| 958 | int err = -ENOMEM; | ||
| 959 | |||
| 960 | printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n"); | ||
| 961 | |||
| 962 | ibmvstgt_transport_template = | ||
| 963 | srp_attach_transport(&ibmvstgt_transport_functions); | ||
| 964 | if (!ibmvstgt_transport_template) | ||
| 965 | return err; | ||
| 966 | |||
| 967 | vtgtd = create_workqueue("ibmvtgtd"); | ||
| 968 | if (!vtgtd) | ||
| 969 | goto release_transport; | ||
| 970 | |||
| 971 | err = get_system_info(); | ||
| 972 | if (err) | ||
| 973 | goto destroy_wq; | ||
| 974 | |||
| 975 | err = vio_register_driver(&ibmvstgt_driver); | ||
| 976 | if (err) | ||
| 977 | goto destroy_wq; | ||
| 978 | |||
| 979 | return 0; | ||
| 980 | destroy_wq: | ||
| 981 | destroy_workqueue(vtgtd); | ||
| 982 | release_transport: | ||
| 983 | srp_release_transport(ibmvstgt_transport_template); | ||
| 984 | return err; | ||
| 985 | } | ||
| 986 | |||
| 987 | static void __exit ibmvstgt_exit(void) | ||
| 988 | { | ||
| 989 | printk("Unregister IBM virtual SCSI driver\n"); | ||
| 990 | |||
| 991 | destroy_workqueue(vtgtd); | ||
| 992 | vio_unregister_driver(&ibmvstgt_driver); | ||
| 993 | srp_release_transport(ibmvstgt_transport_template); | ||
| 994 | } | ||
| 995 | |||
| 996 | MODULE_DESCRIPTION("IBM Virtual SCSI Target"); | ||
| 997 | MODULE_AUTHOR("Santiago Leon"); | ||
| 998 | MODULE_LICENSE("GPL"); | ||
| 999 | |||
| 1000 | module_init(ibmvstgt_init); | ||
| 1001 | module_exit(ibmvstgt_exit); | ||
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c index b1c4d831137d..ddf0694d87f0 100644 --- a/drivers/scsi/in2000.c +++ b/drivers/scsi/in2000.c | |||
| @@ -2251,14 +2251,14 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
| 2251 | seq_printf(m, "\nconnected: "); | 2251 | seq_printf(m, "\nconnected: "); |
| 2252 | if (hd->connected) { | 2252 | if (hd->connected) { |
| 2253 | cmd = (Scsi_Cmnd *) hd->connected; | 2253 | cmd = (Scsi_Cmnd *) hd->connected; |
| 2254 | seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); | 2254 | seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); |
| 2255 | } | 2255 | } |
| 2256 | } | 2256 | } |
| 2257 | if (hd->proc & PR_INPUTQ) { | 2257 | if (hd->proc & PR_INPUTQ) { |
| 2258 | seq_printf(m, "\ninput_Q: "); | 2258 | seq_printf(m, "\ninput_Q: "); |
| 2259 | cmd = (Scsi_Cmnd *) hd->input_Q; | 2259 | cmd = (Scsi_Cmnd *) hd->input_Q; |
| 2260 | while (cmd) { | 2260 | while (cmd) { |
| 2261 | seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); | 2261 | seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); |
| 2262 | cmd = (Scsi_Cmnd *) cmd->host_scribble; | 2262 | cmd = (Scsi_Cmnd *) cmd->host_scribble; |
| 2263 | } | 2263 | } |
| 2264 | } | 2264 | } |
| @@ -2266,7 +2266,7 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
| 2266 | seq_printf(m, "\ndisconnected_Q:"); | 2266 | seq_printf(m, "\ndisconnected_Q:"); |
| 2267 | cmd = (Scsi_Cmnd *) hd->disconnected_Q; | 2267 | cmd = (Scsi_Cmnd *) hd->disconnected_Q; |
| 2268 | while (cmd) { | 2268 | while (cmd) { |
| 2269 | seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); | 2269 | seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); |
| 2270 | cmd = (Scsi_Cmnd *) cmd->host_scribble; | 2270 | cmd = (Scsi_Cmnd *) cmd->host_scribble; |
| 2271 | } | 2271 | } |
| 2272 | } | 2272 | } |
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 695b34e9154e..2e890b1e2526 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c | |||
| @@ -75,7 +75,7 @@ MODULE_VERSION(DRV_VERSION); | |||
| 75 | 75 | ||
| 76 | static struct scsi_transport_template *isci_transport_template; | 76 | static struct scsi_transport_template *isci_transport_template; |
| 77 | 77 | ||
| 78 | static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = { | 78 | static const struct pci_device_id isci_id_table[] = { |
| 79 | { PCI_VDEVICE(INTEL, 0x1D61),}, | 79 | { PCI_VDEVICE(INTEL, 0x1D61),}, |
| 80 | { PCI_VDEVICE(INTEL, 0x1D63),}, | 80 | { PCI_VDEVICE(INTEL, 0x1D63),}, |
| 81 | { PCI_VDEVICE(INTEL, 0x1D65),}, | 81 | { PCI_VDEVICE(INTEL, 0x1D65),}, |
| @@ -356,7 +356,7 @@ static int isci_setup_interrupts(struct pci_dev *pdev) | |||
| 356 | for (i = 0; i < num_msix; i++) | 356 | for (i = 0; i < num_msix; i++) |
| 357 | pci_info->msix_entries[i].entry = i; | 357 | pci_info->msix_entries[i].entry = i; |
| 358 | 358 | ||
| 359 | err = pci_enable_msix(pdev, pci_info->msix_entries, num_msix); | 359 | err = pci_enable_msix_exact(pdev, pci_info->msix_entries, num_msix); |
| 360 | if (err) | 360 | if (err) |
| 361 | goto intx; | 361 | goto intx; |
| 362 | 362 | ||
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 3d1bc67bac9d..ea025e4806b6 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
| @@ -260,7 +260,7 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) | |||
| 260 | { | 260 | { |
| 261 | struct iscsi_conn *conn = task->conn; | 261 | struct iscsi_conn *conn = task->conn; |
| 262 | struct iscsi_tm *tmf = &conn->tmhdr; | 262 | struct iscsi_tm *tmf = &conn->tmhdr; |
| 263 | unsigned int hdr_lun; | 263 | u64 hdr_lun; |
| 264 | 264 | ||
| 265 | if (conn->tmf_state == TMF_INITIAL) | 265 | if (conn->tmf_state == TMF_INITIAL) |
| 266 | return 0; | 266 | return 0; |
| @@ -1859,8 +1859,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, | |||
| 1859 | * Fail commands. session lock held and recv side suspended and xmit | 1859 | * Fail commands. session lock held and recv side suspended and xmit |
| 1860 | * thread flushed | 1860 | * thread flushed |
| 1861 | */ | 1861 | */ |
| 1862 | static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun, | 1862 | static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error) |
| 1863 | int error) | ||
| 1864 | { | 1863 | { |
| 1865 | struct iscsi_task *task; | 1864 | struct iscsi_task *task; |
| 1866 | int i; | 1865 | int i; |
| @@ -2098,7 +2097,7 @@ static void iscsi_check_transport_timeouts(unsigned long data) | |||
| 2098 | conn->ping_timeout, conn->recv_timeout, | 2097 | conn->ping_timeout, conn->recv_timeout, |
| 2099 | last_recv, conn->last_ping, jiffies); | 2098 | last_recv, conn->last_ping, jiffies); |
| 2100 | spin_unlock(&session->frwd_lock); | 2099 | spin_unlock(&session->frwd_lock); |
| 2101 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 2100 | iscsi_conn_failure(conn, ISCSI_ERR_NOP_TIMEDOUT); |
| 2102 | return; | 2101 | return; |
| 2103 | } | 2102 | } |
| 2104 | 2103 | ||
| @@ -2279,7 +2278,8 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) | |||
| 2279 | cls_session = starget_to_session(scsi_target(sc->device)); | 2278 | cls_session = starget_to_session(scsi_target(sc->device)); |
| 2280 | session = cls_session->dd_data; | 2279 | session = cls_session->dd_data; |
| 2281 | 2280 | ||
| 2282 | ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun); | 2281 | ISCSI_DBG_EH(session, "LU Reset [sc %p lun %llu]\n", sc, |
| 2282 | sc->device->lun); | ||
| 2283 | 2283 | ||
| 2284 | mutex_lock(&session->eh_mutex); | 2284 | mutex_lock(&session->eh_mutex); |
| 2285 | spin_lock_bh(&session->frwd_lock); | 2285 | spin_lock_bh(&session->frwd_lock); |
| @@ -2971,7 +2971,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) | |||
| 2971 | */ | 2971 | */ |
| 2972 | for (;;) { | 2972 | for (;;) { |
| 2973 | spin_lock_irqsave(session->host->host_lock, flags); | 2973 | spin_lock_irqsave(session->host->host_lock, flags); |
| 2974 | if (!session->host->host_busy) { /* OK for ERL == 0 */ | 2974 | if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */ |
| 2975 | spin_unlock_irqrestore(session->host->host_lock, flags); | 2975 | spin_unlock_irqrestore(session->host->host_lock, flags); |
| 2976 | break; | 2976 | break; |
| 2977 | } | 2977 | } |
| @@ -2979,7 +2979,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) | |||
| 2979 | msleep_interruptible(500); | 2979 | msleep_interruptible(500); |
| 2980 | iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): " | 2980 | iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): " |
| 2981 | "host_busy %d host_failed %d\n", | 2981 | "host_busy %d host_failed %d\n", |
| 2982 | session->host->host_busy, | 2982 | atomic_read(&session->host->host_busy), |
| 2983 | session->host->host_failed); | 2983 | session->host->host_failed); |
| 2984 | /* | 2984 | /* |
| 2985 | * force eh_abort() to unblock | 2985 | * force eh_abort() to unblock |
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 25d0f127424d..24e477d2ea70 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c | |||
| @@ -404,7 +404,7 @@ static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd) | |||
| 404 | 404 | ||
| 405 | int_to_scsilun(cmd->device->lun, &lun); | 405 | int_to_scsilun(cmd->device->lun, &lun); |
| 406 | 406 | ||
| 407 | SAS_DPRINTK("eh: device %llx LUN %x has the task\n", | 407 | SAS_DPRINTK("eh: device %llx LUN %llx has the task\n", |
| 408 | SAS_ADDR(dev->sas_addr), | 408 | SAS_ADDR(dev->sas_addr), |
| 409 | cmd->device->lun); | 409 | cmd->device->lun); |
| 410 | 410 | ||
| @@ -490,7 +490,8 @@ static void sas_wait_eh(struct domain_device *dev) | |||
| 490 | } | 490 | } |
| 491 | EXPORT_SYMBOL(sas_wait_eh); | 491 | EXPORT_SYMBOL(sas_wait_eh); |
| 492 | 492 | ||
| 493 | static int sas_queue_reset(struct domain_device *dev, int reset_type, int lun, int wait) | 493 | static int sas_queue_reset(struct domain_device *dev, int reset_type, |
| 494 | u64 lun, int wait) | ||
| 494 | { | 495 | { |
| 495 | struct sas_ha_struct *ha = dev->port->ha; | 496 | struct sas_ha_struct *ha = dev->port->ha; |
| 496 | int scheduled = 0, tries = 100; | 497 | int scheduled = 0, tries = 100; |
| @@ -689,7 +690,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * | |||
| 689 | reset: | 690 | reset: |
| 690 | tmf_resp = sas_recover_lu(task->dev, cmd); | 691 | tmf_resp = sas_recover_lu(task->dev, cmd); |
| 691 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { | 692 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { |
| 692 | SAS_DPRINTK("dev %016llx LU %x is " | 693 | SAS_DPRINTK("dev %016llx LU %llx is " |
| 693 | "recovered\n", | 694 | "recovered\n", |
| 694 | SAS_ADDR(task->dev), | 695 | SAS_ADDR(task->dev), |
| 695 | cmd->device->lun); | 696 | cmd->device->lun); |
| @@ -742,7 +743,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * | |||
| 742 | * of effort could recover from errors. Quite | 743 | * of effort could recover from errors. Quite |
| 743 | * possibly the HA just disappeared. | 744 | * possibly the HA just disappeared. |
| 744 | */ | 745 | */ |
| 745 | SAS_DPRINTK("error from device %llx, LUN %x " | 746 | SAS_DPRINTK("error from device %llx, LUN %llx " |
| 746 | "couldn't be recovered in any way\n", | 747 | "couldn't be recovered in any way\n", |
| 747 | SAS_ADDR(task->dev->sas_addr), | 748 | SAS_ADDR(task->dev->sas_addr), |
| 748 | cmd->device->lun); | 749 | cmd->device->lun); |
| @@ -812,7 +813,7 @@ retry: | |||
| 812 | spin_unlock_irq(shost->host_lock); | 813 | spin_unlock_irq(shost->host_lock); |
| 813 | 814 | ||
| 814 | SAS_DPRINTK("Enter %s busy: %d failed: %d\n", | 815 | SAS_DPRINTK("Enter %s busy: %d failed: %d\n", |
| 815 | __func__, shost->host_busy, shost->host_failed); | 816 | __func__, atomic_read(&shost->host_busy), shost->host_failed); |
| 816 | /* | 817 | /* |
| 817 | * Deal with commands that still have SAS tasks (i.e. they didn't | 818 | * Deal with commands that still have SAS tasks (i.e. they didn't |
| 818 | * complete via the normal sas_task completion mechanism), | 819 | * complete via the normal sas_task completion mechanism), |
| @@ -857,7 +858,8 @@ out: | |||
| 857 | goto retry; | 858 | goto retry; |
| 858 | 859 | ||
| 859 | SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n", | 860 | SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n", |
| 860 | __func__, shost->host_busy, shost->host_failed, tries); | 861 | __func__, atomic_read(&shost->host_busy), |
| 862 | shost->host_failed, tries); | ||
| 861 | } | 863 | } |
| 862 | 864 | ||
| 863 | enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) | 865 | enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) |
| @@ -941,7 +943,7 @@ int sas_slave_configure(struct scsi_device *scsi_dev) | |||
| 941 | scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG); | 943 | scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG); |
| 942 | scsi_activate_tcq(scsi_dev, SAS_DEF_QD); | 944 | scsi_activate_tcq(scsi_dev, SAS_DEF_QD); |
| 943 | } else { | 945 | } else { |
| 944 | SAS_DPRINTK("device %llx, LUN %x doesn't support " | 946 | SAS_DPRINTK("device %llx, LUN %llx doesn't support " |
| 945 | "TCQ\n", SAS_ADDR(dev->sas_addr), | 947 | "TCQ\n", SAS_ADDR(dev->sas_addr), |
| 946 | scsi_dev->lun); | 948 | scsi_dev->lun); |
| 947 | scsi_dev->tagged_supported = 0; | 949 | scsi_dev->tagged_supported = 0; |
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c deleted file mode 100644 index 0707ecdbaa32..000000000000 --- a/drivers/scsi/libsrp.c +++ /dev/null | |||
| @@ -1,447 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * SCSI RDMA Protocol lib functions | ||
| 3 | * | ||
| 4 | * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or | ||
| 7 | * modify it under the terms of the GNU General Public License as | ||
| 8 | * published by the Free Software Foundation; either version 2 of the | ||
| 9 | * License, or (at your option) any later version. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, but | ||
| 12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | * General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 19 | * 02110-1301 USA | ||
| 20 | */ | ||
| 21 | #include <linux/err.h> | ||
| 22 | #include <linux/slab.h> | ||
| 23 | #include <linux/kfifo.h> | ||
| 24 | #include <linux/scatterlist.h> | ||
| 25 | #include <linux/dma-mapping.h> | ||
| 26 | #include <linux/module.h> | ||
| 27 | #include <scsi/scsi.h> | ||
| 28 | #include <scsi/scsi_cmnd.h> | ||
| 29 | #include <scsi/scsi_tcq.h> | ||
| 30 | #include <scsi/scsi_tgt.h> | ||
| 31 | #include <scsi/srp.h> | ||
| 32 | #include <scsi/libsrp.h> | ||
| 33 | |||
| 34 | enum srp_task_attributes { | ||
| 35 | SRP_SIMPLE_TASK = 0, | ||
| 36 | SRP_HEAD_TASK = 1, | ||
| 37 | SRP_ORDERED_TASK = 2, | ||
| 38 | SRP_ACA_TASK = 4 | ||
| 39 | }; | ||
| 40 | |||
| 41 | /* tmp - will replace with SCSI logging stuff */ | ||
| 42 | #define eprintk(fmt, args...) \ | ||
| 43 | do { \ | ||
| 44 | printk("%s(%d) " fmt, __func__, __LINE__, ##args); \ | ||
| 45 | } while (0) | ||
| 46 | /* #define dprintk eprintk */ | ||
| 47 | #define dprintk(fmt, args...) | ||
| 48 | |||
| 49 | static int srp_iu_pool_alloc(struct srp_queue *q, size_t max, | ||
| 50 | struct srp_buf **ring) | ||
| 51 | { | ||
| 52 | int i; | ||
| 53 | struct iu_entry *iue; | ||
| 54 | |||
| 55 | q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL); | ||
| 56 | if (!q->pool) | ||
| 57 | return -ENOMEM; | ||
| 58 | q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL); | ||
| 59 | if (!q->items) | ||
| 60 | goto free_pool; | ||
| 61 | |||
| 62 | spin_lock_init(&q->lock); | ||
| 63 | kfifo_init(&q->queue, (void *) q->pool, max * sizeof(void *)); | ||
| 64 | |||
| 65 | for (i = 0, iue = q->items; i < max; i++) { | ||
| 66 | kfifo_in(&q->queue, (void *) &iue, sizeof(void *)); | ||
| 67 | iue->sbuf = ring[i]; | ||
| 68 | iue++; | ||
| 69 | } | ||
| 70 | return 0; | ||
| 71 | |||
| 72 | kfree(q->items); | ||
| 73 | free_pool: | ||
| 74 | kfree(q->pool); | ||
| 75 | return -ENOMEM; | ||
| 76 | } | ||
| 77 | |||
| 78 | static void srp_iu_pool_free(struct srp_queue *q) | ||
| 79 | { | ||
| 80 | kfree(q->items); | ||
| 81 | kfree(q->pool); | ||
| 82 | } | ||
| 83 | |||
| 84 | static struct srp_buf **srp_ring_alloc(struct device *dev, | ||
| 85 | size_t max, size_t size) | ||
| 86 | { | ||
| 87 | int i; | ||
| 88 | struct srp_buf **ring; | ||
| 89 | |||
| 90 | ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL); | ||
| 91 | if (!ring) | ||
| 92 | return NULL; | ||
| 93 | |||
| 94 | for (i = 0; i < max; i++) { | ||
| 95 | ring[i] = kzalloc(sizeof(struct srp_buf), GFP_KERNEL); | ||
| 96 | if (!ring[i]) | ||
| 97 | goto out; | ||
| 98 | ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma, | ||
| 99 | GFP_KERNEL); | ||
| 100 | if (!ring[i]->buf) | ||
| 101 | goto out; | ||
| 102 | } | ||
| 103 | return ring; | ||
| 104 | |||
| 105 | out: | ||
| 106 | for (i = 0; i < max && ring[i]; i++) { | ||
| 107 | if (ring[i]->buf) | ||
| 108 | dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma); | ||
| 109 | kfree(ring[i]); | ||
| 110 | } | ||
| 111 | kfree(ring); | ||
| 112 | |||
| 113 | return NULL; | ||
| 114 | } | ||
| 115 | |||
| 116 | static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max, | ||
| 117 | size_t size) | ||
| 118 | { | ||
| 119 | int i; | ||
| 120 | |||
| 121 | for (i = 0; i < max; i++) { | ||
| 122 | dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma); | ||
| 123 | kfree(ring[i]); | ||
| 124 | } | ||
| 125 | kfree(ring); | ||
| 126 | } | ||
| 127 | |||
| 128 | int srp_target_alloc(struct srp_target *target, struct device *dev, | ||
| 129 | size_t nr, size_t iu_size) | ||
| 130 | { | ||
| 131 | int err; | ||
| 132 | |||
| 133 | spin_lock_init(&target->lock); | ||
| 134 | INIT_LIST_HEAD(&target->cmd_queue); | ||
| 135 | |||
| 136 | target->dev = dev; | ||
| 137 | dev_set_drvdata(target->dev, target); | ||
| 138 | |||
| 139 | target->srp_iu_size = iu_size; | ||
| 140 | target->rx_ring_size = nr; | ||
| 141 | target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size); | ||
| 142 | if (!target->rx_ring) | ||
| 143 | return -ENOMEM; | ||
| 144 | err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring); | ||
| 145 | if (err) | ||
| 146 | goto free_ring; | ||
| 147 | |||
| 148 | return 0; | ||
| 149 | |||
| 150 | free_ring: | ||
| 151 | srp_ring_free(target->dev, target->rx_ring, nr, iu_size); | ||
| 152 | return -ENOMEM; | ||
| 153 | } | ||
| 154 | EXPORT_SYMBOL_GPL(srp_target_alloc); | ||
| 155 | |||
| 156 | void srp_target_free(struct srp_target *target) | ||
| 157 | { | ||
| 158 | srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size, | ||
| 159 | target->srp_iu_size); | ||
| 160 | srp_iu_pool_free(&target->iu_queue); | ||
| 161 | } | ||
| 162 | EXPORT_SYMBOL_GPL(srp_target_free); | ||
| 163 | |||
| 164 | struct iu_entry *srp_iu_get(struct srp_target *target) | ||
| 165 | { | ||
| 166 | struct iu_entry *iue = NULL; | ||
| 167 | |||
| 168 | if (kfifo_out_locked(&target->iu_queue.queue, (void *) &iue, | ||
| 169 | sizeof(void *), &target->iu_queue.lock) != sizeof(void *)) { | ||
| 170 | WARN_ONCE(1, "unexpected fifo state"); | ||
| 171 | return NULL; | ||
| 172 | } | ||
| 173 | if (!iue) | ||
| 174 | return iue; | ||
| 175 | iue->target = target; | ||
| 176 | INIT_LIST_HEAD(&iue->ilist); | ||
| 177 | iue->flags = 0; | ||
| 178 | return iue; | ||
| 179 | } | ||
| 180 | EXPORT_SYMBOL_GPL(srp_iu_get); | ||
| 181 | |||
| 182 | void srp_iu_put(struct iu_entry *iue) | ||
| 183 | { | ||
| 184 | kfifo_in_locked(&iue->target->iu_queue.queue, (void *) &iue, | ||
| 185 | sizeof(void *), &iue->target->iu_queue.lock); | ||
| 186 | } | ||
| 187 | EXPORT_SYMBOL_GPL(srp_iu_put); | ||
| 188 | |||
| 189 | static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md, | ||
| 190 | enum dma_data_direction dir, srp_rdma_t rdma_io, | ||
| 191 | int dma_map, int ext_desc) | ||
| 192 | { | ||
| 193 | struct iu_entry *iue = NULL; | ||
| 194 | struct scatterlist *sg = NULL; | ||
| 195 | int err, nsg = 0, len; | ||
| 196 | |||
| 197 | if (dma_map) { | ||
| 198 | iue = (struct iu_entry *) sc->SCp.ptr; | ||
| 199 | sg = scsi_sglist(sc); | ||
| 200 | |||
| 201 | dprintk("%p %u %u %d\n", iue, scsi_bufflen(sc), | ||
| 202 | md->len, scsi_sg_count(sc)); | ||
| 203 | |||
| 204 | nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc), | ||
| 205 | DMA_BIDIRECTIONAL); | ||
| 206 | if (!nsg) { | ||
| 207 | printk("fail to map %p %d\n", iue, scsi_sg_count(sc)); | ||
| 208 | return 0; | ||
| 209 | } | ||
| 210 | len = min(scsi_bufflen(sc), md->len); | ||
| 211 | } else | ||
| 212 | len = md->len; | ||
| 213 | |||
| 214 | err = rdma_io(sc, sg, nsg, md, 1, dir, len); | ||
| 215 | |||
| 216 | if (dma_map) | ||
| 217 | dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL); | ||
| 218 | |||
| 219 | return err; | ||
| 220 | } | ||
| 221 | |||
| 222 | static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd, | ||
| 223 | struct srp_indirect_buf *id, | ||
| 224 | enum dma_data_direction dir, srp_rdma_t rdma_io, | ||
| 225 | int dma_map, int ext_desc) | ||
| 226 | { | ||
| 227 | struct iu_entry *iue = NULL; | ||
| 228 | struct srp_direct_buf *md = NULL; | ||
| 229 | struct scatterlist dummy, *sg = NULL; | ||
| 230 | dma_addr_t token = 0; | ||
| 231 | int err = 0; | ||
| 232 | int nmd, nsg = 0, len; | ||
| 233 | |||
| 234 | if (dma_map || ext_desc) { | ||
| 235 | iue = (struct iu_entry *) sc->SCp.ptr; | ||
| 236 | sg = scsi_sglist(sc); | ||
| 237 | |||
| 238 | dprintk("%p %u %u %d %d\n", | ||
| 239 | iue, scsi_bufflen(sc), id->len, | ||
| 240 | cmd->data_in_desc_cnt, cmd->data_out_desc_cnt); | ||
| 241 | } | ||
| 242 | |||
| 243 | nmd = id->table_desc.len / sizeof(struct srp_direct_buf); | ||
| 244 | |||
| 245 | if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) || | ||
| 246 | (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) { | ||
| 247 | md = &id->desc_list[0]; | ||
| 248 | goto rdma; | ||
| 249 | } | ||
| 250 | |||
| 251 | if (ext_desc && dma_map) { | ||
| 252 | md = dma_alloc_coherent(iue->target->dev, id->table_desc.len, | ||
| 253 | &token, GFP_KERNEL); | ||
| 254 | if (!md) { | ||
| 255 | eprintk("Can't get dma memory %u\n", id->table_desc.len); | ||
| 256 | return -ENOMEM; | ||
| 257 | } | ||
| 258 | |||
| 259 | sg_init_one(&dummy, md, id->table_desc.len); | ||
| 260 | sg_dma_address(&dummy) = token; | ||
| 261 | sg_dma_len(&dummy) = id->table_desc.len; | ||
| 262 | err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE, | ||
| 263 | id->table_desc.len); | ||
| 264 | if (err) { | ||
| 265 | eprintk("Error copying indirect table %d\n", err); | ||
| 266 | goto free_mem; | ||
| 267 | } | ||
| 268 | } else { | ||
| 269 | eprintk("This command uses external indirect buffer\n"); | ||
| 270 | return -EINVAL; | ||
| 271 | } | ||
| 272 | |||
| 273 | rdma: | ||
| 274 | if (dma_map) { | ||
| 275 | nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc), | ||
| 276 | DMA_BIDIRECTIONAL); | ||
| 277 | if (!nsg) { | ||
| 278 | eprintk("fail to map %p %d\n", iue, scsi_sg_count(sc)); | ||
| 279 | err = -EIO; | ||
| 280 | goto free_mem; | ||
| 281 | } | ||
| 282 | len = min(scsi_bufflen(sc), id->len); | ||
| 283 | } else | ||
| 284 | len = id->len; | ||
| 285 | |||
| 286 | err = rdma_io(sc, sg, nsg, md, nmd, dir, len); | ||
| 287 | |||
| 288 | if (dma_map) | ||
| 289 | dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL); | ||
| 290 | |||
| 291 | free_mem: | ||
| 292 | if (token && dma_map) | ||
| 293 | dma_free_coherent(iue->target->dev, id->table_desc.len, md, token); | ||
| 294 | |||
| 295 | return err; | ||
| 296 | } | ||
| 297 | |||
| 298 | static int data_out_desc_size(struct srp_cmd *cmd) | ||
| 299 | { | ||
| 300 | int size = 0; | ||
| 301 | u8 fmt = cmd->buf_fmt >> 4; | ||
| 302 | |||
| 303 | switch (fmt) { | ||
| 304 | case SRP_NO_DATA_DESC: | ||
| 305 | break; | ||
| 306 | case SRP_DATA_DESC_DIRECT: | ||
| 307 | size = sizeof(struct srp_direct_buf); | ||
| 308 | break; | ||
| 309 | case SRP_DATA_DESC_INDIRECT: | ||
| 310 | size = sizeof(struct srp_indirect_buf) + | ||
| 311 | sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt; | ||
| 312 | break; | ||
| 313 | default: | ||
| 314 | eprintk("client error. Invalid data_out_format %x\n", fmt); | ||
| 315 | break; | ||
| 316 | } | ||
| 317 | return size; | ||
| 318 | } | ||
| 319 | |||
| 320 | /* | ||
| 321 | * TODO: this can be called multiple times for a single command if it | ||
| 322 | * has very long data. | ||
| 323 | */ | ||
| 324 | int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd, | ||
| 325 | srp_rdma_t rdma_io, int dma_map, int ext_desc) | ||
| 326 | { | ||
| 327 | struct srp_direct_buf *md; | ||
| 328 | struct srp_indirect_buf *id; | ||
| 329 | enum dma_data_direction dir; | ||
| 330 | int offset, err = 0; | ||
| 331 | u8 format; | ||
| 332 | |||
| 333 | offset = cmd->add_cdb_len & ~3; | ||
| 334 | |||
| 335 | dir = srp_cmd_direction(cmd); | ||
| 336 | if (dir == DMA_FROM_DEVICE) | ||
| 337 | offset += data_out_desc_size(cmd); | ||
| 338 | |||
| 339 | if (dir == DMA_TO_DEVICE) | ||
| 340 | format = cmd->buf_fmt >> 4; | ||
| 341 | else | ||
| 342 | format = cmd->buf_fmt & ((1U << 4) - 1); | ||
| 343 | |||
| 344 | switch (format) { | ||
| 345 | case SRP_NO_DATA_DESC: | ||
| 346 | break; | ||
| 347 | case SRP_DATA_DESC_DIRECT: | ||
| 348 | md = (struct srp_direct_buf *) | ||
| 349 | (cmd->add_data + offset); | ||
| 350 | err = srp_direct_data(sc, md, dir, rdma_io, dma_map, ext_desc); | ||
| 351 | break; | ||
| 352 | case SRP_DATA_DESC_INDIRECT: | ||
| 353 | id = (struct srp_indirect_buf *) | ||
| 354 | (cmd->add_data + offset); | ||
| 355 | err = srp_indirect_data(sc, cmd, id, dir, rdma_io, dma_map, | ||
| 356 | ext_desc); | ||
| 357 | break; | ||
| 358 | default: | ||
| 359 | eprintk("Unknown format %d %x\n", dir, format); | ||
| 360 | err = -EINVAL; | ||
| 361 | } | ||
| 362 | |||
| 363 | return err; | ||
| 364 | } | ||
| 365 | EXPORT_SYMBOL_GPL(srp_transfer_data); | ||
| 366 | |||
| 367 | static int vscsis_data_length(struct srp_cmd *cmd, enum dma_data_direction dir) | ||
| 368 | { | ||
| 369 | struct srp_direct_buf *md; | ||
| 370 | struct srp_indirect_buf *id; | ||
| 371 | int len = 0, offset = cmd->add_cdb_len & ~3; | ||
| 372 | u8 fmt; | ||
| 373 | |||
| 374 | if (dir == DMA_TO_DEVICE) | ||
| 375 | fmt = cmd->buf_fmt >> 4; | ||
| 376 | else { | ||
| 377 | fmt = cmd->buf_fmt & ((1U << 4) - 1); | ||
| 378 | offset += data_out_desc_size(cmd); | ||
| 379 | } | ||
| 380 | |||
| 381 | switch (fmt) { | ||
| 382 | case SRP_NO_DATA_DESC: | ||
| 383 | break; | ||
| 384 | case SRP_DATA_DESC_DIRECT: | ||
| 385 | md = (struct srp_direct_buf *) (cmd->add_data + offset); | ||
| 386 | len = md->len; | ||
| 387 | break; | ||
| 388 | case SRP_DATA_DESC_INDIRECT: | ||
| 389 | id = (struct srp_indirect_buf *) (cmd->add_data + offset); | ||
| 390 | len = id->len; | ||
| 391 | break; | ||
| 392 | default: | ||
| 393 | eprintk("invalid data format %x\n", fmt); | ||
| 394 | break; | ||
| 395 | } | ||
| 396 | return len; | ||
| 397 | } | ||
| 398 | |||
| 399 | int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info, | ||
| 400 | u64 itn_id, u64 addr) | ||
| 401 | { | ||
| 402 | enum dma_data_direction dir; | ||
| 403 | struct scsi_cmnd *sc; | ||
| 404 | int tag, len, err; | ||
| 405 | |||
| 406 | switch (cmd->task_attr) { | ||
| 407 | case SRP_SIMPLE_TASK: | ||
| 408 | tag = MSG_SIMPLE_TAG; | ||
| 409 | break; | ||
| 410 | case SRP_ORDERED_TASK: | ||
| 411 | tag = MSG_ORDERED_TAG; | ||
| 412 | break; | ||
| 413 | case SRP_HEAD_TASK: | ||
| 414 | tag = MSG_HEAD_TAG; | ||
| 415 | break; | ||
| 416 | default: | ||
| 417 | eprintk("Task attribute %d not supported\n", cmd->task_attr); | ||
| 418 | tag = MSG_ORDERED_TAG; | ||
| 419 | } | ||
| 420 | |||
| 421 | dir = srp_cmd_direction(cmd); | ||
| 422 | len = vscsis_data_length(cmd, dir); | ||
| 423 | |||
| 424 | dprintk("%p %x %lx %d %d %d %llx\n", info, cmd->cdb[0], | ||
| 425 | cmd->lun, dir, len, tag, (unsigned long long) cmd->tag); | ||
| 426 | |||
| 427 | sc = scsi_host_get_command(shost, dir, GFP_KERNEL); | ||
| 428 | if (!sc) | ||
| 429 | return -ENOMEM; | ||
| 430 | |||
| 431 | sc->SCp.ptr = info; | ||
| 432 | memcpy(sc->cmnd, cmd->cdb, MAX_COMMAND_SIZE); | ||
| 433 | sc->sdb.length = len; | ||
| 434 | sc->sdb.table.sgl = (void *) (unsigned long) addr; | ||
| 435 | sc->tag = tag; | ||
| 436 | err = scsi_tgt_queue_command(sc, itn_id, (struct scsi_lun *)&cmd->lun, | ||
| 437 | cmd->tag); | ||
| 438 | if (err) | ||
| 439 | scsi_host_put_command(shost, sc); | ||
| 440 | |||
| 441 | return err; | ||
| 442 | } | ||
| 443 | EXPORT_SYMBOL_GPL(srp_cmd_queue); | ||
| 444 | |||
| 445 | MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions"); | ||
| 446 | MODULE_AUTHOR("FUJITA Tomonori"); | ||
| 447 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 1d7a5c34ee8c..6eed9e76a166 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
| @@ -1998,6 +1998,14 @@ lpfc_vport_param_show(name)\ | |||
| 1998 | lpfc_vport_param_init(name, defval, minval, maxval)\ | 1998 | lpfc_vport_param_init(name, defval, minval, maxval)\ |
| 1999 | static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) | 1999 | static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) |
| 2000 | 2000 | ||
| 2001 | #define LPFC_VPORT_ULL_ATTR_R(name, defval, minval, maxval, desc) \ | ||
| 2002 | static uint64_t lpfc_##name = defval;\ | ||
| 2003 | module_param(lpfc_##name, ullong, S_IRUGO);\ | ||
| 2004 | MODULE_PARM_DESC(lpfc_##name, desc);\ | ||
| 2005 | lpfc_vport_param_show(name)\ | ||
| 2006 | lpfc_vport_param_init(name, defval, minval, maxval)\ | ||
| 2007 | static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) | ||
| 2008 | |||
| 2001 | #define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ | 2009 | #define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ |
| 2002 | static uint lpfc_##name = defval;\ | 2010 | static uint lpfc_##name = defval;\ |
| 2003 | module_param(lpfc_##name, uint, S_IRUGO);\ | 2011 | module_param(lpfc_##name, uint, S_IRUGO);\ |
| @@ -4596,7 +4604,7 @@ LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands " | |||
| 4596 | # Value range is [0,65535]. Default value is 255. | 4604 | # Value range is [0,65535]. Default value is 255. |
| 4597 | # NOTE: The SCSI layer might probe all allowed LUN on some old targets. | 4605 | # NOTE: The SCSI layer might probe all allowed LUN on some old targets. |
| 4598 | */ | 4606 | */ |
| 4599 | LPFC_VPORT_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID"); | 4607 | LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID"); |
| 4600 | 4608 | ||
| 4601 | /* | 4609 | /* |
| 4602 | # lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring. | 4610 | # lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring. |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 06f9a5b79e66..a5769a9960ac 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
| @@ -8242,7 +8242,7 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba) | |||
| 8242 | if (rc) { | 8242 | if (rc) { |
| 8243 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 8243 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| 8244 | "0420 PCI enable MSI-X failed (%d)\n", rc); | 8244 | "0420 PCI enable MSI-X failed (%d)\n", rc); |
| 8245 | goto msi_fail_out; | 8245 | goto vec_fail_out; |
| 8246 | } | 8246 | } |
| 8247 | for (i = 0; i < LPFC_MSIX_VECTORS; i++) | 8247 | for (i = 0; i < LPFC_MSIX_VECTORS; i++) |
| 8248 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 8248 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| @@ -8320,6 +8320,8 @@ irq_fail_out: | |||
| 8320 | msi_fail_out: | 8320 | msi_fail_out: |
| 8321 | /* Unconfigure MSI-X capability structure */ | 8321 | /* Unconfigure MSI-X capability structure */ |
| 8322 | pci_disable_msix(phba->pcidev); | 8322 | pci_disable_msix(phba->pcidev); |
| 8323 | |||
| 8324 | vec_fail_out: | ||
| 8323 | return rc; | 8325 | return rc; |
| 8324 | } | 8326 | } |
| 8325 | 8327 | ||
| @@ -8812,7 +8814,7 @@ enable_msix_vectors: | |||
| 8812 | } else if (rc) { | 8814 | } else if (rc) { |
| 8813 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 8815 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
| 8814 | "0484 PCI enable MSI-X failed (%d)\n", rc); | 8816 | "0484 PCI enable MSI-X failed (%d)\n", rc); |
| 8815 | goto msi_fail_out; | 8817 | goto vec_fail_out; |
| 8816 | } | 8818 | } |
| 8817 | 8819 | ||
| 8818 | /* Log MSI-X vector assignment */ | 8820 | /* Log MSI-X vector assignment */ |
| @@ -8875,9 +8877,10 @@ cfg_fail_out: | |||
| 8875 | &phba->sli4_hba.fcp_eq_hdl[index]); | 8877 | &phba->sli4_hba.fcp_eq_hdl[index]); |
| 8876 | } | 8878 | } |
| 8877 | 8879 | ||
| 8878 | msi_fail_out: | ||
| 8879 | /* Unconfigure MSI-X capability structure */ | 8880 | /* Unconfigure MSI-X capability structure */ |
| 8880 | pci_disable_msix(phba->pcidev); | 8881 | pci_disable_msix(phba->pcidev); |
| 8882 | |||
| 8883 | vec_fail_out: | ||
| 8881 | return rc; | 8884 | return rc; |
| 8882 | } | 8885 | } |
| 8883 | 8886 | ||
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 2df11daad85b..7862c5540861 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
| @@ -258,7 +258,7 @@ static void | |||
| 258 | lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba, | 258 | lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba, |
| 259 | struct lpfc_vport *vport, | 259 | struct lpfc_vport *vport, |
| 260 | struct lpfc_nodelist *ndlp, | 260 | struct lpfc_nodelist *ndlp, |
| 261 | uint32_t lun, | 261 | uint64_t lun, |
| 262 | uint32_t old_val, | 262 | uint32_t old_val, |
| 263 | uint32_t new_val) | 263 | uint32_t new_val) |
| 264 | { | 264 | { |
| @@ -3823,7 +3823,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
| 3823 | if (rsplen != 0 && rsplen != 4 && rsplen != 8) { | 3823 | if (rsplen != 0 && rsplen != 4 && rsplen != 8) { |
| 3824 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | 3824 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, |
| 3825 | "2719 Invalid response length: " | 3825 | "2719 Invalid response length: " |
| 3826 | "tgt x%x lun x%x cmnd x%x rsplen x%x\n", | 3826 | "tgt x%x lun x%llx cmnd x%x rsplen x%x\n", |
| 3827 | cmnd->device->id, | 3827 | cmnd->device->id, |
| 3828 | cmnd->device->lun, cmnd->cmnd[0], | 3828 | cmnd->device->lun, cmnd->cmnd[0], |
| 3829 | rsplen); | 3829 | rsplen); |
| @@ -3834,7 +3834,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
| 3834 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | 3834 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, |
| 3835 | "2757 Protocol failure detected during " | 3835 | "2757 Protocol failure detected during " |
| 3836 | "processing of FCP I/O op: " | 3836 | "processing of FCP I/O op: " |
| 3837 | "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n", | 3837 | "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n", |
| 3838 | cmnd->device->id, | 3838 | cmnd->device->id, |
| 3839 | cmnd->device->lun, cmnd->cmnd[0], | 3839 | cmnd->device->lun, cmnd->cmnd[0], |
| 3840 | fcprsp->rspInfo3); | 3840 | fcprsp->rspInfo3); |
| @@ -4045,7 +4045,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
| 4045 | else | 4045 | else |
| 4046 | logit = LOG_FCP | LOG_FCP_UNDER; | 4046 | logit = LOG_FCP | LOG_FCP_UNDER; |
| 4047 | lpfc_printf_vlog(vport, KERN_WARNING, logit, | 4047 | lpfc_printf_vlog(vport, KERN_WARNING, logit, |
| 4048 | "9030 FCP cmd x%x failed <%d/%d> " | 4048 | "9030 FCP cmd x%x failed <%d/%lld> " |
| 4049 | "status: x%x result: x%x " | 4049 | "status: x%x result: x%x " |
| 4050 | "sid: x%x did: x%x oxid: x%x " | 4050 | "sid: x%x did: x%x oxid: x%x " |
| 4051 | "Data: x%x x%x\n", | 4051 | "Data: x%x x%x\n", |
| @@ -4157,7 +4157,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
| 4157 | uint32_t *lp = (uint32_t *)cmd->sense_buffer; | 4157 | uint32_t *lp = (uint32_t *)cmd->sense_buffer; |
| 4158 | 4158 | ||
| 4159 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, | 4159 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
| 4160 | "0710 Iodone <%d/%d> cmd %p, error " | 4160 | "0710 Iodone <%d/%llu> cmd %p, error " |
| 4161 | "x%x SNS x%x x%x Data: x%x x%x\n", | 4161 | "x%x SNS x%x x%x Data: x%x x%x\n", |
| 4162 | cmd->device->id, cmd->device->lun, cmd, | 4162 | cmd->device->id, cmd->device->lun, cmd, |
| 4163 | cmd->result, *lp, *(lp + 3), cmd->retries, | 4163 | cmd->result, *lp, *(lp + 3), cmd->retries, |
| @@ -4390,7 +4390,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
| 4390 | static int | 4390 | static int |
| 4391 | lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, | 4391 | lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, |
| 4392 | struct lpfc_scsi_buf *lpfc_cmd, | 4392 | struct lpfc_scsi_buf *lpfc_cmd, |
| 4393 | unsigned int lun, | 4393 | uint64_t lun, |
| 4394 | uint8_t task_mgmt_cmd) | 4394 | uint8_t task_mgmt_cmd) |
| 4395 | { | 4395 | { |
| 4396 | struct lpfc_iocbq *piocbq; | 4396 | struct lpfc_iocbq *piocbq; |
| @@ -4719,12 +4719,12 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) | |||
| 4719 | atomic_dec(&ndlp->cmd_pending); | 4719 | atomic_dec(&ndlp->cmd_pending); |
| 4720 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, | 4720 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
| 4721 | "3376 FCP could not issue IOCB err %x" | 4721 | "3376 FCP could not issue IOCB err %x" |
| 4722 | "FCP cmd x%x <%d/%d> " | 4722 | "FCP cmd x%x <%d/%llu> " |
| 4723 | "sid: x%x did: x%x oxid: x%x " | 4723 | "sid: x%x did: x%x oxid: x%x " |
| 4724 | "Data: x%x x%x x%x x%x\n", | 4724 | "Data: x%x x%x x%x x%x\n", |
| 4725 | err, cmnd->cmnd[0], | 4725 | err, cmnd->cmnd[0], |
| 4726 | cmnd->device ? cmnd->device->id : 0xffff, | 4726 | cmnd->device ? cmnd->device->id : 0xffff, |
| 4727 | cmnd->device ? cmnd->device->lun : 0xffff, | 4727 | cmnd->device ? cmnd->device->lun : (u64) -1, |
| 4728 | vport->fc_myDID, ndlp->nlp_DID, | 4728 | vport->fc_myDID, ndlp->nlp_DID, |
| 4729 | phba->sli_rev == LPFC_SLI_REV4 ? | 4729 | phba->sli_rev == LPFC_SLI_REV4 ? |
| 4730 | lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, | 4730 | lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, |
| @@ -4807,7 +4807,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
| 4807 | spin_unlock_irqrestore(&phba->hbalock, flags); | 4807 | spin_unlock_irqrestore(&phba->hbalock, flags); |
| 4808 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, | 4808 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
| 4809 | "2873 SCSI Layer I/O Abort Request IO CMPL Status " | 4809 | "2873 SCSI Layer I/O Abort Request IO CMPL Status " |
| 4810 | "x%x ID %d LUN %d\n", | 4810 | "x%x ID %d LUN %llu\n", |
| 4811 | SUCCESS, cmnd->device->id, cmnd->device->lun); | 4811 | SUCCESS, cmnd->device->id, cmnd->device->lun); |
| 4812 | return SUCCESS; | 4812 | return SUCCESS; |
| 4813 | } | 4813 | } |
| @@ -4924,7 +4924,7 @@ wait_for_cmpl: | |||
| 4924 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | 4924 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, |
| 4925 | "0748 abort handler timed out waiting " | 4925 | "0748 abort handler timed out waiting " |
| 4926 | "for abortng I/O (xri:x%x) to complete: " | 4926 | "for abortng I/O (xri:x%x) to complete: " |
| 4927 | "ret %#x, ID %d, LUN %d\n", | 4927 | "ret %#x, ID %d, LUN %llu\n", |
| 4928 | iocb->sli4_xritag, ret, | 4928 | iocb->sli4_xritag, ret, |
| 4929 | cmnd->device->id, cmnd->device->lun); | 4929 | cmnd->device->id, cmnd->device->lun); |
| 4930 | } | 4930 | } |
| @@ -4935,7 +4935,7 @@ out_unlock: | |||
| 4935 | out: | 4935 | out: |
| 4936 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, | 4936 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
| 4937 | "0749 SCSI Layer I/O Abort Request Status x%x ID %d " | 4937 | "0749 SCSI Layer I/O Abort Request Status x%x ID %d " |
| 4938 | "LUN %d\n", ret, cmnd->device->id, | 4938 | "LUN %llu\n", ret, cmnd->device->id, |
| 4939 | cmnd->device->lun); | 4939 | cmnd->device->lun); |
| 4940 | return ret; | 4940 | return ret; |
| 4941 | } | 4941 | } |
| @@ -5047,7 +5047,7 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd) | |||
| 5047 | **/ | 5047 | **/ |
| 5048 | static int | 5048 | static int |
| 5049 | lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, | 5049 | lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, |
| 5050 | unsigned tgt_id, unsigned int lun_id, | 5050 | unsigned tgt_id, uint64_t lun_id, |
| 5051 | uint8_t task_mgmt_cmd) | 5051 | uint8_t task_mgmt_cmd) |
| 5052 | { | 5052 | { |
| 5053 | struct lpfc_hba *phba = vport->phba; | 5053 | struct lpfc_hba *phba = vport->phba; |
| @@ -5083,7 +5083,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, | |||
| 5083 | iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; | 5083 | iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; |
| 5084 | 5084 | ||
| 5085 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, | 5085 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, |
| 5086 | "0702 Issue %s to TGT %d LUN %d " | 5086 | "0702 Issue %s to TGT %d LUN %llu " |
| 5087 | "rpi x%x nlp_flag x%x Data: x%x x%x\n", | 5087 | "rpi x%x nlp_flag x%x Data: x%x x%x\n", |
| 5088 | lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, | 5088 | lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, |
| 5089 | pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, | 5089 | pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, |
| @@ -5094,7 +5094,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, | |||
| 5094 | if ((status != IOCB_SUCCESS) || | 5094 | if ((status != IOCB_SUCCESS) || |
| 5095 | (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) { | 5095 | (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) { |
| 5096 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | 5096 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, |
| 5097 | "0727 TMF %s to TGT %d LUN %d failed (%d, %d) " | 5097 | "0727 TMF %s to TGT %d LUN %llu failed (%d, %d) " |
| 5098 | "iocb_flag x%x\n", | 5098 | "iocb_flag x%x\n", |
| 5099 | lpfc_taskmgmt_name(task_mgmt_cmd), | 5099 | lpfc_taskmgmt_name(task_mgmt_cmd), |
| 5100 | tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, | 5100 | tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, |
| @@ -5238,7 +5238,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) | |||
| 5238 | struct lpfc_rport_data *rdata; | 5238 | struct lpfc_rport_data *rdata; |
| 5239 | struct lpfc_nodelist *pnode; | 5239 | struct lpfc_nodelist *pnode; |
| 5240 | unsigned tgt_id = cmnd->device->id; | 5240 | unsigned tgt_id = cmnd->device->id; |
| 5241 | unsigned int lun_id = cmnd->device->lun; | 5241 | uint64_t lun_id = cmnd->device->lun; |
| 5242 | struct lpfc_scsi_event_header scsi_event; | 5242 | struct lpfc_scsi_event_header scsi_event; |
| 5243 | int status; | 5243 | int status; |
| 5244 | 5244 | ||
| @@ -5273,7 +5273,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) | |||
| 5273 | FCP_LUN_RESET); | 5273 | FCP_LUN_RESET); |
| 5274 | 5274 | ||
| 5275 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | 5275 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, |
| 5276 | "0713 SCSI layer issued Device Reset (%d, %d) " | 5276 | "0713 SCSI layer issued Device Reset (%d, %llu) " |
| 5277 | "return x%x\n", tgt_id, lun_id, status); | 5277 | "return x%x\n", tgt_id, lun_id, status); |
| 5278 | 5278 | ||
| 5279 | /* | 5279 | /* |
| @@ -5308,7 +5308,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) | |||
| 5308 | struct lpfc_rport_data *rdata; | 5308 | struct lpfc_rport_data *rdata; |
| 5309 | struct lpfc_nodelist *pnode; | 5309 | struct lpfc_nodelist *pnode; |
| 5310 | unsigned tgt_id = cmnd->device->id; | 5310 | unsigned tgt_id = cmnd->device->id; |
| 5311 | unsigned int lun_id = cmnd->device->lun; | 5311 | uint64_t lun_id = cmnd->device->lun; |
| 5312 | struct lpfc_scsi_event_header scsi_event; | 5312 | struct lpfc_scsi_event_header scsi_event; |
| 5313 | int status; | 5313 | int status; |
| 5314 | 5314 | ||
| @@ -5343,7 +5343,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) | |||
| 5343 | FCP_TARGET_RESET); | 5343 | FCP_TARGET_RESET); |
| 5344 | 5344 | ||
| 5345 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | 5345 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, |
| 5346 | "0723 SCSI layer issued Target Reset (%d, %d) " | 5346 | "0723 SCSI layer issued Target Reset (%d, %llu) " |
| 5347 | "return x%x\n", tgt_id, lun_id, status); | 5347 | "return x%x\n", tgt_id, lun_id, status); |
| 5348 | 5348 | ||
| 5349 | /* | 5349 | /* |
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index b7770516f4c2..ac5d94cfd52f 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c | |||
| @@ -1860,7 +1860,7 @@ megaraid_info(struct Scsi_Host *host) | |||
| 1860 | "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns", | 1860 | "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns", |
| 1861 | adapter->fw_version, adapter->product_info.max_commands, | 1861 | adapter->fw_version, adapter->product_info.max_commands, |
| 1862 | adapter->host->max_id, adapter->host->max_channel, | 1862 | adapter->host->max_id, adapter->host->max_channel, |
| 1863 | adapter->host->max_lun); | 1863 | (u32)adapter->host->max_lun); |
| 1864 | return buffer; | 1864 | return buffer; |
| 1865 | } | 1865 | } |
| 1866 | 1866 | ||
| @@ -1941,8 +1941,8 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor) | |||
| 1941 | 1941 | ||
| 1942 | printk(KERN_WARNING "megaraid: %s cmd=%x <c=%d t=%d l=%d>\n", | 1942 | printk(KERN_WARNING "megaraid: %s cmd=%x <c=%d t=%d l=%d>\n", |
| 1943 | (aor == SCB_ABORT)? "ABORTING":"RESET", | 1943 | (aor == SCB_ABORT)? "ABORTING":"RESET", |
| 1944 | cmd->cmnd[0], cmd->device->channel, | 1944 | cmd->cmnd[0], cmd->device->channel, |
| 1945 | cmd->device->id, cmd->device->lun); | 1945 | cmd->device->id, (u32)cmd->device->lun); |
| 1946 | 1946 | ||
| 1947 | if(list_empty(&adapter->pending_list)) | 1947 | if(list_empty(&adapter->pending_list)) |
| 1948 | return FALSE; | 1948 | return FALSE; |
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h index 5ead1283a844..1d037ed52c33 100644 --- a/drivers/scsi/megaraid/mega_common.h +++ b/drivers/scsi/megaraid/mega_common.h | |||
| @@ -204,7 +204,7 @@ typedef struct { | |||
| 204 | #define SCP2HOSTDATA(scp) SCP2HOST(scp)->hostdata // to soft state | 204 | #define SCP2HOSTDATA(scp) SCP2HOST(scp)->hostdata // to soft state |
| 205 | #define SCP2CHANNEL(scp) (scp)->device->channel // to channel | 205 | #define SCP2CHANNEL(scp) (scp)->device->channel // to channel |
| 206 | #define SCP2TARGET(scp) (scp)->device->id // to target | 206 | #define SCP2TARGET(scp) (scp)->device->id // to target |
| 207 | #define SCP2LUN(scp) (scp)->device->lun // to LUN | 207 | #define SCP2LUN(scp) (u32)(scp)->device->lun // to LUN |
| 208 | 208 | ||
| 209 | // generic macro to convert scsi command and host to controller's soft state | 209 | // generic macro to convert scsi command and host to controller's soft state |
| 210 | #define SCSIHOST2ADAP(host) (((caddr_t *)(host->hostdata))[0]) | 210 | #define SCSIHOST2ADAP(host) (((caddr_t *)(host->hostdata))[0]) |
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index e2237a97cb9d..531dce419c18 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c | |||
| @@ -998,8 +998,9 @@ megaraid_alloc_cmd_packets(adapter_t *adapter) | |||
| 998 | * Allocate the common 16-byte aligned memory for the handshake | 998 | * Allocate the common 16-byte aligned memory for the handshake |
| 999 | * mailbox. | 999 | * mailbox. |
| 1000 | */ | 1000 | */ |
| 1001 | raid_dev->una_mbox64 = pci_alloc_consistent(adapter->pdev, | 1001 | raid_dev->una_mbox64 = pci_zalloc_consistent(adapter->pdev, |
| 1002 | sizeof(mbox64_t), &raid_dev->una_mbox64_dma); | 1002 | sizeof(mbox64_t), |
| 1003 | &raid_dev->una_mbox64_dma); | ||
| 1003 | 1004 | ||
| 1004 | if (!raid_dev->una_mbox64) { | 1005 | if (!raid_dev->una_mbox64) { |
| 1005 | con_log(CL_ANN, (KERN_WARNING | 1006 | con_log(CL_ANN, (KERN_WARNING |
| @@ -1007,7 +1008,6 @@ megaraid_alloc_cmd_packets(adapter_t *adapter) | |||
| 1007 | __LINE__)); | 1008 | __LINE__)); |
| 1008 | return -1; | 1009 | return -1; |
| 1009 | } | 1010 | } |
| 1010 | memset(raid_dev->una_mbox64, 0, sizeof(mbox64_t)); | ||
| 1011 | 1011 | ||
| 1012 | /* | 1012 | /* |
| 1013 | * Align the mailbox at 16-byte boundary | 1013 | * Align the mailbox at 16-byte boundary |
| @@ -1026,8 +1026,8 @@ megaraid_alloc_cmd_packets(adapter_t *adapter) | |||
| 1026 | align; | 1026 | align; |
| 1027 | 1027 | ||
| 1028 | // Allocate memory for commands issued internally | 1028 | // Allocate memory for commands issued internally |
| 1029 | adapter->ibuf = pci_alloc_consistent(pdev, MBOX_IBUF_SIZE, | 1029 | adapter->ibuf = pci_zalloc_consistent(pdev, MBOX_IBUF_SIZE, |
| 1030 | &adapter->ibuf_dma_h); | 1030 | &adapter->ibuf_dma_h); |
| 1031 | if (!adapter->ibuf) { | 1031 | if (!adapter->ibuf) { |
| 1032 | 1032 | ||
| 1033 | con_log(CL_ANN, (KERN_WARNING | 1033 | con_log(CL_ANN, (KERN_WARNING |
| @@ -1036,7 +1036,6 @@ megaraid_alloc_cmd_packets(adapter_t *adapter) | |||
| 1036 | 1036 | ||
| 1037 | goto out_free_common_mbox; | 1037 | goto out_free_common_mbox; |
| 1038 | } | 1038 | } |
| 1039 | memset(adapter->ibuf, 0, MBOX_IBUF_SIZE); | ||
| 1040 | 1039 | ||
| 1041 | // Allocate memory for our SCSI Command Blocks and their associated | 1040 | // Allocate memory for our SCSI Command Blocks and their associated |
| 1042 | // memory | 1041 | // memory |
| @@ -2972,8 +2971,8 @@ megaraid_mbox_product_info(adapter_t *adapter) | |||
| 2972 | * Issue an ENQUIRY3 command to find out certain adapter parameters, | 2971 | * Issue an ENQUIRY3 command to find out certain adapter parameters, |
| 2973 | * e.g., max channels, max commands etc. | 2972 | * e.g., max channels, max commands etc. |
| 2974 | */ | 2973 | */ |
| 2975 | pinfo = pci_alloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t), | 2974 | pinfo = pci_zalloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t), |
| 2976 | &pinfo_dma_h); | 2975 | &pinfo_dma_h); |
| 2977 | 2976 | ||
| 2978 | if (pinfo == NULL) { | 2977 | if (pinfo == NULL) { |
| 2979 | con_log(CL_ANN, (KERN_WARNING | 2978 | con_log(CL_ANN, (KERN_WARNING |
| @@ -2982,7 +2981,6 @@ megaraid_mbox_product_info(adapter_t *adapter) | |||
| 2982 | 2981 | ||
| 2983 | return -1; | 2982 | return -1; |
| 2984 | } | 2983 | } |
| 2985 | memset(pinfo, 0, sizeof(mraid_pinfo_t)); | ||
| 2986 | 2984 | ||
| 2987 | mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; | 2985 | mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; |
| 2988 | memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); | 2986 | memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); |
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 112799b131a9..22a04e37b70a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
| @@ -2038,9 +2038,9 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance, | |||
| 2038 | 2038 | ||
| 2039 | if (initial) { | 2039 | if (initial) { |
| 2040 | instance->hb_host_mem = | 2040 | instance->hb_host_mem = |
| 2041 | pci_alloc_consistent(instance->pdev, | 2041 | pci_zalloc_consistent(instance->pdev, |
| 2042 | sizeof(struct MR_CTRL_HB_HOST_MEM), | 2042 | sizeof(struct MR_CTRL_HB_HOST_MEM), |
| 2043 | &instance->hb_host_mem_h); | 2043 | &instance->hb_host_mem_h); |
| 2044 | if (!instance->hb_host_mem) { | 2044 | if (!instance->hb_host_mem) { |
| 2045 | printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate" | 2045 | printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate" |
| 2046 | " memory for heartbeat host memory for " | 2046 | " memory for heartbeat host memory for " |
| @@ -2048,8 +2048,6 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance, | |||
| 2048 | retval = -ENOMEM; | 2048 | retval = -ENOMEM; |
| 2049 | goto out; | 2049 | goto out; |
| 2050 | } | 2050 | } |
| 2051 | memset(instance->hb_host_mem, 0, | ||
| 2052 | sizeof(struct MR_CTRL_HB_HOST_MEM)); | ||
| 2053 | } | 2051 | } |
| 2054 | 2052 | ||
| 2055 | memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); | 2053 | memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 22600419ae9f..3ed03dfab76c 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
| @@ -1690,7 +1690,7 @@ NonFastPath: | |||
| 1690 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | 1690 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
| 1691 | } | 1691 | } |
| 1692 | io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); | 1692 | io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); |
| 1693 | io_request->LUN[1] = scmd->device->lun; | 1693 | int_to_scsilun(scmd->device->lun, (struct scsi_lun *)io_request->LUN); |
| 1694 | } | 1694 | } |
| 1695 | 1695 | ||
| 1696 | /** | 1696 | /** |
| @@ -1713,7 +1713,7 @@ megasas_build_io_fusion(struct megasas_instance *instance, | |||
| 1713 | device_id = MEGASAS_DEV_INDEX(instance, scp); | 1713 | device_id = MEGASAS_DEV_INDEX(instance, scp); |
| 1714 | 1714 | ||
| 1715 | /* Zero out some fields so they don't get reused */ | 1715 | /* Zero out some fields so they don't get reused */ |
| 1716 | io_request->LUN[1] = 0; | 1716 | memset(io_request->LUN, 0x0, 8); |
| 1717 | io_request->CDB.EEDP32.PrimaryReferenceTag = 0; | 1717 | io_request->CDB.EEDP32.PrimaryReferenceTag = 0; |
| 1718 | io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0; | 1718 | io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0; |
| 1719 | io_request->EEDPFlags = 0; | 1719 | io_request->EEDPFlags = 0; |
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index e8a04ae3276a..57a95e2c3442 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c | |||
| @@ -1230,7 +1230,7 @@ static void handle_msgin(struct mesh_state *ms) | |||
| 1230 | ms->msgphase = msg_out; | 1230 | ms->msgphase = msg_out; |
| 1231 | } else if (code != cmd->device->lun + IDENTIFY_BASE) { | 1231 | } else if (code != cmd->device->lun + IDENTIFY_BASE) { |
| 1232 | printk(KERN_WARNING "mesh: lun mismatch " | 1232 | printk(KERN_WARNING "mesh: lun mismatch " |
| 1233 | "(%d != %d) on reselection from " | 1233 | "(%d != %llu) on reselection from " |
| 1234 | "target %d\n", code - IDENTIFY_BASE, | 1234 | "target %d\n", code - IDENTIFY_BASE, |
| 1235 | cmd->device->lun, ms->conn_tgt); | 1235 | cmd->device->lun, ms->conn_tgt); |
| 1236 | } | 1236 | } |
| @@ -1915,14 +1915,12 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match) | |||
| 1915 | /* We use the PCI APIs for now until the generic one gets fixed | 1915 | /* We use the PCI APIs for now until the generic one gets fixed |
| 1916 | * enough or until we get some macio-specific versions | 1916 | * enough or until we get some macio-specific versions |
| 1917 | */ | 1917 | */ |
| 1918 | dma_cmd_space = pci_alloc_consistent(macio_get_pci_dev(mdev), | 1918 | dma_cmd_space = pci_zalloc_consistent(macio_get_pci_dev(mdev), |
| 1919 | ms->dma_cmd_size, | 1919 | ms->dma_cmd_size, &dma_cmd_bus); |
| 1920 | &dma_cmd_bus); | ||
| 1921 | if (dma_cmd_space == NULL) { | 1920 | if (dma_cmd_space == NULL) { |
| 1922 | printk(KERN_ERR "mesh: can't allocate DMA table\n"); | 1921 | printk(KERN_ERR "mesh: can't allocate DMA table\n"); |
| 1923 | goto out_unmap; | 1922 | goto out_unmap; |
| 1924 | } | 1923 | } |
| 1925 | memset(dma_cmd_space, 0, ms->dma_cmd_size); | ||
| 1926 | 1924 | ||
| 1927 | ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space); | 1925 | ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space); |
| 1928 | ms->dma_cmd_space = dma_cmd_space; | 1926 | ms->dma_cmd_space = dma_cmd_space; |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 8b88118e20e6..2f262be890c5 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
| @@ -277,7 +277,7 @@ mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc) | |||
| 277 | ioc->fault_reset_work_q = NULL; | 277 | ioc->fault_reset_work_q = NULL; |
| 278 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); | 278 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); |
| 279 | if (wq) { | 279 | if (wq) { |
| 280 | if (!cancel_delayed_work(&ioc->fault_reset_work)) | 280 | if (!cancel_delayed_work_sync(&ioc->fault_reset_work)) |
| 281 | flush_workqueue(wq); | 281 | flush_workqueue(wq); |
| 282 | destroy_workqueue(wq); | 282 | destroy_workqueue(wq); |
| 283 | } | 283 | } |
| @@ -1332,53 +1332,35 @@ _base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector) | |||
| 1332 | static void | 1332 | static void |
| 1333 | _base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc) | 1333 | _base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc) |
| 1334 | { | 1334 | { |
| 1335 | struct adapter_reply_queue *reply_q; | 1335 | unsigned int cpu, nr_cpus, nr_msix, index = 0; |
| 1336 | int cpu_id; | ||
| 1337 | int cpu_grouping, loop, grouping, grouping_mod; | ||
| 1338 | 1336 | ||
| 1339 | if (!_base_is_controller_msix_enabled(ioc)) | 1337 | if (!_base_is_controller_msix_enabled(ioc)) |
| 1340 | return; | 1338 | return; |
| 1341 | 1339 | ||
| 1342 | memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); | 1340 | memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); |
| 1343 | /* when there are more cpus than available msix vectors, | 1341 | |
| 1344 | * then group cpus togeather on same irq | 1342 | nr_cpus = num_online_cpus(); |
| 1345 | */ | 1343 | nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count, |
| 1346 | if (ioc->cpu_count > ioc->msix_vector_count) { | 1344 | ioc->facts.MaxMSIxVectors); |
| 1347 | grouping = ioc->cpu_count / ioc->msix_vector_count; | 1345 | if (!nr_msix) |
| 1348 | grouping_mod = ioc->cpu_count % ioc->msix_vector_count; | 1346 | return; |
| 1349 | if (grouping < 2 || (grouping == 2 && !grouping_mod)) | 1347 | |
| 1350 | cpu_grouping = 2; | 1348 | cpu = cpumask_first(cpu_online_mask); |
| 1351 | else if (grouping < 4 || (grouping == 4 && !grouping_mod)) | 1349 | |
| 1352 | cpu_grouping = 4; | 1350 | do { |
| 1353 | else if (grouping < 8 || (grouping == 8 && !grouping_mod)) | 1351 | unsigned int i, group = nr_cpus / nr_msix; |
| 1354 | cpu_grouping = 8; | 1352 | |
| 1355 | else | 1353 | if (index < nr_cpus % nr_msix) |
| 1356 | cpu_grouping = 16; | 1354 | group++; |
| 1357 | } else | 1355 | |
| 1358 | cpu_grouping = 0; | 1356 | for (i = 0 ; i < group ; i++) { |
| 1359 | 1357 | ioc->cpu_msix_table[cpu] = index; | |
| 1360 | loop = 0; | 1358 | cpu = cpumask_next(cpu, cpu_online_mask); |
| 1361 | reply_q = list_entry(ioc->reply_queue_list.next, | ||
| 1362 | struct adapter_reply_queue, list); | ||
| 1363 | for_each_online_cpu(cpu_id) { | ||
| 1364 | if (!cpu_grouping) { | ||
| 1365 | ioc->cpu_msix_table[cpu_id] = reply_q->msix_index; | ||
| 1366 | reply_q = list_entry(reply_q->list.next, | ||
| 1367 | struct adapter_reply_queue, list); | ||
| 1368 | } else { | ||
| 1369 | if (loop < cpu_grouping) { | ||
| 1370 | ioc->cpu_msix_table[cpu_id] = | ||
| 1371 | reply_q->msix_index; | ||
| 1372 | loop++; | ||
| 1373 | } else { | ||
| 1374 | reply_q = list_entry(reply_q->list.next, | ||
| 1375 | struct adapter_reply_queue, list); | ||
| 1376 | ioc->cpu_msix_table[cpu_id] = | ||
| 1377 | reply_q->msix_index; | ||
| 1378 | loop = 1; | ||
| 1379 | } | ||
| 1380 | } | 1359 | } |
| 1381 | } | 1360 | |
| 1361 | index++; | ||
| 1362 | |||
| 1363 | } while (cpu < nr_cpus); | ||
| 1382 | } | 1364 | } |
| 1383 | 1365 | ||
| 1384 | /** | 1366 | /** |
| @@ -4295,12 +4277,13 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | |||
| 4295 | goto out_free_resources; | 4277 | goto out_free_resources; |
| 4296 | 4278 | ||
| 4297 | if (ioc->is_warpdrive) { | 4279 | if (ioc->is_warpdrive) { |
| 4298 | ioc->reply_post_host_index[0] = | 4280 | ioc->reply_post_host_index[0] = (resource_size_t __iomem *) |
| 4299 | (resource_size_t *)&ioc->chip->ReplyPostHostIndex; | 4281 | &ioc->chip->ReplyPostHostIndex; |
| 4300 | 4282 | ||
| 4301 | for (i = 1; i < ioc->cpu_msix_table_sz; i++) | 4283 | for (i = 1; i < ioc->cpu_msix_table_sz; i++) |
| 4302 | ioc->reply_post_host_index[i] = (resource_size_t *) | 4284 | ioc->reply_post_host_index[i] = |
| 4303 | ((u8 *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) | 4285 | (resource_size_t __iomem *) |
| 4286 | ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) | ||
| 4304 | * 4))); | 4287 | * 4))); |
| 4305 | } | 4288 | } |
| 4306 | 4289 | ||
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index fd3b998c75b1..0ac5815a7f91 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h | |||
| @@ -837,7 +837,7 @@ struct MPT2SAS_ADAPTER { | |||
| 837 | u8 msix_enable; | 837 | u8 msix_enable; |
| 838 | u16 msix_vector_count; | 838 | u16 msix_vector_count; |
| 839 | u8 *cpu_msix_table; | 839 | u8 *cpu_msix_table; |
| 840 | resource_size_t **reply_post_host_index; | 840 | resource_size_t __iomem **reply_post_host_index; |
| 841 | u16 cpu_msix_table_sz; | 841 | u16 cpu_msix_table_sz; |
| 842 | u32 ioc_reset_count; | 842 | u32 ioc_reset_count; |
| 843 | MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds; | 843 | MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds; |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 5055f925d2cd..dd461015813f 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c | |||
| @@ -173,7 +173,7 @@ struct fw_event_work { | |||
| 173 | u8 VP_ID; | 173 | u8 VP_ID; |
| 174 | u8 ignore; | 174 | u8 ignore; |
| 175 | u16 event; | 175 | u16 event; |
| 176 | void *event_data; | 176 | char event_data[0] __aligned(4); |
| 177 | }; | 177 | }; |
| 178 | 178 | ||
| 179 | /* raid transport support */ | 179 | /* raid transport support */ |
| @@ -1292,7 +1292,8 @@ _scsih_target_alloc(struct scsi_target *starget) | |||
| 1292 | unsigned long flags; | 1292 | unsigned long flags; |
| 1293 | struct sas_rphy *rphy; | 1293 | struct sas_rphy *rphy; |
| 1294 | 1294 | ||
| 1295 | sas_target_priv_data = kzalloc(sizeof(struct scsi_target), GFP_KERNEL); | 1295 | sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data), |
| 1296 | GFP_KERNEL); | ||
| 1296 | if (!sas_target_priv_data) | 1297 | if (!sas_target_priv_data) |
| 1297 | return -ENOMEM; | 1298 | return -ENOMEM; |
| 1298 | 1299 | ||
| @@ -1406,7 +1407,8 @@ _scsih_slave_alloc(struct scsi_device *sdev) | |||
| 1406 | struct _sas_device *sas_device; | 1407 | struct _sas_device *sas_device; |
| 1407 | unsigned long flags; | 1408 | unsigned long flags; |
| 1408 | 1409 | ||
| 1409 | sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL); | 1410 | sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data), |
| 1411 | GFP_KERNEL); | ||
| 1410 | if (!sas_device_priv_data) | 1412 | if (!sas_device_priv_data) |
| 1411 | return -ENOMEM; | 1413 | return -ENOMEM; |
| 1412 | 1414 | ||
| @@ -2832,7 +2834,6 @@ _scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work | |||
| 2832 | 2834 | ||
| 2833 | spin_lock_irqsave(&ioc->fw_event_lock, flags); | 2835 | spin_lock_irqsave(&ioc->fw_event_lock, flags); |
| 2834 | list_del(&fw_event->list); | 2836 | list_del(&fw_event->list); |
| 2835 | kfree(fw_event->event_data); | ||
| 2836 | kfree(fw_event); | 2837 | kfree(fw_event); |
| 2837 | spin_unlock_irqrestore(&ioc->fw_event_lock, flags); | 2838 | spin_unlock_irqrestore(&ioc->fw_event_lock, flags); |
| 2838 | } | 2839 | } |
| @@ -2899,11 +2900,10 @@ _scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc) | |||
| 2899 | return; | 2900 | return; |
| 2900 | 2901 | ||
| 2901 | list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) { | 2902 | list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) { |
| 2902 | if (cancel_delayed_work(&fw_event->delayed_work)) { | 2903 | if (cancel_delayed_work_sync(&fw_event->delayed_work)) { |
| 2903 | _scsih_fw_event_free(ioc, fw_event); | 2904 | _scsih_fw_event_free(ioc, fw_event); |
| 2904 | continue; | 2905 | continue; |
| 2905 | } | 2906 | } |
| 2906 | fw_event->cancel_pending_work = 1; | ||
| 2907 | } | 2907 | } |
| 2908 | } | 2908 | } |
| 2909 | 2909 | ||
| @@ -3518,7 +3518,8 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc, | |||
| 3518 | if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST || | 3518 | if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST || |
| 3519 | fw_event->ignore) | 3519 | fw_event->ignore) |
| 3520 | continue; | 3520 | continue; |
| 3521 | local_event_data = fw_event->event_data; | 3521 | local_event_data = (Mpi2EventDataSasTopologyChangeList_t *) |
| 3522 | fw_event->event_data; | ||
| 3522 | if (local_event_data->ExpStatus == | 3523 | if (local_event_data->ExpStatus == |
| 3523 | MPI2_EVENT_SAS_TOPO_ES_ADDED || | 3524 | MPI2_EVENT_SAS_TOPO_ES_ADDED || |
| 3524 | local_event_data->ExpStatus == | 3525 | local_event_data->ExpStatus == |
| @@ -5502,7 +5503,9 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 5502 | u64 sas_address; | 5503 | u64 sas_address; |
| 5503 | unsigned long flags; | 5504 | unsigned long flags; |
| 5504 | u8 link_rate, prev_link_rate; | 5505 | u8 link_rate, prev_link_rate; |
| 5505 | Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data; | 5506 | Mpi2EventDataSasTopologyChangeList_t *event_data = |
| 5507 | (Mpi2EventDataSasTopologyChangeList_t *) | ||
| 5508 | fw_event->event_data; | ||
| 5506 | 5509 | ||
| 5507 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 5510 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
| 5508 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) | 5511 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
| @@ -5697,7 +5700,8 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 5697 | u64 sas_address; | 5700 | u64 sas_address; |
| 5698 | unsigned long flags; | 5701 | unsigned long flags; |
| 5699 | Mpi2EventDataSasDeviceStatusChange_t *event_data = | 5702 | Mpi2EventDataSasDeviceStatusChange_t *event_data = |
| 5700 | fw_event->event_data; | 5703 | (Mpi2EventDataSasDeviceStatusChange_t *) |
| 5704 | fw_event->event_data; | ||
| 5701 | 5705 | ||
| 5702 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 5706 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
| 5703 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) | 5707 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
| @@ -5792,6 +5796,7 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 5792 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 5796 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
| 5793 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) | 5797 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
| 5794 | _scsih_sas_enclosure_dev_status_change_event_debug(ioc, | 5798 | _scsih_sas_enclosure_dev_status_change_event_debug(ioc, |
| 5799 | (Mpi2EventDataSasEnclDevStatusChange_t *) | ||
| 5795 | fw_event->event_data); | 5800 | fw_event->event_data); |
| 5796 | #endif | 5801 | #endif |
| 5797 | } | 5802 | } |
| @@ -5816,7 +5821,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 5816 | u32 termination_count; | 5821 | u32 termination_count; |
| 5817 | u32 query_count; | 5822 | u32 query_count; |
| 5818 | Mpi2SCSITaskManagementReply_t *mpi_reply; | 5823 | Mpi2SCSITaskManagementReply_t *mpi_reply; |
| 5819 | Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; | 5824 | Mpi2EventDataSasBroadcastPrimitive_t *event_data = |
| 5825 | (Mpi2EventDataSasBroadcastPrimitive_t *) | ||
| 5826 | fw_event->event_data; | ||
| 5820 | u16 ioc_status; | 5827 | u16 ioc_status; |
| 5821 | unsigned long flags; | 5828 | unsigned long flags; |
| 5822 | int r; | 5829 | int r; |
| @@ -5967,7 +5974,9 @@ static void | |||
| 5967 | _scsih_sas_discovery_event(struct MPT2SAS_ADAPTER *ioc, | 5974 | _scsih_sas_discovery_event(struct MPT2SAS_ADAPTER *ioc, |
| 5968 | struct fw_event_work *fw_event) | 5975 | struct fw_event_work *fw_event) |
| 5969 | { | 5976 | { |
| 5970 | Mpi2EventDataSasDiscovery_t *event_data = fw_event->event_data; | 5977 | Mpi2EventDataSasDiscovery_t *event_data = |
| 5978 | (Mpi2EventDataSasDiscovery_t *) | ||
| 5979 | fw_event->event_data; | ||
| 5971 | 5980 | ||
| 5972 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 5981 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
| 5973 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { | 5982 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { |
| @@ -6355,7 +6364,9 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 6355 | Mpi2EventIrConfigElement_t *element; | 6364 | Mpi2EventIrConfigElement_t *element; |
| 6356 | int i; | 6365 | int i; |
| 6357 | u8 foreign_config; | 6366 | u8 foreign_config; |
| 6358 | Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data; | 6367 | Mpi2EventDataIrConfigChangeList_t *event_data = |
| 6368 | (Mpi2EventDataIrConfigChangeList_t *) | ||
| 6369 | fw_event->event_data; | ||
| 6359 | 6370 | ||
| 6360 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 6371 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
| 6361 | if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) | 6372 | if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
| @@ -6423,7 +6434,9 @@ _scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 6423 | u16 handle; | 6434 | u16 handle; |
| 6424 | u32 state; | 6435 | u32 state; |
| 6425 | int rc; | 6436 | int rc; |
| 6426 | Mpi2EventDataIrVolume_t *event_data = fw_event->event_data; | 6437 | Mpi2EventDataIrVolume_t *event_data = |
| 6438 | (Mpi2EventDataIrVolume_t *) | ||
| 6439 | fw_event->event_data; | ||
| 6427 | 6440 | ||
| 6428 | if (ioc->shost_recovery) | 6441 | if (ioc->shost_recovery) |
| 6429 | return; | 6442 | return; |
| @@ -6507,7 +6520,9 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, | |||
| 6507 | Mpi2ConfigReply_t mpi_reply; | 6520 | Mpi2ConfigReply_t mpi_reply; |
| 6508 | Mpi2SasDevicePage0_t sas_device_pg0; | 6521 | Mpi2SasDevicePage0_t sas_device_pg0; |
| 6509 | u32 ioc_status; | 6522 | u32 ioc_status; |
| 6510 | Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data; | 6523 | Mpi2EventDataIrPhysicalDisk_t *event_data = |
| 6524 | (Mpi2EventDataIrPhysicalDisk_t *) | ||
| 6525 | fw_event->event_data; | ||
| 6511 | u64 sas_address; | 6526 | u64 sas_address; |
| 6512 | 6527 | ||
| 6513 | if (ioc->shost_recovery) | 6528 | if (ioc->shost_recovery) |
| @@ -6630,7 +6645,9 @@ static void | |||
| 6630 | _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc, | 6645 | _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc, |
| 6631 | struct fw_event_work *fw_event) | 6646 | struct fw_event_work *fw_event) |
| 6632 | { | 6647 | { |
| 6633 | Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data; | 6648 | Mpi2EventDataIrOperationStatus_t *event_data = |
| 6649 | (Mpi2EventDataIrOperationStatus_t *) | ||
| 6650 | fw_event->event_data; | ||
| 6634 | static struct _raid_device *raid_device; | 6651 | static struct _raid_device *raid_device; |
| 6635 | unsigned long flags; | 6652 | unsigned long flags; |
| 6636 | u16 handle; | 6653 | u16 handle; |
| @@ -7401,7 +7418,7 @@ _firmware_event_work(struct work_struct *work) | |||
| 7401 | struct MPT2SAS_ADAPTER *ioc = fw_event->ioc; | 7418 | struct MPT2SAS_ADAPTER *ioc = fw_event->ioc; |
| 7402 | 7419 | ||
| 7403 | /* the queue is being flushed so ignore this event */ | 7420 | /* the queue is being flushed so ignore this event */ |
| 7404 | if (ioc->remove_host || fw_event->cancel_pending_work || | 7421 | if (ioc->remove_host || |
| 7405 | ioc->pci_error_recovery) { | 7422 | ioc->pci_error_recovery) { |
| 7406 | _scsih_fw_event_free(ioc, fw_event); | 7423 | _scsih_fw_event_free(ioc, fw_event); |
| 7407 | return; | 7424 | return; |
| @@ -7590,23 +7607,15 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, | |||
| 7590 | return; | 7607 | return; |
| 7591 | } | 7608 | } |
| 7592 | 7609 | ||
| 7593 | fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); | ||
| 7594 | if (!fw_event) { | ||
| 7595 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
| 7596 | ioc->name, __FILE__, __LINE__, __func__); | ||
| 7597 | return; | ||
| 7598 | } | ||
| 7599 | sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; | 7610 | sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; |
| 7600 | fw_event->event_data = kzalloc(sz, GFP_ATOMIC); | 7611 | fw_event = kzalloc(sizeof(*fw_event) + sz, GFP_ATOMIC); |
| 7601 | if (!fw_event->event_data) { | 7612 | if (!fw_event) { |
| 7602 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | 7613 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", |
| 7603 | ioc->name, __FILE__, __LINE__, __func__); | 7614 | ioc->name, __FILE__, __LINE__, __func__); |
| 7604 | kfree(fw_event); | ||
| 7605 | return; | 7615 | return; |
| 7606 | } | 7616 | } |
| 7607 | 7617 | ||
| 7608 | memcpy(fw_event->event_data, mpi_reply->EventData, | 7618 | memcpy(fw_event->event_data, mpi_reply->EventData, sz); |
| 7609 | sz); | ||
| 7610 | fw_event->ioc = ioc; | 7619 | fw_event->ioc = ioc; |
| 7611 | fw_event->VF_ID = mpi_reply->VF_ID; | 7620 | fw_event->VF_ID = mpi_reply->VF_ID; |
| 7612 | fw_event->VP_ID = mpi_reply->VP_ID; | 7621 | fw_event->VP_ID = mpi_reply->VP_ID; |
| @@ -7857,9 +7866,9 @@ _scsih_remove(struct pci_dev *pdev) | |||
| 7857 | } | 7866 | } |
| 7858 | 7867 | ||
| 7859 | sas_remove_host(shost); | 7868 | sas_remove_host(shost); |
| 7869 | scsi_remove_host(shost); | ||
| 7860 | mpt2sas_base_detach(ioc); | 7870 | mpt2sas_base_detach(ioc); |
| 7861 | list_del(&ioc->list); | 7871 | list_del(&ioc->list); |
| 7862 | scsi_remove_host(shost); | ||
| 7863 | scsi_host_put(shost); | 7872 | scsi_host_put(shost); |
| 7864 | } | 7873 | } |
| 7865 | 7874 | ||
| @@ -8200,13 +8209,6 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 8200 | } | 8209 | } |
| 8201 | } | 8210 | } |
| 8202 | 8211 | ||
| 8203 | if ((scsi_add_host(shost, &pdev->dev))) { | ||
| 8204 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
| 8205 | ioc->name, __FILE__, __LINE__, __func__); | ||
| 8206 | list_del(&ioc->list); | ||
| 8207 | goto out_add_shost_fail; | ||
| 8208 | } | ||
| 8209 | |||
| 8210 | /* register EEDP capabilities with SCSI layer */ | 8212 | /* register EEDP capabilities with SCSI layer */ |
| 8211 | if (prot_mask) | 8213 | if (prot_mask) |
| 8212 | scsi_host_set_prot(shost, prot_mask); | 8214 | scsi_host_set_prot(shost, prot_mask); |
| @@ -8248,16 +8250,23 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 8248 | } | 8250 | } |
| 8249 | } else | 8251 | } else |
| 8250 | ioc->hide_drives = 0; | 8252 | ioc->hide_drives = 0; |
| 8253 | |||
| 8254 | if ((scsi_add_host(shost, &pdev->dev))) { | ||
| 8255 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
| 8256 | ioc->name, __FILE__, __LINE__, __func__); | ||
| 8257 | goto out_add_shost_fail; | ||
| 8258 | } | ||
| 8259 | |||
| 8251 | scsi_scan_host(shost); | 8260 | scsi_scan_host(shost); |
| 8252 | 8261 | ||
| 8253 | return 0; | 8262 | return 0; |
| 8254 | 8263 | ||
| 8264 | out_add_shost_fail: | ||
| 8265 | mpt2sas_base_detach(ioc); | ||
| 8255 | out_attach_fail: | 8266 | out_attach_fail: |
| 8256 | destroy_workqueue(ioc->firmware_event_thread); | 8267 | destroy_workqueue(ioc->firmware_event_thread); |
| 8257 | out_thread_fail: | 8268 | out_thread_fail: |
| 8258 | list_del(&ioc->list); | 8269 | list_del(&ioc->list); |
| 8259 | scsi_remove_host(shost); | ||
| 8260 | out_add_shost_fail: | ||
| 8261 | scsi_host_put(shost); | 8270 | scsi_host_put(shost); |
| 8262 | return -ENODEV; | 8271 | return -ENODEV; |
| 8263 | } | 8272 | } |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 0cf4f7000f94..93ce2b2baa41 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c | |||
| @@ -266,7 +266,7 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc) | |||
| 266 | ioc->fault_reset_work_q = NULL; | 266 | ioc->fault_reset_work_q = NULL; |
| 267 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); | 267 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); |
| 268 | if (wq) { | 268 | if (wq) { |
| 269 | if (!cancel_delayed_work(&ioc->fault_reset_work)) | 269 | if (!cancel_delayed_work_sync(&ioc->fault_reset_work)) |
| 270 | flush_workqueue(wq); | 270 | flush_workqueue(wq); |
| 271 | destroy_workqueue(wq); | 271 | destroy_workqueue(wq); |
| 272 | } | 272 | } |
| @@ -1624,66 +1624,35 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector) | |||
| 1624 | static void | 1624 | static void |
| 1625 | _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) | 1625 | _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) |
| 1626 | { | 1626 | { |
| 1627 | struct adapter_reply_queue *reply_q; | 1627 | unsigned int cpu, nr_cpus, nr_msix, index = 0; |
| 1628 | int cpu_id; | ||
| 1629 | int cpu_grouping, loop, grouping, grouping_mod; | ||
| 1630 | int reply_queue; | ||
| 1631 | 1628 | ||
| 1632 | if (!_base_is_controller_msix_enabled(ioc)) | 1629 | if (!_base_is_controller_msix_enabled(ioc)) |
| 1633 | return; | 1630 | return; |
| 1634 | 1631 | ||
| 1635 | memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); | 1632 | memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); |
| 1636 | 1633 | ||
| 1637 | /* NUMA Hardware bug workaround - drop to less reply queues */ | 1634 | nr_cpus = num_online_cpus(); |
| 1638 | if (ioc->reply_queue_count > ioc->facts.MaxMSIxVectors) { | 1635 | nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count, |
| 1639 | ioc->reply_queue_count = ioc->facts.MaxMSIxVectors; | 1636 | ioc->facts.MaxMSIxVectors); |
| 1640 | reply_queue = 0; | 1637 | if (!nr_msix) |
| 1641 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { | 1638 | return; |
| 1642 | reply_q->msix_index = reply_queue; | ||
| 1643 | if (++reply_queue == ioc->reply_queue_count) | ||
| 1644 | reply_queue = 0; | ||
| 1645 | } | ||
| 1646 | } | ||
| 1647 | 1639 | ||
| 1648 | /* when there are more cpus than available msix vectors, | 1640 | cpu = cpumask_first(cpu_online_mask); |
| 1649 | * then group cpus togeather on same irq | 1641 | |
| 1650 | */ | 1642 | do { |
| 1651 | if (ioc->cpu_count > ioc->msix_vector_count) { | 1643 | unsigned int i, group = nr_cpus / nr_msix; |
| 1652 | grouping = ioc->cpu_count / ioc->msix_vector_count; | 1644 | |
| 1653 | grouping_mod = ioc->cpu_count % ioc->msix_vector_count; | 1645 | if (index < nr_cpus % nr_msix) |
| 1654 | if (grouping < 2 || (grouping == 2 && !grouping_mod)) | 1646 | group++; |
| 1655 | cpu_grouping = 2; | 1647 | |
| 1656 | else if (grouping < 4 || (grouping == 4 && !grouping_mod)) | 1648 | for (i = 0 ; i < group ; i++) { |
| 1657 | cpu_grouping = 4; | 1649 | ioc->cpu_msix_table[cpu] = index; |
| 1658 | else if (grouping < 8 || (grouping == 8 && !grouping_mod)) | 1650 | cpu = cpumask_next(cpu, cpu_online_mask); |
| 1659 | cpu_grouping = 8; | ||
| 1660 | else | ||
| 1661 | cpu_grouping = 16; | ||
| 1662 | } else | ||
| 1663 | cpu_grouping = 0; | ||
| 1664 | |||
| 1665 | loop = 0; | ||
| 1666 | reply_q = list_entry(ioc->reply_queue_list.next, | ||
| 1667 | struct adapter_reply_queue, list); | ||
| 1668 | for_each_online_cpu(cpu_id) { | ||
| 1669 | if (!cpu_grouping) { | ||
| 1670 | ioc->cpu_msix_table[cpu_id] = reply_q->msix_index; | ||
| 1671 | reply_q = list_entry(reply_q->list.next, | ||
| 1672 | struct adapter_reply_queue, list); | ||
| 1673 | } else { | ||
| 1674 | if (loop < cpu_grouping) { | ||
| 1675 | ioc->cpu_msix_table[cpu_id] = | ||
| 1676 | reply_q->msix_index; | ||
| 1677 | loop++; | ||
| 1678 | } else { | ||
| 1679 | reply_q = list_entry(reply_q->list.next, | ||
| 1680 | struct adapter_reply_queue, list); | ||
| 1681 | ioc->cpu_msix_table[cpu_id] = | ||
| 1682 | reply_q->msix_index; | ||
| 1683 | loop = 1; | ||
| 1684 | } | ||
| 1685 | } | 1651 | } |
| 1686 | } | 1652 | |
| 1653 | index++; | ||
| 1654 | |||
| 1655 | } while (cpu < nr_cpus); | ||
| 1687 | } | 1656 | } |
| 1688 | 1657 | ||
| 1689 | /** | 1658 | /** |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 18e713db1d32..135f12c20ecf 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
| @@ -112,8 +112,8 @@ MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay"); | |||
| 112 | 112 | ||
| 113 | /* scsi-mid layer global parmeter is max_report_luns, which is 511 */ | 113 | /* scsi-mid layer global parmeter is max_report_luns, which is 511 */ |
| 114 | #define MPT3SAS_MAX_LUN (16895) | 114 | #define MPT3SAS_MAX_LUN (16895) |
| 115 | static int max_lun = MPT3SAS_MAX_LUN; | 115 | static u64 max_lun = MPT3SAS_MAX_LUN; |
| 116 | module_param(max_lun, int, 0); | 116 | module_param(max_lun, ullong, 0); |
| 117 | MODULE_PARM_DESC(max_lun, " max lun, default=16895 "); | 117 | MODULE_PARM_DESC(max_lun, " max lun, default=16895 "); |
| 118 | 118 | ||
| 119 | 119 | ||
| @@ -190,7 +190,7 @@ struct fw_event_work { | |||
| 190 | u8 VP_ID; | 190 | u8 VP_ID; |
| 191 | u8 ignore; | 191 | u8 ignore; |
| 192 | u16 event; | 192 | u16 event; |
| 193 | void *event_data; | 193 | char event_data[0] __aligned(4); |
| 194 | }; | 194 | }; |
| 195 | 195 | ||
| 196 | /* raid transport support */ | 196 | /* raid transport support */ |
| @@ -247,7 +247,7 @@ struct _scsi_io_transfer { | |||
| 247 | /* | 247 | /* |
| 248 | * The pci device ids are defined in mpi/mpi2_cnfg.h. | 248 | * The pci device ids are defined in mpi/mpi2_cnfg.h. |
| 249 | */ | 249 | */ |
| 250 | static DEFINE_PCI_DEVICE_TABLE(scsih_pci_table) = { | 250 | static const struct pci_device_id scsih_pci_table[] = { |
| 251 | /* Fury ~ 3004 and 3008 */ | 251 | /* Fury ~ 3004 and 3008 */ |
| 252 | { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004, | 252 | { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004, |
| 253 | PCI_ANY_ID, PCI_ANY_ID }, | 253 | PCI_ANY_ID, PCI_ANY_ID }, |
| @@ -1163,7 +1163,8 @@ _scsih_target_alloc(struct scsi_target *starget) | |||
| 1163 | unsigned long flags; | 1163 | unsigned long flags; |
| 1164 | struct sas_rphy *rphy; | 1164 | struct sas_rphy *rphy; |
| 1165 | 1165 | ||
| 1166 | sas_target_priv_data = kzalloc(sizeof(struct scsi_target), GFP_KERNEL); | 1166 | sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data), |
| 1167 | GFP_KERNEL); | ||
| 1167 | if (!sas_target_priv_data) | 1168 | if (!sas_target_priv_data) |
| 1168 | return -ENOMEM; | 1169 | return -ENOMEM; |
| 1169 | 1170 | ||
| @@ -1277,7 +1278,8 @@ _scsih_slave_alloc(struct scsi_device *sdev) | |||
| 1277 | struct _sas_device *sas_device; | 1278 | struct _sas_device *sas_device; |
| 1278 | unsigned long flags; | 1279 | unsigned long flags; |
| 1279 | 1280 | ||
| 1280 | sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL); | 1281 | sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data), |
| 1282 | GFP_KERNEL); | ||
| 1281 | if (!sas_device_priv_data) | 1283 | if (!sas_device_priv_data) |
| 1282 | return -ENOMEM; | 1284 | return -ENOMEM; |
| 1283 | 1285 | ||
| @@ -2490,7 +2492,6 @@ _scsih_fw_event_free(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work | |||
| 2490 | 2492 | ||
| 2491 | spin_lock_irqsave(&ioc->fw_event_lock, flags); | 2493 | spin_lock_irqsave(&ioc->fw_event_lock, flags); |
| 2492 | list_del(&fw_event->list); | 2494 | list_del(&fw_event->list); |
| 2493 | kfree(fw_event->event_data); | ||
| 2494 | kfree(fw_event); | 2495 | kfree(fw_event); |
| 2495 | spin_unlock_irqrestore(&ioc->fw_event_lock, flags); | 2496 | spin_unlock_irqrestore(&ioc->fw_event_lock, flags); |
| 2496 | } | 2497 | } |
| @@ -2511,12 +2512,10 @@ mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, | |||
| 2511 | 2512 | ||
| 2512 | if (ioc->is_driver_loading) | 2513 | if (ioc->is_driver_loading) |
| 2513 | return; | 2514 | return; |
| 2514 | fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); | 2515 | fw_event = kzalloc(sizeof(*fw_event) + sizeof(*event_data), |
| 2516 | GFP_ATOMIC); | ||
| 2515 | if (!fw_event) | 2517 | if (!fw_event) |
| 2516 | return; | 2518 | return; |
| 2517 | fw_event->event_data = kzalloc(sizeof(*event_data), GFP_ATOMIC); | ||
| 2518 | if (!fw_event->event_data) | ||
| 2519 | return; | ||
| 2520 | fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG; | 2519 | fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG; |
| 2521 | fw_event->ioc = ioc; | 2520 | fw_event->ioc = ioc; |
| 2522 | memcpy(fw_event->event_data, event_data, sizeof(*event_data)); | 2521 | memcpy(fw_event->event_data, event_data, sizeof(*event_data)); |
| @@ -2582,11 +2581,10 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) | |||
| 2582 | return; | 2581 | return; |
| 2583 | 2582 | ||
| 2584 | list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) { | 2583 | list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) { |
| 2585 | if (cancel_delayed_work(&fw_event->delayed_work)) { | 2584 | if (cancel_delayed_work_sync(&fw_event->delayed_work)) { |
| 2586 | _scsih_fw_event_free(ioc, fw_event); | 2585 | _scsih_fw_event_free(ioc, fw_event); |
| 2587 | continue; | 2586 | continue; |
| 2588 | } | 2587 | } |
| 2589 | fw_event->cancel_pending_work = 1; | ||
| 2590 | } | 2588 | } |
| 2591 | } | 2589 | } |
| 2592 | 2590 | ||
| @@ -3211,7 +3209,8 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc, | |||
| 3211 | if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST || | 3209 | if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST || |
| 3212 | fw_event->ignore) | 3210 | fw_event->ignore) |
| 3213 | continue; | 3211 | continue; |
| 3214 | local_event_data = fw_event->event_data; | 3212 | local_event_data = (Mpi2EventDataSasTopologyChangeList_t *) |
| 3213 | fw_event->event_data; | ||
| 3215 | if (local_event_data->ExpStatus == | 3214 | if (local_event_data->ExpStatus == |
| 3216 | MPI2_EVENT_SAS_TOPO_ES_ADDED || | 3215 | MPI2_EVENT_SAS_TOPO_ES_ADDED || |
| 3217 | local_event_data->ExpStatus == | 3216 | local_event_data->ExpStatus == |
| @@ -5043,7 +5042,9 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, | |||
| 5043 | u64 sas_address; | 5042 | u64 sas_address; |
| 5044 | unsigned long flags; | 5043 | unsigned long flags; |
| 5045 | u8 link_rate, prev_link_rate; | 5044 | u8 link_rate, prev_link_rate; |
| 5046 | Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data; | 5045 | Mpi2EventDataSasTopologyChangeList_t *event_data = |
| 5046 | (Mpi2EventDataSasTopologyChangeList_t *) | ||
| 5047 | fw_event->event_data; | ||
| 5047 | 5048 | ||
| 5048 | #ifdef CONFIG_SCSI_MPT3SAS_LOGGING | 5049 | #ifdef CONFIG_SCSI_MPT3SAS_LOGGING |
| 5049 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) | 5050 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
| @@ -5241,7 +5242,8 @@ _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, | |||
| 5241 | u64 sas_address; | 5242 | u64 sas_address; |
| 5242 | unsigned long flags; | 5243 | unsigned long flags; |
| 5243 | Mpi2EventDataSasDeviceStatusChange_t *event_data = | 5244 | Mpi2EventDataSasDeviceStatusChange_t *event_data = |
| 5244 | fw_event->event_data; | 5245 | (Mpi2EventDataSasDeviceStatusChange_t *) |
| 5246 | fw_event->event_data; | ||
| 5245 | 5247 | ||
| 5246 | #ifdef CONFIG_SCSI_MPT3SAS_LOGGING | 5248 | #ifdef CONFIG_SCSI_MPT3SAS_LOGGING |
| 5247 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) | 5249 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
| @@ -5337,6 +5339,7 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc, | |||
| 5337 | #ifdef CONFIG_SCSI_MPT3SAS_LOGGING | 5339 | #ifdef CONFIG_SCSI_MPT3SAS_LOGGING |
| 5338 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) | 5340 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
| 5339 | _scsih_sas_enclosure_dev_status_change_event_debug(ioc, | 5341 | _scsih_sas_enclosure_dev_status_change_event_debug(ioc, |
| 5342 | (Mpi2EventDataSasEnclDevStatusChange_t *) | ||
| 5340 | fw_event->event_data); | 5343 | fw_event->event_data); |
| 5341 | #endif | 5344 | #endif |
| 5342 | } | 5345 | } |
| @@ -5361,7 +5364,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, | |||
| 5361 | u32 termination_count; | 5364 | u32 termination_count; |
| 5362 | u32 query_count; | 5365 | u32 query_count; |
| 5363 | Mpi2SCSITaskManagementReply_t *mpi_reply; | 5366 | Mpi2SCSITaskManagementReply_t *mpi_reply; |
| 5364 | Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; | 5367 | Mpi2EventDataSasBroadcastPrimitive_t *event_data = |
| 5368 | (Mpi2EventDataSasBroadcastPrimitive_t *) | ||
| 5369 | fw_event->event_data; | ||
| 5365 | u16 ioc_status; | 5370 | u16 ioc_status; |
| 5366 | unsigned long flags; | 5371 | unsigned long flags; |
| 5367 | int r; | 5372 | int r; |
| @@ -5513,7 +5518,8 @@ static void | |||
| 5513 | _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, | 5518 | _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, |
| 5514 | struct fw_event_work *fw_event) | 5519 | struct fw_event_work *fw_event) |
| 5515 | { | 5520 | { |
| 5516 | Mpi2EventDataSasDiscovery_t *event_data = fw_event->event_data; | 5521 | Mpi2EventDataSasDiscovery_t *event_data = |
| 5522 | (Mpi2EventDataSasDiscovery_t *) fw_event->event_data; | ||
| 5517 | 5523 | ||
| 5518 | #ifdef CONFIG_SCSI_MPT3SAS_LOGGING | 5524 | #ifdef CONFIG_SCSI_MPT3SAS_LOGGING |
| 5519 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { | 5525 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { |
| @@ -5999,7 +6005,9 @@ _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, | |||
| 5999 | Mpi2EventIrConfigElement_t *element; | 6005 | Mpi2EventIrConfigElement_t *element; |
| 6000 | int i; | 6006 | int i; |
| 6001 | u8 foreign_config; | 6007 | u8 foreign_config; |
| 6002 | Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data; | 6008 | Mpi2EventDataIrConfigChangeList_t *event_data = |
| 6009 | (Mpi2EventDataIrConfigChangeList_t *) | ||
| 6010 | fw_event->event_data; | ||
| 6003 | 6011 | ||
| 6004 | #ifdef CONFIG_SCSI_MPT3SAS_LOGGING | 6012 | #ifdef CONFIG_SCSI_MPT3SAS_LOGGING |
| 6005 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) | 6013 | if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) |
| @@ -6069,7 +6077,8 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, | |||
| 6069 | u16 handle; | 6077 | u16 handle; |
| 6070 | u32 state; | 6078 | u32 state; |
| 6071 | int rc; | 6079 | int rc; |
| 6072 | Mpi2EventDataIrVolume_t *event_data = fw_event->event_data; | 6080 | Mpi2EventDataIrVolume_t *event_data = |
| 6081 | (Mpi2EventDataIrVolume_t *) fw_event->event_data; | ||
| 6073 | 6082 | ||
| 6074 | if (ioc->shost_recovery) | 6083 | if (ioc->shost_recovery) |
| 6075 | return; | 6084 | return; |
| @@ -6152,7 +6161,8 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, | |||
| 6152 | Mpi2ConfigReply_t mpi_reply; | 6161 | Mpi2ConfigReply_t mpi_reply; |
| 6153 | Mpi2SasDevicePage0_t sas_device_pg0; | 6162 | Mpi2SasDevicePage0_t sas_device_pg0; |
| 6154 | u32 ioc_status; | 6163 | u32 ioc_status; |
| 6155 | Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data; | 6164 | Mpi2EventDataIrPhysicalDisk_t *event_data = |
| 6165 | (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data; | ||
| 6156 | u64 sas_address; | 6166 | u64 sas_address; |
| 6157 | 6167 | ||
| 6158 | if (ioc->shost_recovery) | 6168 | if (ioc->shost_recovery) |
| @@ -6272,7 +6282,9 @@ static void | |||
| 6272 | _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc, | 6282 | _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc, |
| 6273 | struct fw_event_work *fw_event) | 6283 | struct fw_event_work *fw_event) |
| 6274 | { | 6284 | { |
| 6275 | Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data; | 6285 | Mpi2EventDataIrOperationStatus_t *event_data = |
| 6286 | (Mpi2EventDataIrOperationStatus_t *) | ||
| 6287 | fw_event->event_data; | ||
| 6276 | static struct _raid_device *raid_device; | 6288 | static struct _raid_device *raid_device; |
| 6277 | unsigned long flags; | 6289 | unsigned long flags; |
| 6278 | u16 handle; | 6290 | u16 handle; |
| @@ -7026,7 +7038,7 @@ static void | |||
| 7026 | _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) | 7038 | _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) |
| 7027 | { | 7039 | { |
| 7028 | /* the queue is being flushed so ignore this event */ | 7040 | /* the queue is being flushed so ignore this event */ |
| 7029 | if (ioc->remove_host || fw_event->cancel_pending_work || | 7041 | if (ioc->remove_host || |
| 7030 | ioc->pci_error_recovery) { | 7042 | ioc->pci_error_recovery) { |
| 7031 | _scsih_fw_event_free(ioc, fw_event); | 7043 | _scsih_fw_event_free(ioc, fw_event); |
| 7032 | return; | 7044 | return; |
| @@ -7034,7 +7046,9 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) | |||
| 7034 | 7046 | ||
| 7035 | switch (fw_event->event) { | 7047 | switch (fw_event->event) { |
| 7036 | case MPT3SAS_PROCESS_TRIGGER_DIAG: | 7048 | case MPT3SAS_PROCESS_TRIGGER_DIAG: |
| 7037 | mpt3sas_process_trigger_data(ioc, fw_event->event_data); | 7049 | mpt3sas_process_trigger_data(ioc, |
| 7050 | (struct SL_WH_TRIGGERS_EVENT_DATA_T *) | ||
| 7051 | fw_event->event_data); | ||
| 7038 | break; | 7052 | break; |
| 7039 | case MPT3SAS_REMOVE_UNRESPONDING_DEVICES: | 7053 | case MPT3SAS_REMOVE_UNRESPONDING_DEVICES: |
| 7040 | while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery) | 7054 | while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery) |
| @@ -7192,18 +7206,11 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, | |||
| 7192 | return 1; | 7206 | return 1; |
| 7193 | } | 7207 | } |
| 7194 | 7208 | ||
| 7195 | fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); | ||
| 7196 | if (!fw_event) { | ||
| 7197 | pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", | ||
| 7198 | ioc->name, __FILE__, __LINE__, __func__); | ||
| 7199 | return 1; | ||
| 7200 | } | ||
| 7201 | sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; | 7209 | sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; |
| 7202 | fw_event->event_data = kzalloc(sz, GFP_ATOMIC); | 7210 | fw_event = kzalloc(sizeof(*fw_event) + sz, GFP_ATOMIC); |
| 7203 | if (!fw_event->event_data) { | 7211 | if (!fw_event) { |
| 7204 | pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", | 7212 | pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", |
| 7205 | ioc->name, __FILE__, __LINE__, __func__); | 7213 | ioc->name, __FILE__, __LINE__, __func__); |
| 7206 | kfree(fw_event); | ||
| 7207 | return 1; | 7214 | return 1; |
| 7208 | } | 7215 | } |
| 7209 | 7216 | ||
| @@ -7431,9 +7438,9 @@ static void _scsih_remove(struct pci_dev *pdev) | |||
| 7431 | } | 7438 | } |
| 7432 | 7439 | ||
| 7433 | sas_remove_host(shost); | 7440 | sas_remove_host(shost); |
| 7441 | scsi_remove_host(shost); | ||
| 7434 | mpt3sas_base_detach(ioc); | 7442 | mpt3sas_base_detach(ioc); |
| 7435 | list_del(&ioc->list); | 7443 | list_del(&ioc->list); |
| 7436 | scsi_remove_host(shost); | ||
| 7437 | scsi_host_put(shost); | 7444 | scsi_host_put(shost); |
| 7438 | } | 7445 | } |
| 7439 | 7446 | ||
| @@ -7801,13 +7808,6 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 7801 | } | 7808 | } |
| 7802 | } | 7809 | } |
| 7803 | 7810 | ||
| 7804 | if ((scsi_add_host(shost, &pdev->dev))) { | ||
| 7805 | pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", | ||
| 7806 | ioc->name, __FILE__, __LINE__, __func__); | ||
| 7807 | list_del(&ioc->list); | ||
| 7808 | goto out_add_shost_fail; | ||
| 7809 | } | ||
| 7810 | |||
| 7811 | /* register EEDP capabilities with SCSI layer */ | 7811 | /* register EEDP capabilities with SCSI layer */ |
| 7812 | if (prot_mask > 0) | 7812 | if (prot_mask > 0) |
| 7813 | scsi_host_set_prot(shost, prot_mask); | 7813 | scsi_host_set_prot(shost, prot_mask); |
| @@ -7835,15 +7835,21 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 7835 | ioc->name, __FILE__, __LINE__, __func__); | 7835 | ioc->name, __FILE__, __LINE__, __func__); |
| 7836 | goto out_attach_fail; | 7836 | goto out_attach_fail; |
| 7837 | } | 7837 | } |
| 7838 | if ((scsi_add_host(shost, &pdev->dev))) { | ||
| 7839 | pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", | ||
| 7840 | ioc->name, __FILE__, __LINE__, __func__); | ||
| 7841 | list_del(&ioc->list); | ||
| 7842 | goto out_add_shost_fail; | ||
| 7843 | } | ||
| 7844 | |||
| 7838 | scsi_scan_host(shost); | 7845 | scsi_scan_host(shost); |
| 7839 | return 0; | 7846 | return 0; |
| 7840 | 7847 | out_add_shost_fail: | |
| 7848 | mpt3sas_base_detach(ioc); | ||
| 7841 | out_attach_fail: | 7849 | out_attach_fail: |
| 7842 | destroy_workqueue(ioc->firmware_event_thread); | 7850 | destroy_workqueue(ioc->firmware_event_thread); |
| 7843 | out_thread_fail: | 7851 | out_thread_fail: |
| 7844 | list_del(&ioc->list); | 7852 | list_del(&ioc->list); |
| 7845 | scsi_remove_host(shost); | ||
| 7846 | out_add_shost_fail: | ||
| 7847 | scsi_host_put(shost); | 7853 | scsi_host_put(shost); |
| 7848 | return -ENODEV; | 7854 | return -ENODEV; |
| 7849 | } | 7855 | } |
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 6c1f223a8e1d..ac52f7c99513 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c | |||
| @@ -1344,19 +1344,23 @@ void mvs_dev_gone_notify(struct domain_device *dev) | |||
| 1344 | { | 1344 | { |
| 1345 | unsigned long flags = 0; | 1345 | unsigned long flags = 0; |
| 1346 | struct mvs_device *mvi_dev = dev->lldd_dev; | 1346 | struct mvs_device *mvi_dev = dev->lldd_dev; |
| 1347 | struct mvs_info *mvi = mvi_dev->mvi_info; | 1347 | struct mvs_info *mvi; |
| 1348 | |||
| 1349 | spin_lock_irqsave(&mvi->lock, flags); | ||
| 1350 | 1348 | ||
| 1351 | if (mvi_dev) { | 1349 | if (!mvi_dev) { |
| 1352 | mv_dprintk("found dev[%d:%x] is gone.\n", | ||
| 1353 | mvi_dev->device_id, mvi_dev->dev_type); | ||
| 1354 | mvs_release_task(mvi, dev); | ||
| 1355 | mvs_free_reg_set(mvi, mvi_dev); | ||
| 1356 | mvs_free_dev(mvi_dev); | ||
| 1357 | } else { | ||
| 1358 | mv_dprintk("found dev has gone.\n"); | 1350 | mv_dprintk("found dev has gone.\n"); |
| 1351 | return; | ||
| 1359 | } | 1352 | } |
| 1353 | |||
| 1354 | mvi = mvi_dev->mvi_info; | ||
| 1355 | |||
| 1356 | spin_lock_irqsave(&mvi->lock, flags); | ||
| 1357 | |||
| 1358 | mv_dprintk("found dev[%d:%x] is gone.\n", | ||
| 1359 | mvi_dev->device_id, mvi_dev->dev_type); | ||
| 1360 | mvs_release_task(mvi, dev); | ||
| 1361 | mvs_free_reg_set(mvi, mvi_dev); | ||
| 1362 | mvs_free_dev(mvi_dev); | ||
| 1363 | |||
| 1360 | dev->lldd_dev = NULL; | 1364 | dev->lldd_dev = NULL; |
| 1361 | mvi_dev->sas_device = NULL; | 1365 | mvi_dev->sas_device = NULL; |
| 1362 | 1366 | ||
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index edbee8dc62c9..3e6b866759fe 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c | |||
| @@ -48,7 +48,7 @@ MODULE_LICENSE("GPL"); | |||
| 48 | MODULE_AUTHOR("jyli@marvell.com"); | 48 | MODULE_AUTHOR("jyli@marvell.com"); |
| 49 | MODULE_DESCRIPTION("Marvell UMI Driver"); | 49 | MODULE_DESCRIPTION("Marvell UMI Driver"); |
| 50 | 50 | ||
| 51 | static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = { | 51 | static const struct pci_device_id mvumi_pci_table[] = { |
| 52 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) }, | 52 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) }, |
| 53 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) }, | 53 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) }, |
| 54 | { 0 } | 54 | { 0 } |
| @@ -142,8 +142,8 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, | |||
| 142 | 142 | ||
| 143 | case RESOURCE_UNCACHED_MEMORY: | 143 | case RESOURCE_UNCACHED_MEMORY: |
| 144 | size = round_up(size, 8); | 144 | size = round_up(size, 8); |
| 145 | res->virt_addr = pci_alloc_consistent(mhba->pdev, size, | 145 | res->virt_addr = pci_zalloc_consistent(mhba->pdev, size, |
| 146 | &res->bus_addr); | 146 | &res->bus_addr); |
| 147 | if (!res->virt_addr) { | 147 | if (!res->virt_addr) { |
| 148 | dev_err(&mhba->pdev->dev, | 148 | dev_err(&mhba->pdev->dev, |
| 149 | "unable to allocate consistent mem," | 149 | "unable to allocate consistent mem," |
| @@ -151,7 +151,6 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, | |||
| 151 | kfree(res); | 151 | kfree(res); |
| 152 | return NULL; | 152 | return NULL; |
| 153 | } | 153 | } |
| 154 | memset(res->virt_addr, 0, size); | ||
| 155 | break; | 154 | break; |
| 156 | 155 | ||
| 157 | default: | 156 | default: |
| @@ -258,12 +257,10 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, | |||
| 258 | if (size == 0) | 257 | if (size == 0) |
| 259 | return 0; | 258 | return 0; |
| 260 | 259 | ||
| 261 | virt_addr = pci_alloc_consistent(mhba->pdev, size, &phy_addr); | 260 | virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr); |
| 262 | if (!virt_addr) | 261 | if (!virt_addr) |
| 263 | return -1; | 262 | return -1; |
| 264 | 263 | ||
| 265 | memset(virt_addr, 0, size); | ||
| 266 | |||
| 267 | m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0]; | 264 | m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0]; |
| 268 | cmd->frame->sg_counts = 1; | 265 | cmd->frame->sg_counts = 1; |
| 269 | cmd->data_buf = virt_addr; | 266 | cmd->data_buf = virt_addr; |
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index 7d014b11df62..a7305ffc359d 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c | |||
| @@ -6633,7 +6633,7 @@ static void ncr_sir_to_redo(struct ncb *np, int num, struct ccb *cp) | |||
| 6633 | ** patch requested size into sense command | 6633 | ** patch requested size into sense command |
| 6634 | */ | 6634 | */ |
| 6635 | cp->sensecmd[0] = 0x03; | 6635 | cp->sensecmd[0] = 0x03; |
| 6636 | cp->sensecmd[1] = cmd->device->lun << 5; | 6636 | cp->sensecmd[1] = (cmd->device->lun & 0x7) << 5; |
| 6637 | cp->sensecmd[4] = sizeof(cp->sense_buf); | 6637 | cp->sensecmd[4] = sizeof(cp->sense_buf); |
| 6638 | 6638 | ||
| 6639 | /* | 6639 | /* |
diff --git a/drivers/scsi/ncr53c8xx.h b/drivers/scsi/ncr53c8xx.h index 0e008dacf679..02901c54b08b 100644 --- a/drivers/scsi/ncr53c8xx.h +++ b/drivers/scsi/ncr53c8xx.h | |||
| @@ -264,11 +264,7 @@ | |||
| 264 | #define SCSI_NCR_SG_TABLESIZE (SCSI_NCR_MAX_SCATTER) | 264 | #define SCSI_NCR_SG_TABLESIZE (SCSI_NCR_MAX_SCATTER) |
| 265 | #define SCSI_NCR_TIMER_INTERVAL (HZ) | 265 | #define SCSI_NCR_TIMER_INTERVAL (HZ) |
| 266 | 266 | ||
| 267 | #if 1 /* defined CONFIG_SCSI_MULTI_LUN */ | ||
| 268 | #define SCSI_NCR_MAX_LUN (16) | 267 | #define SCSI_NCR_MAX_LUN (16) |
| 269 | #else | ||
| 270 | #define SCSI_NCR_MAX_LUN (1) | ||
| 271 | #endif | ||
| 272 | 268 | ||
| 273 | /* | 269 | /* |
| 274 | * IO functions definition for big/little endian CPU support. | 270 | * IO functions definition for big/little endian CPU support. |
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index 0665f9cfdb02..50b086aef178 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c | |||
| @@ -915,7 +915,7 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s | |||
| 915 | int ret; | 915 | int ret; |
| 916 | 916 | ||
| 917 | nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, | 917 | nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, |
| 918 | "enter. target: 0x%x LUN: 0x%x cmnd: 0x%x cmndlen: 0x%x " | 918 | "enter. target: 0x%x LUN: 0x%llu cmnd: 0x%x cmndlen: 0x%x " |
| 919 | "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x", | 919 | "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x", |
| 920 | SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len, | 920 | SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len, |
| 921 | scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt)); | 921 | scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt)); |
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c index 0d78a4d5576c..80bacb5dc1d4 100644 --- a/drivers/scsi/pas16.c +++ b/drivers/scsi/pas16.c | |||
| @@ -607,8 +607,6 @@ static int pas16_release(struct Scsi_Host *shost) | |||
| 607 | if (shost->irq) | 607 | if (shost->irq) |
| 608 | free_irq(shost->irq, shost); | 608 | free_irq(shost->irq, shost); |
| 609 | NCR5380_exit(shost); | 609 | NCR5380_exit(shost); |
| 610 | if (shost->dma_channel != 0xff) | ||
| 611 | free_dma(shost->dma_channel); | ||
| 612 | if (shost->io_port && shost->n_io_port) | 610 | if (shost->io_port && shost->n_io_port) |
| 613 | release_region(shost->io_port, shost->n_io_port); | 611 | release_region(shost->io_port, shost->n_io_port); |
| 614 | scsi_unregister(shost); | 612 | scsi_unregister(shost); |
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 987fbb1b244e..340ceff03823 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c | |||
| @@ -195,7 +195,7 @@ static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt, | |||
| 195 | nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; | 195 | nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; |
| 196 | 196 | ||
| 197 | nsp_dbg(NSP_DEBUG_QUEUECOMMAND, | 197 | nsp_dbg(NSP_DEBUG_QUEUECOMMAND, |
| 198 | "SCpnt=0x%p target=%d lun=%d sglist=0x%p bufflen=%d sg_count=%d", | 198 | "SCpnt=0x%p target=%d lun=%llu sglist=0x%p bufflen=%d sg_count=%d", |
| 199 | SCpnt, target, SCpnt->device->lun, scsi_sglist(SCpnt), | 199 | SCpnt, target, SCpnt->device->lun, scsi_sglist(SCpnt), |
| 200 | scsi_bufflen(SCpnt), scsi_sg_count(SCpnt)); | 200 | scsi_bufflen(SCpnt), scsi_sg_count(SCpnt)); |
| 201 | //nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC); | 201 | //nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC); |
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c index f5b52731abd9..155f9573021f 100644 --- a/drivers/scsi/pcmcia/sym53c500_cs.c +++ b/drivers/scsi/pcmcia/sym53c500_cs.c | |||
| @@ -558,7 +558,7 @@ SYM53C500_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | |||
| 558 | 558 | ||
| 559 | DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", | 559 | DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", |
| 560 | SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->id, | 560 | SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->id, |
| 561 | SCpnt->device->lun, scsi_bufflen(SCpnt))); | 561 | (u8)SCpnt->device->lun, scsi_bufflen(SCpnt))); |
| 562 | 562 | ||
| 563 | VDEB(for (i = 0; i < SCpnt->cmd_len; i++) | 563 | VDEB(for (i = 0; i < SCpnt->cmd_len; i++) |
| 564 | printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i])); | 564 | printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i])); |
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index a368d77b8d41..7abbf284da1a 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c | |||
| @@ -397,7 +397,10 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, | |||
| 397 | payload.func_specific = kzalloc(4096, GFP_KERNEL); | 397 | payload.func_specific = kzalloc(4096, GFP_KERNEL); |
| 398 | if (!payload.func_specific) | 398 | if (!payload.func_specific) |
| 399 | return -ENOMEM; | 399 | return -ENOMEM; |
| 400 | PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); | 400 | if (PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload)) { |
| 401 | kfree(payload.func_specific); | ||
| 402 | return -ENOMEM; | ||
| 403 | } | ||
| 401 | wait_for_completion(&completion); | 404 | wait_for_completion(&completion); |
| 402 | virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; | 405 | virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; |
| 403 | for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT; | 406 | for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT; |
| @@ -523,18 +526,19 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha) | |||
| 523 | { | 526 | { |
| 524 | struct pm8001_ioctl_payload *payload; | 527 | struct pm8001_ioctl_payload *payload; |
| 525 | DECLARE_COMPLETION_ONSTACK(completion); | 528 | DECLARE_COMPLETION_ONSTACK(completion); |
| 526 | u8 *ioctlbuffer = NULL; | 529 | u8 *ioctlbuffer; |
| 527 | u32 length = 0; | 530 | u32 ret; |
| 528 | u32 ret = 0; | 531 | u32 length = 1024 * 5 + sizeof(*payload) - 1; |
| 532 | |||
| 533 | if (pm8001_ha->fw_image->size > 4096) { | ||
| 534 | pm8001_ha->fw_status = FAIL_FILE_SIZE; | ||
| 535 | return -EFAULT; | ||
| 536 | } | ||
| 529 | 537 | ||
| 530 | length = 1024 * 5 + sizeof(*payload) - 1; | ||
| 531 | ioctlbuffer = kzalloc(length, GFP_KERNEL); | 538 | ioctlbuffer = kzalloc(length, GFP_KERNEL); |
| 532 | if (!ioctlbuffer) | 539 | if (!ioctlbuffer) { |
| 540 | pm8001_ha->fw_status = FAIL_OUT_MEMORY; | ||
| 533 | return -ENOMEM; | 541 | return -ENOMEM; |
| 534 | if ((pm8001_ha->fw_image->size <= 0) || | ||
| 535 | (pm8001_ha->fw_image->size > 4096)) { | ||
| 536 | ret = FAIL_FILE_SIZE; | ||
| 537 | goto out; | ||
| 538 | } | 542 | } |
| 539 | payload = (struct pm8001_ioctl_payload *)ioctlbuffer; | 543 | payload = (struct pm8001_ioctl_payload *)ioctlbuffer; |
| 540 | memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data, | 544 | memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data, |
| @@ -544,6 +548,10 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha) | |||
| 544 | payload->minor_function = 0x1; | 548 | payload->minor_function = 0x1; |
| 545 | pm8001_ha->nvmd_completion = &completion; | 549 | pm8001_ha->nvmd_completion = &completion; |
| 546 | ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload); | 550 | ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload); |
| 551 | if (ret) { | ||
| 552 | pm8001_ha->fw_status = FAIL_OUT_MEMORY; | ||
| 553 | goto out; | ||
| 554 | } | ||
| 547 | wait_for_completion(&completion); | 555 | wait_for_completion(&completion); |
| 548 | out: | 556 | out: |
| 549 | kfree(ioctlbuffer); | 557 | kfree(ioctlbuffer); |
| @@ -554,35 +562,31 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha) | |||
| 554 | { | 562 | { |
| 555 | struct pm8001_ioctl_payload *payload; | 563 | struct pm8001_ioctl_payload *payload; |
| 556 | DECLARE_COMPLETION_ONSTACK(completion); | 564 | DECLARE_COMPLETION_ONSTACK(completion); |
| 557 | u8 *ioctlbuffer = NULL; | 565 | u8 *ioctlbuffer; |
| 558 | u32 length = 0; | ||
| 559 | struct fw_control_info *fwControl; | 566 | struct fw_control_info *fwControl; |
| 560 | u32 loopNumber, loopcount = 0; | ||
| 561 | u32 sizeRead = 0; | ||
| 562 | u32 partitionSize, partitionSizeTmp; | 567 | u32 partitionSize, partitionSizeTmp; |
| 563 | u32 ret = 0; | 568 | u32 loopNumber, loopcount; |
| 564 | u32 partitionNumber = 0; | ||
| 565 | struct pm8001_fw_image_header *image_hdr; | 569 | struct pm8001_fw_image_header *image_hdr; |
| 570 | u32 sizeRead = 0; | ||
| 571 | u32 ret = 0; | ||
| 572 | u32 length = 1024 * 16 + sizeof(*payload) - 1; | ||
| 566 | 573 | ||
| 567 | length = 1024 * 16 + sizeof(*payload) - 1; | 574 | if (pm8001_ha->fw_image->size < 28) { |
| 575 | pm8001_ha->fw_status = FAIL_FILE_SIZE; | ||
| 576 | return -EFAULT; | ||
| 577 | } | ||
| 568 | ioctlbuffer = kzalloc(length, GFP_KERNEL); | 578 | ioctlbuffer = kzalloc(length, GFP_KERNEL); |
| 569 | image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data; | 579 | if (!ioctlbuffer) { |
| 570 | if (!ioctlbuffer) | 580 | pm8001_ha->fw_status = FAIL_OUT_MEMORY; |
| 571 | return -ENOMEM; | 581 | return -ENOMEM; |
| 572 | if (pm8001_ha->fw_image->size < 28) { | ||
| 573 | ret = FAIL_FILE_SIZE; | ||
| 574 | goto out; | ||
| 575 | } | 582 | } |
| 576 | 583 | image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data; | |
| 577 | while (sizeRead < pm8001_ha->fw_image->size) { | 584 | while (sizeRead < pm8001_ha->fw_image->size) { |
| 578 | partitionSizeTmp = | 585 | partitionSizeTmp = |
| 579 | *(u32 *)((u8 *)&image_hdr->image_length + sizeRead); | 586 | *(u32 *)((u8 *)&image_hdr->image_length + sizeRead); |
| 580 | partitionSize = be32_to_cpu(partitionSizeTmp); | 587 | partitionSize = be32_to_cpu(partitionSizeTmp); |
| 581 | loopcount = (partitionSize + HEADER_LEN)/IOCTL_BUF_SIZE; | 588 | loopcount = DIV_ROUND_UP(partitionSize + HEADER_LEN, |
| 582 | if (loopcount % IOCTL_BUF_SIZE) | 589 | IOCTL_BUF_SIZE); |
| 583 | loopcount++; | ||
| 584 | if (loopcount == 0) | ||
| 585 | loopcount++; | ||
| 586 | for (loopNumber = 0; loopNumber < loopcount; loopNumber++) { | 590 | for (loopNumber = 0; loopNumber < loopcount; loopNumber++) { |
| 587 | payload = (struct pm8001_ioctl_payload *)ioctlbuffer; | 591 | payload = (struct pm8001_ioctl_payload *)ioctlbuffer; |
| 588 | payload->length = 1024*16; | 592 | payload->length = 1024*16; |
| @@ -614,18 +618,18 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha) | |||
| 614 | 618 | ||
| 615 | pm8001_ha->nvmd_completion = &completion; | 619 | pm8001_ha->nvmd_completion = &completion; |
| 616 | ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload); | 620 | ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload); |
| 621 | if (ret) { | ||
| 622 | pm8001_ha->fw_status = FAIL_OUT_MEMORY; | ||
| 623 | goto out; | ||
| 624 | } | ||
| 617 | wait_for_completion(&completion); | 625 | wait_for_completion(&completion); |
| 618 | if (ret || (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS)) { | 626 | if (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS) { |
| 619 | ret = fwControl->retcode; | 627 | pm8001_ha->fw_status = fwControl->retcode; |
| 620 | kfree(ioctlbuffer); | 628 | ret = -EFAULT; |
| 621 | ioctlbuffer = NULL; | 629 | goto out; |
| 622 | break; | 630 | } |
| 623 | } | 631 | } |
| 624 | } | 632 | } |
| 625 | if (ret) | ||
| 626 | break; | ||
| 627 | partitionNumber++; | ||
| 628 | } | ||
| 629 | out: | 633 | out: |
| 630 | kfree(ioctlbuffer); | 634 | kfree(ioctlbuffer); |
| 631 | return ret; | 635 | return ret; |
| @@ -640,22 +644,29 @@ static ssize_t pm8001_store_update_fw(struct device *cdev, | |||
| 640 | char *cmd_ptr, *filename_ptr; | 644 | char *cmd_ptr, *filename_ptr; |
| 641 | int res, i; | 645 | int res, i; |
| 642 | int flash_command = FLASH_CMD_NONE; | 646 | int flash_command = FLASH_CMD_NONE; |
| 643 | int err = 0; | 647 | int ret; |
| 648 | |||
| 644 | if (!capable(CAP_SYS_ADMIN)) | 649 | if (!capable(CAP_SYS_ADMIN)) |
| 645 | return -EACCES; | 650 | return -EACCES; |
| 646 | 651 | ||
| 647 | cmd_ptr = kzalloc(count*2, GFP_KERNEL); | 652 | /* this test protects us from running two flash processes at once, |
| 653 | * so we should start with this test */ | ||
| 654 | if (pm8001_ha->fw_status == FLASH_IN_PROGRESS) | ||
| 655 | return -EINPROGRESS; | ||
| 656 | pm8001_ha->fw_status = FLASH_IN_PROGRESS; | ||
| 648 | 657 | ||
| 658 | cmd_ptr = kzalloc(count*2, GFP_KERNEL); | ||
| 649 | if (!cmd_ptr) { | 659 | if (!cmd_ptr) { |
| 650 | err = FAIL_OUT_MEMORY; | 660 | pm8001_ha->fw_status = FAIL_OUT_MEMORY; |
| 651 | goto out; | 661 | return -ENOMEM; |
| 652 | } | 662 | } |
| 653 | 663 | ||
| 654 | filename_ptr = cmd_ptr + count; | 664 | filename_ptr = cmd_ptr + count; |
| 655 | res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr); | 665 | res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr); |
| 656 | if (res != 2) { | 666 | if (res != 2) { |
| 657 | err = FAIL_PARAMETERS; | 667 | pm8001_ha->fw_status = FAIL_PARAMETERS; |
| 658 | goto out1; | 668 | ret = -EINVAL; |
| 669 | goto out; | ||
| 659 | } | 670 | } |
| 660 | 671 | ||
| 661 | for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) { | 672 | for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) { |
| @@ -666,50 +677,38 @@ static ssize_t pm8001_store_update_fw(struct device *cdev, | |||
| 666 | } | 677 | } |
| 667 | } | 678 | } |
| 668 | if (flash_command == FLASH_CMD_NONE) { | 679 | if (flash_command == FLASH_CMD_NONE) { |
| 669 | err = FAIL_PARAMETERS; | 680 | pm8001_ha->fw_status = FAIL_PARAMETERS; |
| 670 | goto out1; | 681 | ret = -EINVAL; |
| 682 | goto out; | ||
| 671 | } | 683 | } |
| 672 | 684 | ||
| 673 | if (pm8001_ha->fw_status == FLASH_IN_PROGRESS) { | 685 | ret = request_firmware(&pm8001_ha->fw_image, |
| 674 | err = FLASH_IN_PROGRESS; | ||
| 675 | goto out1; | ||
| 676 | } | ||
| 677 | err = request_firmware(&pm8001_ha->fw_image, | ||
| 678 | filename_ptr, | 686 | filename_ptr, |
| 679 | pm8001_ha->dev); | 687 | pm8001_ha->dev); |
| 680 | 688 | ||
| 681 | if (err) { | 689 | if (ret) { |
| 682 | PM8001_FAIL_DBG(pm8001_ha, | 690 | PM8001_FAIL_DBG(pm8001_ha, |
| 683 | pm8001_printk("Failed to load firmware image file %s," | 691 | pm8001_printk( |
| 684 | " error %d\n", filename_ptr, err)); | 692 | "Failed to load firmware image file %s, error %d\n", |
| 685 | err = FAIL_OPEN_BIOS_FILE; | 693 | filename_ptr, ret)); |
| 686 | goto out1; | 694 | pm8001_ha->fw_status = FAIL_OPEN_BIOS_FILE; |
| 695 | goto out; | ||
| 687 | } | 696 | } |
| 688 | 697 | ||
| 689 | switch (flash_command) { | 698 | if (FLASH_CMD_UPDATE == flash_command) |
| 690 | case FLASH_CMD_UPDATE: | 699 | ret = pm8001_update_flash(pm8001_ha); |
| 691 | pm8001_ha->fw_status = FLASH_IN_PROGRESS; | 700 | else |
| 692 | err = pm8001_update_flash(pm8001_ha); | 701 | ret = pm8001_set_nvmd(pm8001_ha); |
| 693 | break; | 702 | |
| 694 | case FLASH_CMD_SET_NVMD: | ||
| 695 | pm8001_ha->fw_status = FLASH_IN_PROGRESS; | ||
| 696 | err = pm8001_set_nvmd(pm8001_ha); | ||
| 697 | break; | ||
| 698 | default: | ||
| 699 | pm8001_ha->fw_status = FAIL_PARAMETERS; | ||
| 700 | err = FAIL_PARAMETERS; | ||
| 701 | break; | ||
| 702 | } | ||
| 703 | release_firmware(pm8001_ha->fw_image); | 703 | release_firmware(pm8001_ha->fw_image); |
| 704 | out1: | ||
| 705 | kfree(cmd_ptr); | ||
| 706 | out: | 704 | out: |
| 707 | pm8001_ha->fw_status = err; | 705 | kfree(cmd_ptr); |
| 708 | 706 | ||
| 709 | if (!err) | 707 | if (ret) |
| 710 | return count; | 708 | return ret; |
| 711 | else | 709 | |
| 712 | return -err; | 710 | pm8001_ha->fw_status = FLASH_OK; |
| 711 | return count; | ||
| 713 | } | 712 | } |
| 714 | 713 | ||
| 715 | static ssize_t pm8001_show_update_fw(struct device *cdev, | 714 | static ssize_t pm8001_show_update_fw(struct device *cdev, |
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index a97be015e52e..dd12c6fe57a6 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c | |||
| @@ -1346,7 +1346,7 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, | |||
| 1346 | &pMessage) < 0) { | 1346 | &pMessage) < 0) { |
| 1347 | PM8001_IO_DBG(pm8001_ha, | 1347 | PM8001_IO_DBG(pm8001_ha, |
| 1348 | pm8001_printk("No free mpi buffer\n")); | 1348 | pm8001_printk("No free mpi buffer\n")); |
| 1349 | return -1; | 1349 | return -ENOMEM; |
| 1350 | } | 1350 | } |
| 1351 | BUG_ON(!payload); | 1351 | BUG_ON(!payload); |
| 1352 | /*Copy to the payload*/ | 1352 | /*Copy to the payload*/ |
| @@ -1751,6 +1751,8 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, | |||
| 1751 | task_abort.tag = cpu_to_le32(ccb_tag); | 1751 | task_abort.tag = cpu_to_le32(ccb_tag); |
| 1752 | 1752 | ||
| 1753 | ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); | 1753 | ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); |
| 1754 | if (ret) | ||
| 1755 | pm8001_tag_free(pm8001_ha, ccb_tag); | ||
| 1754 | 1756 | ||
| 1755 | } | 1757 | } |
| 1756 | 1758 | ||
| @@ -1778,6 +1780,7 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, | |||
| 1778 | 1780 | ||
| 1779 | res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); | 1781 | res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); |
| 1780 | if (res) { | 1782 | if (res) { |
| 1783 | sas_free_task(task); | ||
| 1781 | PM8001_FAIL_DBG(pm8001_ha, | 1784 | PM8001_FAIL_DBG(pm8001_ha, |
| 1782 | pm8001_printk("cannot allocate tag !!!\n")); | 1785 | pm8001_printk("cannot allocate tag !!!\n")); |
| 1783 | return; | 1786 | return; |
| @@ -1788,14 +1791,14 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, | |||
| 1788 | */ | 1791 | */ |
| 1789 | dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); | 1792 | dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); |
| 1790 | if (!dev) { | 1793 | if (!dev) { |
| 1794 | sas_free_task(task); | ||
| 1795 | pm8001_tag_free(pm8001_ha, ccb_tag); | ||
| 1791 | PM8001_FAIL_DBG(pm8001_ha, | 1796 | PM8001_FAIL_DBG(pm8001_ha, |
| 1792 | pm8001_printk("Domain device cannot be allocated\n")); | 1797 | pm8001_printk("Domain device cannot be allocated\n")); |
| 1793 | sas_free_task(task); | ||
| 1794 | return; | 1798 | return; |
| 1795 | } else { | ||
| 1796 | task->dev = dev; | ||
| 1797 | task->dev->lldd_dev = pm8001_ha_dev; | ||
| 1798 | } | 1799 | } |
| 1800 | task->dev = dev; | ||
| 1801 | task->dev->lldd_dev = pm8001_ha_dev; | ||
| 1799 | 1802 | ||
| 1800 | ccb = &pm8001_ha->ccb_info[ccb_tag]; | 1803 | ccb = &pm8001_ha->ccb_info[ccb_tag]; |
| 1801 | ccb->device = pm8001_ha_dev; | 1804 | ccb->device = pm8001_ha_dev; |
| @@ -1821,7 +1824,11 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, | |||
| 1821 | memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); | 1824 | memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); |
| 1822 | 1825 | ||
| 1823 | res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); | 1826 | res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); |
| 1824 | 1827 | if (res) { | |
| 1828 | sas_free_task(task); | ||
| 1829 | pm8001_tag_free(pm8001_ha, ccb_tag); | ||
| 1830 | kfree(dev); | ||
| 1831 | } | ||
| 1825 | } | 1832 | } |
| 1826 | 1833 | ||
| 1827 | /** | 1834 | /** |
| @@ -3100,7 +3107,7 @@ void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, | |||
| 3100 | complete(pm8001_dev->setds_completion); | 3107 | complete(pm8001_dev->setds_completion); |
| 3101 | ccb->task = NULL; | 3108 | ccb->task = NULL; |
| 3102 | ccb->ccb_tag = 0xFFFFFFFF; | 3109 | ccb->ccb_tag = 0xFFFFFFFF; |
| 3103 | pm8001_ccb_free(pm8001_ha, tag); | 3110 | pm8001_tag_free(pm8001_ha, tag); |
| 3104 | } | 3111 | } |
| 3105 | 3112 | ||
| 3106 | void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) | 3113 | void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) |
| @@ -3119,13 +3126,12 @@ void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) | |||
| 3119 | } | 3126 | } |
| 3120 | ccb->task = NULL; | 3127 | ccb->task = NULL; |
| 3121 | ccb->ccb_tag = 0xFFFFFFFF; | 3128 | ccb->ccb_tag = 0xFFFFFFFF; |
| 3122 | pm8001_ccb_free(pm8001_ha, tag); | 3129 | pm8001_tag_free(pm8001_ha, tag); |
| 3123 | } | 3130 | } |
| 3124 | 3131 | ||
| 3125 | void | 3132 | void |
| 3126 | pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) | 3133 | pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) |
| 3127 | { | 3134 | { |
| 3128 | struct fw_control_ex *fw_control_context; | ||
| 3129 | struct get_nvm_data_resp *pPayload = | 3135 | struct get_nvm_data_resp *pPayload = |
| 3130 | (struct get_nvm_data_resp *)(piomb + 4); | 3136 | (struct get_nvm_data_resp *)(piomb + 4); |
| 3131 | u32 tag = le32_to_cpu(pPayload->tag); | 3137 | u32 tag = le32_to_cpu(pPayload->tag); |
| @@ -3134,7 +3140,6 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) | |||
| 3134 | u32 ir_tds_bn_dps_das_nvm = | 3140 | u32 ir_tds_bn_dps_das_nvm = |
| 3135 | le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm); | 3141 | le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm); |
| 3136 | void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; | 3142 | void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; |
| 3137 | fw_control_context = ccb->fw_control_context; | ||
| 3138 | 3143 | ||
| 3139 | PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n")); | 3144 | PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n")); |
| 3140 | if ((dlen_status & NVMD_STAT) != 0) { | 3145 | if ((dlen_status & NVMD_STAT) != 0) { |
| @@ -3175,13 +3180,11 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) | |||
| 3175 | pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n", | 3180 | pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n", |
| 3176 | (dlen_status & NVMD_LEN) >> 24)); | 3181 | (dlen_status & NVMD_LEN) >> 24)); |
| 3177 | } | 3182 | } |
| 3178 | memcpy(fw_control_context->usrAddr, | 3183 | kfree(ccb->fw_control_context); |
| 3179 | pm8001_ha->memoryMap.region[NVMD].virt_ptr, | ||
| 3180 | fw_control_context->len); | ||
| 3181 | complete(pm8001_ha->nvmd_completion); | ||
| 3182 | ccb->task = NULL; | 3184 | ccb->task = NULL; |
| 3183 | ccb->ccb_tag = 0xFFFFFFFF; | 3185 | ccb->ccb_tag = 0xFFFFFFFF; |
| 3184 | pm8001_ccb_free(pm8001_ha, tag); | 3186 | pm8001_tag_free(pm8001_ha, tag); |
| 3187 | complete(pm8001_ha->nvmd_completion); | ||
| 3185 | } | 3188 | } |
| 3186 | 3189 | ||
| 3187 | int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) | 3190 | int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) |
| @@ -3588,7 +3591,7 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) | |||
| 3588 | complete(pm8001_dev->dcompletion); | 3591 | complete(pm8001_dev->dcompletion); |
| 3589 | ccb->task = NULL; | 3592 | ccb->task = NULL; |
| 3590 | ccb->ccb_tag = 0xFFFFFFFF; | 3593 | ccb->ccb_tag = 0xFFFFFFFF; |
| 3591 | pm8001_ccb_free(pm8001_ha, htag); | 3594 | pm8001_tag_free(pm8001_ha, htag); |
| 3592 | return 0; | 3595 | return 0; |
| 3593 | } | 3596 | } |
| 3594 | 3597 | ||
| @@ -3617,15 +3620,11 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, | |||
| 3617 | void *piomb) | 3620 | void *piomb) |
| 3618 | { | 3621 | { |
| 3619 | u32 status; | 3622 | u32 status; |
| 3620 | struct fw_control_ex fw_control_context; | ||
| 3621 | struct fw_flash_Update_resp *ppayload = | 3623 | struct fw_flash_Update_resp *ppayload = |
| 3622 | (struct fw_flash_Update_resp *)(piomb + 4); | 3624 | (struct fw_flash_Update_resp *)(piomb + 4); |
| 3623 | u32 tag = le32_to_cpu(ppayload->tag); | 3625 | u32 tag = le32_to_cpu(ppayload->tag); |
| 3624 | struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; | 3626 | struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; |
| 3625 | status = le32_to_cpu(ppayload->status); | 3627 | status = le32_to_cpu(ppayload->status); |
| 3626 | memcpy(&fw_control_context, | ||
| 3627 | ccb->fw_control_context, | ||
| 3628 | sizeof(fw_control_context)); | ||
| 3629 | switch (status) { | 3628 | switch (status) { |
| 3630 | case FLASH_UPDATE_COMPLETE_PENDING_REBOOT: | 3629 | case FLASH_UPDATE_COMPLETE_PENDING_REBOOT: |
| 3631 | PM8001_MSG_DBG(pm8001_ha, | 3630 | PM8001_MSG_DBG(pm8001_ha, |
| @@ -3668,11 +3667,11 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, | |||
| 3668 | pm8001_printk("No matched status = %d\n", status)); | 3667 | pm8001_printk("No matched status = %d\n", status)); |
| 3669 | break; | 3668 | break; |
| 3670 | } | 3669 | } |
| 3671 | ccb->fw_control_context->fw_control->retcode = status; | 3670 | kfree(ccb->fw_control_context); |
| 3672 | complete(pm8001_ha->nvmd_completion); | ||
| 3673 | ccb->task = NULL; | 3671 | ccb->task = NULL; |
| 3674 | ccb->ccb_tag = 0xFFFFFFFF; | 3672 | ccb->ccb_tag = 0xFFFFFFFF; |
| 3675 | pm8001_ccb_free(pm8001_ha, tag); | 3673 | pm8001_tag_free(pm8001_ha, tag); |
| 3674 | complete(pm8001_ha->nvmd_completion); | ||
| 3676 | return 0; | 3675 | return 0; |
| 3677 | } | 3676 | } |
| 3678 | 3677 | ||
| @@ -4257,7 +4256,11 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha, | |||
| 4257 | smp_cmd.long_smp_req.long_resp_size = | 4256 | smp_cmd.long_smp_req.long_resp_size = |
| 4258 | cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); | 4257 | cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); |
| 4259 | build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd); | 4258 | build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd); |
| 4260 | pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0); | 4259 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, |
| 4260 | (u32 *)&smp_cmd, 0); | ||
| 4261 | if (rc) | ||
| 4262 | goto err_out_2; | ||
| 4263 | |||
| 4261 | return 0; | 4264 | return 0; |
| 4262 | 4265 | ||
| 4263 | err_out_2: | 4266 | err_out_2: |
| @@ -4398,7 +4401,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, | |||
| 4398 | 4401 | ||
| 4399 | /* Check for read log for failed drive and return */ | 4402 | /* Check for read log for failed drive and return */ |
| 4400 | if (sata_cmd.sata_fis.command == 0x2f) { | 4403 | if (sata_cmd.sata_fis.command == 0x2f) { |
| 4401 | if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || | 4404 | if (((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || |
| 4402 | (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || | 4405 | (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || |
| 4403 | (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { | 4406 | (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { |
| 4404 | struct task_status_struct *ts; | 4407 | struct task_status_struct *ts; |
| @@ -4789,6 +4792,10 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, | |||
| 4789 | break; | 4792 | break; |
| 4790 | } | 4793 | } |
| 4791 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); | 4794 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); |
| 4795 | if (rc) { | ||
| 4796 | kfree(fw_control_context); | ||
| 4797 | pm8001_tag_free(pm8001_ha, tag); | ||
| 4798 | } | ||
| 4792 | return rc; | 4799 | return rc; |
| 4793 | } | 4800 | } |
| 4794 | 4801 | ||
| @@ -4817,7 +4824,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, | |||
| 4817 | rc = pm8001_tag_alloc(pm8001_ha, &tag); | 4824 | rc = pm8001_tag_alloc(pm8001_ha, &tag); |
| 4818 | if (rc) { | 4825 | if (rc) { |
| 4819 | kfree(fw_control_context); | 4826 | kfree(fw_control_context); |
| 4820 | return rc; | 4827 | return -EBUSY; |
| 4821 | } | 4828 | } |
| 4822 | ccb = &pm8001_ha->ccb_info[tag]; | 4829 | ccb = &pm8001_ha->ccb_info[tag]; |
| 4823 | ccb->fw_control_context = fw_control_context; | 4830 | ccb->fw_control_context = fw_control_context; |
| @@ -4869,6 +4876,10 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, | |||
| 4869 | break; | 4876 | break; |
| 4870 | } | 4877 | } |
| 4871 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); | 4878 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); |
| 4879 | if (rc) { | ||
| 4880 | kfree(fw_control_context); | ||
| 4881 | pm8001_tag_free(pm8001_ha, tag); | ||
| 4882 | } | ||
| 4872 | return rc; | 4883 | return rc; |
| 4873 | } | 4884 | } |
| 4874 | 4885 | ||
| @@ -4935,7 +4946,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, | |||
| 4935 | rc = pm8001_tag_alloc(pm8001_ha, &tag); | 4946 | rc = pm8001_tag_alloc(pm8001_ha, &tag); |
| 4936 | if (rc) { | 4947 | if (rc) { |
| 4937 | kfree(fw_control_context); | 4948 | kfree(fw_control_context); |
| 4938 | return rc; | 4949 | return -EBUSY; |
| 4939 | } | 4950 | } |
| 4940 | ccb = &pm8001_ha->ccb_info[tag]; | 4951 | ccb = &pm8001_ha->ccb_info[tag]; |
| 4941 | ccb->fw_control_context = fw_control_context; | 4952 | ccb->fw_control_context = fw_control_context; |
| @@ -5061,7 +5072,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha) | |||
| 5061 | memset(&payload, 0, sizeof(payload)); | 5072 | memset(&payload, 0, sizeof(payload)); |
| 5062 | rc = pm8001_tag_alloc(pm8001_ha, &tag); | 5073 | rc = pm8001_tag_alloc(pm8001_ha, &tag); |
| 5063 | if (rc) | 5074 | if (rc) |
| 5064 | return -1; | 5075 | return -ENOMEM; |
| 5065 | ccb = &pm8001_ha->ccb_info[tag]; | 5076 | ccb = &pm8001_ha->ccb_info[tag]; |
| 5066 | ccb->ccb_tag = tag; | 5077 | ccb->ccb_tag = tag; |
| 5067 | circularQ = &pm8001_ha->inbnd_q_tbl[0]; | 5078 | circularQ = &pm8001_ha->inbnd_q_tbl[0]; |
| @@ -5070,6 +5081,8 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha) | |||
| 5070 | payload.sata_hol_tmo = cpu_to_le32(80); | 5081 | payload.sata_hol_tmo = cpu_to_le32(80); |
| 5071 | payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); | 5082 | payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); |
| 5072 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); | 5083 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); |
| 5084 | if (rc) | ||
| 5085 | pm8001_tag_free(pm8001_ha, tag); | ||
| 5073 | return rc; | 5086 | return rc; |
| 5074 | 5087 | ||
| 5075 | } | 5088 | } |
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index e90c89f1d480..666bf5af06e2 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c | |||
| @@ -246,6 +246,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, | |||
| 246 | { | 246 | { |
| 247 | int i; | 247 | int i; |
| 248 | spin_lock_init(&pm8001_ha->lock); | 248 | spin_lock_init(&pm8001_ha->lock); |
| 249 | spin_lock_init(&pm8001_ha->bitmap_lock); | ||
| 249 | PM8001_INIT_DBG(pm8001_ha, | 250 | PM8001_INIT_DBG(pm8001_ha, |
| 250 | pm8001_printk("pm8001_alloc: PHY:%x\n", | 251 | pm8001_printk("pm8001_alloc: PHY:%x\n", |
| 251 | pm8001_ha->chip->n_phy)); | 252 | pm8001_ha->chip->n_phy)); |
| @@ -621,6 +622,8 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) | |||
| 621 | DECLARE_COMPLETION_ONSTACK(completion); | 622 | DECLARE_COMPLETION_ONSTACK(completion); |
| 622 | struct pm8001_ioctl_payload payload; | 623 | struct pm8001_ioctl_payload payload; |
| 623 | u16 deviceid; | 624 | u16 deviceid; |
| 625 | int rc; | ||
| 626 | |||
| 624 | pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); | 627 | pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); |
| 625 | pm8001_ha->nvmd_completion = &completion; | 628 | pm8001_ha->nvmd_completion = &completion; |
| 626 | 629 | ||
| @@ -638,7 +641,16 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) | |||
| 638 | } | 641 | } |
| 639 | payload.offset = 0; | 642 | payload.offset = 0; |
| 640 | payload.func_specific = kzalloc(payload.length, GFP_KERNEL); | 643 | payload.func_specific = kzalloc(payload.length, GFP_KERNEL); |
| 641 | PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); | 644 | if (!payload.func_specific) { |
| 645 | PM8001_INIT_DBG(pm8001_ha, pm8001_printk("mem alloc fail\n")); | ||
| 646 | return; | ||
| 647 | } | ||
| 648 | rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); | ||
| 649 | if (rc) { | ||
| 650 | kfree(payload.func_specific); | ||
| 651 | PM8001_INIT_DBG(pm8001_ha, pm8001_printk("nvmd failed\n")); | ||
| 652 | return; | ||
| 653 | } | ||
| 642 | wait_for_completion(&completion); | 654 | wait_for_completion(&completion); |
| 643 | 655 | ||
| 644 | for (i = 0, j = 0; i <= 7; i++, j++) { | 656 | for (i = 0, j = 0; i <= 7; i++, j++) { |
| @@ -661,6 +673,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) | |||
| 661 | pm8001_printk("phy %d sas_addr = %016llx\n", i, | 673 | pm8001_printk("phy %d sas_addr = %016llx\n", i, |
| 662 | pm8001_ha->phy[i].dev_sas_addr)); | 674 | pm8001_ha->phy[i].dev_sas_addr)); |
| 663 | } | 675 | } |
| 676 | kfree(payload.func_specific); | ||
| 664 | #else | 677 | #else |
| 665 | for (i = 0; i < pm8001_ha->chip->n_phy; i++) { | 678 | for (i = 0; i < pm8001_ha->chip->n_phy; i++) { |
| 666 | pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL; | 679 | pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL; |
| @@ -684,6 +697,7 @@ static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) | |||
| 684 | /*OPTION ROM FLASH read for the SPC cards */ | 697 | /*OPTION ROM FLASH read for the SPC cards */ |
| 685 | DECLARE_COMPLETION_ONSTACK(completion); | 698 | DECLARE_COMPLETION_ONSTACK(completion); |
| 686 | struct pm8001_ioctl_payload payload; | 699 | struct pm8001_ioctl_payload payload; |
| 700 | int rc; | ||
| 687 | 701 | ||
| 688 | pm8001_ha->nvmd_completion = &completion; | 702 | pm8001_ha->nvmd_completion = &completion; |
| 689 | /* SAS ADDRESS read from flash / EEPROM */ | 703 | /* SAS ADDRESS read from flash / EEPROM */ |
| @@ -694,7 +708,12 @@ static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) | |||
| 694 | if (!payload.func_specific) | 708 | if (!payload.func_specific) |
| 695 | return -ENOMEM; | 709 | return -ENOMEM; |
| 696 | /* Read phy setting values from flash */ | 710 | /* Read phy setting values from flash */ |
| 697 | PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); | 711 | rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); |
| 712 | if (rc) { | ||
| 713 | kfree(payload.func_specific); | ||
| 714 | PM8001_INIT_DBG(pm8001_ha, pm8001_printk("nvmd failed\n")); | ||
| 715 | return -ENOMEM; | ||
| 716 | } | ||
| 698 | wait_for_completion(&completion); | 717 | wait_for_completion(&completion); |
| 699 | pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific); | 718 | pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific); |
| 700 | kfree(payload.func_specific); | 719 | kfree(payload.func_specific); |
| @@ -729,33 +748,35 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) | |||
| 729 | sizeof(pm8001_ha->msix_entries[0]); | 748 | sizeof(pm8001_ha->msix_entries[0]); |
| 730 | for (i = 0; i < max_entry ; i++) | 749 | for (i = 0; i < max_entry ; i++) |
| 731 | pm8001_ha->msix_entries[i].entry = i; | 750 | pm8001_ha->msix_entries[i].entry = i; |
| 732 | rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries, | 751 | rc = pci_enable_msix_exact(pm8001_ha->pdev, pm8001_ha->msix_entries, |
| 733 | number_of_intr); | 752 | number_of_intr); |
| 734 | pm8001_ha->number_of_intr = number_of_intr; | 753 | pm8001_ha->number_of_intr = number_of_intr; |
| 735 | if (!rc) { | 754 | if (rc) |
| 736 | PM8001_INIT_DBG(pm8001_ha, pm8001_printk( | 755 | return rc; |
| 737 | "pci_enable_msix request ret:%d no of intr %d\n", | 756 | |
| 738 | rc, pm8001_ha->number_of_intr)); | 757 | PM8001_INIT_DBG(pm8001_ha, pm8001_printk( |
| 739 | 758 | "pci_enable_msix_exact request ret:%d no of intr %d\n", | |
| 740 | 759 | rc, pm8001_ha->number_of_intr)); | |
| 741 | for (i = 0; i < number_of_intr; i++) { | 760 | |
| 742 | snprintf(intr_drvname[i], sizeof(intr_drvname[0]), | 761 | for (i = 0; i < number_of_intr; i++) { |
| 743 | DRV_NAME"%d", i); | 762 | snprintf(intr_drvname[i], sizeof(intr_drvname[0]), |
| 744 | pm8001_ha->irq_vector[i].irq_id = i; | 763 | DRV_NAME"%d", i); |
| 745 | pm8001_ha->irq_vector[i].drv_inst = pm8001_ha; | 764 | pm8001_ha->irq_vector[i].irq_id = i; |
| 746 | 765 | pm8001_ha->irq_vector[i].drv_inst = pm8001_ha; | |
| 747 | if (request_irq(pm8001_ha->msix_entries[i].vector, | 766 | |
| 748 | pm8001_interrupt_handler_msix, flag, | 767 | rc = request_irq(pm8001_ha->msix_entries[i].vector, |
| 749 | intr_drvname[i], &(pm8001_ha->irq_vector[i]))) { | 768 | pm8001_interrupt_handler_msix, flag, |
| 750 | for (j = 0; j < i; j++) | 769 | intr_drvname[i], &(pm8001_ha->irq_vector[i])); |
| 751 | free_irq( | 770 | if (rc) { |
| 752 | pm8001_ha->msix_entries[j].vector, | 771 | for (j = 0; j < i; j++) { |
| 772 | free_irq(pm8001_ha->msix_entries[j].vector, | ||
| 753 | &(pm8001_ha->irq_vector[i])); | 773 | &(pm8001_ha->irq_vector[i])); |
| 754 | pci_disable_msix(pm8001_ha->pdev); | ||
| 755 | break; | ||
| 756 | } | 774 | } |
| 775 | pci_disable_msix(pm8001_ha->pdev); | ||
| 776 | break; | ||
| 757 | } | 777 | } |
| 758 | } | 778 | } |
| 779 | |||
| 759 | return rc; | 780 | return rc; |
| 760 | } | 781 | } |
| 761 | #endif | 782 | #endif |
| @@ -964,6 +985,7 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) | |||
| 964 | int i, j; | 985 | int i, j; |
| 965 | u32 device_state; | 986 | u32 device_state; |
| 966 | pm8001_ha = sha->lldd_ha; | 987 | pm8001_ha = sha->lldd_ha; |
| 988 | sas_suspend_ha(sha); | ||
| 967 | flush_workqueue(pm8001_wq); | 989 | flush_workqueue(pm8001_wq); |
| 968 | scsi_block_requests(pm8001_ha->shost); | 990 | scsi_block_requests(pm8001_ha->shost); |
| 969 | if (!pdev->pm_cap) { | 991 | if (!pdev->pm_cap) { |
| @@ -1013,6 +1035,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev) | |||
| 1013 | int rc; | 1035 | int rc; |
| 1014 | u8 i = 0, j; | 1036 | u8 i = 0, j; |
| 1015 | u32 device_state; | 1037 | u32 device_state; |
| 1038 | DECLARE_COMPLETION_ONSTACK(completion); | ||
| 1016 | pm8001_ha = sha->lldd_ha; | 1039 | pm8001_ha = sha->lldd_ha; |
| 1017 | device_state = pdev->current_state; | 1040 | device_state = pdev->current_state; |
| 1018 | 1041 | ||
| @@ -1033,7 +1056,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev) | |||
| 1033 | rc = pci_go_44(pdev); | 1056 | rc = pci_go_44(pdev); |
| 1034 | if (rc) | 1057 | if (rc) |
| 1035 | goto err_out_disable; | 1058 | goto err_out_disable; |
| 1036 | 1059 | sas_prep_resume_ha(sha); | |
| 1037 | /* chip soft rst only for spc */ | 1060 | /* chip soft rst only for spc */ |
| 1038 | if (pm8001_ha->chip_id == chip_8001) { | 1061 | if (pm8001_ha->chip_id == chip_8001) { |
| 1039 | PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); | 1062 | PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); |
| @@ -1065,7 +1088,13 @@ static int pm8001_pci_resume(struct pci_dev *pdev) | |||
| 1065 | for (i = 1; i < pm8001_ha->number_of_intr; i++) | 1088 | for (i = 1; i < pm8001_ha->number_of_intr; i++) |
| 1066 | PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); | 1089 | PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); |
| 1067 | } | 1090 | } |
| 1068 | scsi_unblock_requests(pm8001_ha->shost); | 1091 | pm8001_ha->flags = PM8001F_RUN_TIME; |
| 1092 | for (i = 0; i < pm8001_ha->chip->n_phy; i++) { | ||
| 1093 | pm8001_ha->phy[i].enable_completion = &completion; | ||
| 1094 | PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); | ||
| 1095 | wait_for_completion(&completion); | ||
| 1096 | } | ||
| 1097 | sas_resume_ha(sha); | ||
| 1069 | return 0; | 1098 | return 0; |
| 1070 | 1099 | ||
| 1071 | err_out_disable: | 1100 | err_out_disable: |
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 8a44bc92bc78..76570e6a547d 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c | |||
| @@ -58,25 +58,14 @@ static int pm8001_find_tag(struct sas_task *task, u32 *tag) | |||
| 58 | } | 58 | } |
| 59 | 59 | ||
| 60 | /** | 60 | /** |
| 61 | * pm8001_tag_clear - clear the tags bitmap | 61 | * pm8001_tag_free - free the no more needed tag |
| 62 | * @pm8001_ha: our hba struct | 62 | * @pm8001_ha: our hba struct |
| 63 | * @tag: the found tag associated with the task | 63 | * @tag: the found tag associated with the task |
| 64 | */ | 64 | */ |
| 65 | static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag) | ||
| 66 | { | ||
| 67 | void *bitmap = pm8001_ha->tags; | ||
| 68 | clear_bit(tag, bitmap); | ||
| 69 | } | ||
| 70 | |||
| 71 | void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) | 65 | void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) |
| 72 | { | 66 | { |
| 73 | pm8001_tag_clear(pm8001_ha, tag); | ||
| 74 | } | ||
| 75 | |||
| 76 | static void pm8001_tag_set(struct pm8001_hba_info *pm8001_ha, u32 tag) | ||
| 77 | { | ||
| 78 | void *bitmap = pm8001_ha->tags; | 67 | void *bitmap = pm8001_ha->tags; |
| 79 | set_bit(tag, bitmap); | 68 | clear_bit(tag, bitmap); |
| 80 | } | 69 | } |
| 81 | 70 | ||
| 82 | /** | 71 | /** |
| @@ -86,14 +75,18 @@ static void pm8001_tag_set(struct pm8001_hba_info *pm8001_ha, u32 tag) | |||
| 86 | */ | 75 | */ |
| 87 | inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out) | 76 | inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out) |
| 88 | { | 77 | { |
| 89 | unsigned int index, tag; | 78 | unsigned int tag; |
| 90 | void *bitmap = pm8001_ha->tags; | 79 | void *bitmap = pm8001_ha->tags; |
| 80 | unsigned long flags; | ||
| 91 | 81 | ||
| 92 | index = find_first_zero_bit(bitmap, pm8001_ha->tags_num); | 82 | spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags); |
| 93 | tag = index; | 83 | tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num); |
| 94 | if (tag >= pm8001_ha->tags_num) | 84 | if (tag >= pm8001_ha->tags_num) { |
| 85 | spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); | ||
| 95 | return -SAS_QUEUE_FULL; | 86 | return -SAS_QUEUE_FULL; |
| 96 | pm8001_tag_set(pm8001_ha, tag); | 87 | } |
| 88 | set_bit(tag, bitmap); | ||
| 89 | spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); | ||
| 97 | *tag_out = tag; | 90 | *tag_out = tag; |
| 98 | return 0; | 91 | return 0; |
| 99 | } | 92 | } |
| @@ -102,7 +95,7 @@ void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha) | |||
| 102 | { | 95 | { |
| 103 | int i; | 96 | int i; |
| 104 | for (i = 0; i < pm8001_ha->tags_num; ++i) | 97 | for (i = 0; i < pm8001_ha->tags_num; ++i) |
| 105 | pm8001_tag_clear(pm8001_ha, i); | 98 | pm8001_tag_free(pm8001_ha, i); |
| 106 | } | 99 | } |
| 107 | 100 | ||
| 108 | /** | 101 | /** |
| @@ -123,13 +116,12 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, | |||
| 123 | u64 align_offset = 0; | 116 | u64 align_offset = 0; |
| 124 | if (align) | 117 | if (align) |
| 125 | align_offset = (dma_addr_t)align - 1; | 118 | align_offset = (dma_addr_t)align - 1; |
| 126 | mem_virt_alloc = | 119 | mem_virt_alloc = pci_zalloc_consistent(pdev, mem_size + align, |
| 127 | pci_alloc_consistent(pdev, mem_size + align, &mem_dma_handle); | 120 | &mem_dma_handle); |
| 128 | if (!mem_virt_alloc) { | 121 | if (!mem_virt_alloc) { |
| 129 | pm8001_printk("memory allocation error\n"); | 122 | pm8001_printk("memory allocation error\n"); |
| 130 | return -1; | 123 | return -1; |
| 131 | } | 124 | } |
| 132 | memset((void *)mem_virt_alloc, 0, mem_size+align); | ||
| 133 | *pphys_addr = mem_dma_handle; | 125 | *pphys_addr = mem_dma_handle; |
| 134 | phys_align = (*pphys_addr + align_offset) & ~align_offset; | 126 | phys_align = (*pphys_addr + align_offset) & ~align_offset; |
| 135 | *virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr; | 127 | *virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr; |
| @@ -501,11 +493,6 @@ int pm8001_queue_command(struct sas_task *task, const int num, | |||
| 501 | return pm8001_task_exec(task, num, gfp_flags, 0, NULL); | 493 | return pm8001_task_exec(task, num, gfp_flags, 0, NULL); |
| 502 | } | 494 | } |
| 503 | 495 | ||
| 504 | void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx) | ||
| 505 | { | ||
| 506 | pm8001_tag_clear(pm8001_ha, ccb_idx); | ||
| 507 | } | ||
| 508 | |||
| 509 | /** | 496 | /** |
| 510 | * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb. | 497 | * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb. |
| 511 | * @pm8001_ha: our hba card information | 498 | * @pm8001_ha: our hba card information |
| @@ -542,7 +529,7 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, | |||
| 542 | ccb->task = NULL; | 529 | ccb->task = NULL; |
| 543 | ccb->ccb_tag = 0xFFFFFFFF; | 530 | ccb->ccb_tag = 0xFFFFFFFF; |
| 544 | ccb->open_retry = 0; | 531 | ccb->open_retry = 0; |
| 545 | pm8001_ccb_free(pm8001_ha, ccb_idx); | 532 | pm8001_tag_free(pm8001_ha, ccb_idx); |
| 546 | } | 533 | } |
| 547 | 534 | ||
| 548 | /** | 535 | /** |
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 1ee06f21803b..f6b2ac59dae4 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h | |||
| @@ -475,6 +475,7 @@ struct pm8001_hba_info { | |||
| 475 | struct list_head list; | 475 | struct list_head list; |
| 476 | unsigned long flags; | 476 | unsigned long flags; |
| 477 | spinlock_t lock;/* host-wide lock */ | 477 | spinlock_t lock;/* host-wide lock */ |
| 478 | spinlock_t bitmap_lock; | ||
| 478 | struct pci_dev *pdev;/* our device */ | 479 | struct pci_dev *pdev;/* our device */ |
| 479 | struct device *dev; | 480 | struct device *dev; |
| 480 | struct pm8001_hba_memspace io_mem[6]; | 481 | struct pm8001_hba_memspace io_mem[6]; |
| @@ -616,7 +617,6 @@ extern struct workqueue_struct *pm8001_wq; | |||
| 616 | int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out); | 617 | int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out); |
| 617 | void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha); | 618 | void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha); |
| 618 | u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag); | 619 | u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag); |
| 619 | void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx); | ||
| 620 | void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, | 620 | void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, |
| 621 | struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx); | 621 | struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx); |
| 622 | int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, | 622 | int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, |
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index d70587f96184..b06443a0db2d 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c | |||
| @@ -856,6 +856,8 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha) | |||
| 856 | payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8); | 856 | payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8); |
| 857 | 857 | ||
| 858 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); | 858 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); |
| 859 | if (rc) | ||
| 860 | pm8001_tag_free(pm8001_ha, tag); | ||
| 859 | return rc; | 861 | return rc; |
| 860 | 862 | ||
| 861 | } | 863 | } |
| @@ -936,6 +938,8 @@ pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha) | |||
| 936 | sizeof(SASProtocolTimerConfig_t)); | 938 | sizeof(SASProtocolTimerConfig_t)); |
| 937 | 939 | ||
| 938 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); | 940 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); |
| 941 | if (rc) | ||
| 942 | pm8001_tag_free(pm8001_ha, tag); | ||
| 939 | 943 | ||
| 940 | return rc; | 944 | return rc; |
| 941 | } | 945 | } |
| @@ -948,7 +952,7 @@ static int | |||
| 948 | pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha) | 952 | pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha) |
| 949 | { | 953 | { |
| 950 | u32 scratch3_value; | 954 | u32 scratch3_value; |
| 951 | int ret; | 955 | int ret = -1; |
| 952 | 956 | ||
| 953 | /* Read encryption status from SCRATCH PAD 3 */ | 957 | /* Read encryption status from SCRATCH PAD 3 */ |
| 954 | scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); | 958 | scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); |
| @@ -982,7 +986,7 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha) | |||
| 982 | pm8001_ha->encrypt_info.status = 0xFFFFFFFF; | 986 | pm8001_ha->encrypt_info.status = 0xFFFFFFFF; |
| 983 | pm8001_ha->encrypt_info.cipher_mode = 0; | 987 | pm8001_ha->encrypt_info.cipher_mode = 0; |
| 984 | pm8001_ha->encrypt_info.sec_mode = 0; | 988 | pm8001_ha->encrypt_info.sec_mode = 0; |
| 985 | return 0; | 989 | ret = 0; |
| 986 | } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == | 990 | } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == |
| 987 | SCRATCH_PAD3_ENC_DIS_ERR) { | 991 | SCRATCH_PAD3_ENC_DIS_ERR) { |
| 988 | pm8001_ha->encrypt_info.status = | 992 | pm8001_ha->encrypt_info.status = |
| @@ -1004,7 +1008,6 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha) | |||
| 1004 | scratch3_value, pm8001_ha->encrypt_info.cipher_mode, | 1008 | scratch3_value, pm8001_ha->encrypt_info.cipher_mode, |
| 1005 | pm8001_ha->encrypt_info.sec_mode, | 1009 | pm8001_ha->encrypt_info.sec_mode, |
| 1006 | pm8001_ha->encrypt_info.status)); | 1010 | pm8001_ha->encrypt_info.status)); |
| 1007 | ret = -1; | ||
| 1008 | } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == | 1011 | } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == |
| 1009 | SCRATCH_PAD3_ENC_ENA_ERR) { | 1012 | SCRATCH_PAD3_ENC_ENA_ERR) { |
| 1010 | 1013 | ||
| @@ -1028,7 +1031,6 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha) | |||
| 1028 | scratch3_value, pm8001_ha->encrypt_info.cipher_mode, | 1031 | scratch3_value, pm8001_ha->encrypt_info.cipher_mode, |
| 1029 | pm8001_ha->encrypt_info.sec_mode, | 1032 | pm8001_ha->encrypt_info.sec_mode, |
| 1030 | pm8001_ha->encrypt_info.status)); | 1033 | pm8001_ha->encrypt_info.status)); |
| 1031 | ret = -1; | ||
| 1032 | } | 1034 | } |
| 1033 | return ret; | 1035 | return ret; |
| 1034 | } | 1036 | } |
| @@ -1059,6 +1061,8 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) | |||
| 1059 | KEK_MGMT_SUBOP_KEYCARDUPDATE); | 1061 | KEK_MGMT_SUBOP_KEYCARDUPDATE); |
| 1060 | 1062 | ||
| 1061 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); | 1063 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); |
| 1064 | if (rc) | ||
| 1065 | pm8001_tag_free(pm8001_ha, tag); | ||
| 1062 | 1066 | ||
| 1063 | return rc; | 1067 | return rc; |
| 1064 | } | 1068 | } |
| @@ -1383,8 +1387,10 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha, | |||
| 1383 | task->task_done = pm8001_task_done; | 1387 | task->task_done = pm8001_task_done; |
| 1384 | 1388 | ||
| 1385 | res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); | 1389 | res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); |
| 1386 | if (res) | 1390 | if (res) { |
| 1391 | sas_free_task(task); | ||
| 1387 | return; | 1392 | return; |
| 1393 | } | ||
| 1388 | 1394 | ||
| 1389 | ccb = &pm8001_ha->ccb_info[ccb_tag]; | 1395 | ccb = &pm8001_ha->ccb_info[ccb_tag]; |
| 1390 | ccb->device = pm8001_ha_dev; | 1396 | ccb->device = pm8001_ha_dev; |
| @@ -1399,7 +1405,10 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha, | |||
| 1399 | task_abort.tag = cpu_to_le32(ccb_tag); | 1405 | task_abort.tag = cpu_to_le32(ccb_tag); |
| 1400 | 1406 | ||
| 1401 | ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); | 1407 | ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); |
| 1402 | 1408 | if (ret) { | |
| 1409 | sas_free_task(task); | ||
| 1410 | pm8001_tag_free(pm8001_ha, ccb_tag); | ||
| 1411 | } | ||
| 1403 | } | 1412 | } |
| 1404 | 1413 | ||
| 1405 | static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, | 1414 | static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, |
| @@ -1426,6 +1435,7 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, | |||
| 1426 | 1435 | ||
| 1427 | res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); | 1436 | res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); |
| 1428 | if (res) { | 1437 | if (res) { |
| 1438 | sas_free_task(task); | ||
| 1429 | PM8001_FAIL_DBG(pm8001_ha, | 1439 | PM8001_FAIL_DBG(pm8001_ha, |
| 1430 | pm8001_printk("cannot allocate tag !!!\n")); | 1440 | pm8001_printk("cannot allocate tag !!!\n")); |
| 1431 | return; | 1441 | return; |
| @@ -1436,15 +1446,16 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, | |||
| 1436 | */ | 1446 | */ |
| 1437 | dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); | 1447 | dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); |
| 1438 | if (!dev) { | 1448 | if (!dev) { |
| 1449 | sas_free_task(task); | ||
| 1450 | pm8001_tag_free(pm8001_ha, ccb_tag); | ||
| 1439 | PM8001_FAIL_DBG(pm8001_ha, | 1451 | PM8001_FAIL_DBG(pm8001_ha, |
| 1440 | pm8001_printk("Domain device cannot be allocated\n")); | 1452 | pm8001_printk("Domain device cannot be allocated\n")); |
| 1441 | sas_free_task(task); | ||
| 1442 | return; | 1453 | return; |
| 1443 | } else { | ||
| 1444 | task->dev = dev; | ||
| 1445 | task->dev->lldd_dev = pm8001_ha_dev; | ||
| 1446 | } | 1454 | } |
| 1447 | 1455 | ||
| 1456 | task->dev = dev; | ||
| 1457 | task->dev->lldd_dev = pm8001_ha_dev; | ||
| 1458 | |||
| 1448 | ccb = &pm8001_ha->ccb_info[ccb_tag]; | 1459 | ccb = &pm8001_ha->ccb_info[ccb_tag]; |
| 1449 | ccb->device = pm8001_ha_dev; | 1460 | ccb->device = pm8001_ha_dev; |
| 1450 | ccb->ccb_tag = ccb_tag; | 1461 | ccb->ccb_tag = ccb_tag; |
| @@ -1469,7 +1480,11 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, | |||
| 1469 | memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); | 1480 | memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); |
| 1470 | 1481 | ||
| 1471 | res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); | 1482 | res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); |
| 1472 | 1483 | if (res) { | |
| 1484 | sas_free_task(task); | ||
| 1485 | pm8001_tag_free(pm8001_ha, ccb_tag); | ||
| 1486 | kfree(dev); | ||
| 1487 | } | ||
| 1473 | } | 1488 | } |
| 1474 | 1489 | ||
| 1475 | /** | 1490 | /** |
| @@ -3815,7 +3830,10 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha, | |||
| 3815 | 3830 | ||
| 3816 | build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, | 3831 | build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, |
| 3817 | &smp_cmd, pm8001_ha->smp_exp_mode, length); | 3832 | &smp_cmd, pm8001_ha->smp_exp_mode, length); |
| 3818 | pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0); | 3833 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, |
| 3834 | (u32 *)&smp_cmd, 0); | ||
| 3835 | if (rc) | ||
| 3836 | goto err_out_2; | ||
| 3819 | return 0; | 3837 | return 0; |
| 3820 | 3838 | ||
| 3821 | err_out_2: | 3839 | err_out_2: |
| @@ -4406,6 +4424,8 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, | |||
| 4406 | SAS_ADDR_SIZE); | 4424 | SAS_ADDR_SIZE); |
| 4407 | 4425 | ||
| 4408 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); | 4426 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); |
| 4427 | if (rc) | ||
| 4428 | pm8001_tag_free(pm8001_ha, tag); | ||
| 4409 | 4429 | ||
| 4410 | return rc; | 4430 | return rc; |
| 4411 | } | 4431 | } |
| @@ -4484,7 +4504,9 @@ void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha, | |||
| 4484 | payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i)); | 4504 | payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i)); |
| 4485 | j++; | 4505 | j++; |
| 4486 | } | 4506 | } |
| 4487 | pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); | 4507 | rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); |
| 4508 | if (rc) | ||
| 4509 | pm8001_tag_free(pm8001_ha, tag); | ||
| 4488 | } | 4510 | } |
| 4489 | 4511 | ||
| 4490 | void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha, | 4512 | void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha, |
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index be8ce54f99b2..6f3275d020a0 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c | |||
| @@ -237,7 +237,7 @@ static int pmcraid_slave_configure(struct scsi_device *scsi_dev) | |||
| 237 | scsi_dev->host->unique_id, | 237 | scsi_dev->host->unique_id, |
| 238 | scsi_dev->channel, | 238 | scsi_dev->channel, |
| 239 | scsi_dev->id, | 239 | scsi_dev->id, |
| 240 | scsi_dev->lun); | 240 | (u8)scsi_dev->lun); |
| 241 | 241 | ||
| 242 | if (RES_IS_GSCSI(res->cfg_entry)) { | 242 | if (RES_IS_GSCSI(res->cfg_entry)) { |
| 243 | scsi_dev->allow_restart = 1; | 243 | scsi_dev->allow_restart = 1; |
| @@ -4213,9 +4213,9 @@ static ssize_t pmcraid_store_log_level( | |||
| 4213 | { | 4213 | { |
| 4214 | struct Scsi_Host *shost; | 4214 | struct Scsi_Host *shost; |
| 4215 | struct pmcraid_instance *pinstance; | 4215 | struct pmcraid_instance *pinstance; |
| 4216 | unsigned long val; | 4216 | u8 val; |
| 4217 | 4217 | ||
| 4218 | if (strict_strtoul(buf, 10, &val)) | 4218 | if (kstrtou8(buf, 10, &val)) |
| 4219 | return -EINVAL; | 4219 | return -EINVAL; |
| 4220 | /* log-level should be from 0 to 2 */ | 4220 | /* log-level should be from 0 to 2 */ |
| 4221 | if (val > 2) | 4221 | if (val > 2) |
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c index e6e2a30493e6..ef23fabe3924 100644 --- a/drivers/scsi/ps3rom.c +++ b/drivers/scsi/ps3rom.c | |||
| @@ -78,7 +78,7 @@ static int ps3rom_slave_configure(struct scsi_device *scsi_dev) | |||
| 78 | struct ps3rom_private *priv = shost_priv(scsi_dev->host); | 78 | struct ps3rom_private *priv = shost_priv(scsi_dev->host); |
| 79 | struct ps3_storage_device *dev = priv->dev; | 79 | struct ps3_storage_device *dev = priv->dev; |
| 80 | 80 | ||
| 81 | dev_dbg(&dev->sbd.core, "%s:%u: id %u, lun %u, channel %u\n", __func__, | 81 | dev_dbg(&dev->sbd.core, "%s:%u: id %u, lun %llu, channel %u\n", __func__, |
| 82 | __LINE__, scsi_dev->id, scsi_dev->lun, scsi_dev->channel); | 82 | __LINE__, scsi_dev->id, scsi_dev->lun, scsi_dev->channel); |
| 83 | 83 | ||
| 84 | /* | 84 | /* |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index de5d0ae19d83..b64399153135 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
| @@ -320,8 +320,8 @@ struct srb_iocb { | |||
| 320 | * defined in tsk_mgmt_entry struct | 320 | * defined in tsk_mgmt_entry struct |
| 321 | * for control_flags field in qla_fw.h. | 321 | * for control_flags field in qla_fw.h. |
| 322 | */ | 322 | */ |
| 323 | uint64_t lun; | ||
| 323 | uint32_t flags; | 324 | uint32_t flags; |
| 324 | uint32_t lun; | ||
| 325 | uint32_t data; | 325 | uint32_t data; |
| 326 | struct completion comp; | 326 | struct completion comp; |
| 327 | __le16 comp_status; | 327 | __le16 comp_status; |
| @@ -2529,8 +2529,8 @@ struct isp_operations { | |||
| 2529 | void (*disable_intrs) (struct qla_hw_data *); | 2529 | void (*disable_intrs) (struct qla_hw_data *); |
| 2530 | 2530 | ||
| 2531 | int (*abort_command) (srb_t *); | 2531 | int (*abort_command) (srb_t *); |
| 2532 | int (*target_reset) (struct fc_port *, unsigned int, int); | 2532 | int (*target_reset) (struct fc_port *, uint64_t, int); |
| 2533 | int (*lun_reset) (struct fc_port *, unsigned int, int); | 2533 | int (*lun_reset) (struct fc_port *, uint64_t, int); |
| 2534 | int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, | 2534 | int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, |
| 2535 | uint8_t, uint8_t, uint16_t *, uint8_t); | 2535 | uint8_t, uint8_t, uint16_t *, uint8_t); |
| 2536 | int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t, | 2536 | int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t, |
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index d48dea8fab1b..d646540db3ac 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
| @@ -113,7 +113,7 @@ extern int ql2xenabledif; | |||
| 113 | extern int ql2xenablehba_err_chk; | 113 | extern int ql2xenablehba_err_chk; |
| 114 | extern int ql2xtargetreset; | 114 | extern int ql2xtargetreset; |
| 115 | extern int ql2xdontresethba; | 115 | extern int ql2xdontresethba; |
| 116 | extern unsigned int ql2xmaxlun; | 116 | extern uint64_t ql2xmaxlun; |
| 117 | extern int ql2xmdcapmask; | 117 | extern int ql2xmdcapmask; |
| 118 | extern int ql2xmdenable; | 118 | extern int ql2xmdenable; |
| 119 | 119 | ||
| @@ -212,7 +212,7 @@ extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t); | |||
| 212 | extern int qla2x00_start_scsi(srb_t *sp); | 212 | extern int qla2x00_start_scsi(srb_t *sp); |
| 213 | extern int qla24xx_start_scsi(srb_t *sp); | 213 | extern int qla24xx_start_scsi(srb_t *sp); |
| 214 | int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, | 214 | int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, |
| 215 | uint16_t, uint16_t, uint8_t); | 215 | uint16_t, uint64_t, uint8_t); |
| 216 | extern int qla2x00_start_sp(srb_t *); | 216 | extern int qla2x00_start_sp(srb_t *); |
| 217 | extern int qla24xx_dif_start_scsi(srb_t *); | 217 | extern int qla24xx_dif_start_scsi(srb_t *); |
| 218 | extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t); | 218 | extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t); |
| @@ -262,10 +262,10 @@ extern int | |||
| 262 | qla2x00_abort_command(srb_t *); | 262 | qla2x00_abort_command(srb_t *); |
| 263 | 263 | ||
| 264 | extern int | 264 | extern int |
| 265 | qla2x00_abort_target(struct fc_port *, unsigned int, int); | 265 | qla2x00_abort_target(struct fc_port *, uint64_t, int); |
| 266 | 266 | ||
| 267 | extern int | 267 | extern int |
| 268 | qla2x00_lun_reset(struct fc_port *, unsigned int, int); | 268 | qla2x00_lun_reset(struct fc_port *, uint64_t, int); |
| 269 | 269 | ||
| 270 | extern int | 270 | extern int |
| 271 | qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, | 271 | qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, |
| @@ -339,12 +339,12 @@ qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, | |||
| 339 | extern int qla24xx_abort_command(srb_t *); | 339 | extern int qla24xx_abort_command(srb_t *); |
| 340 | extern int qla24xx_async_abort_command(srb_t *); | 340 | extern int qla24xx_async_abort_command(srb_t *); |
| 341 | extern int | 341 | extern int |
| 342 | qla24xx_abort_target(struct fc_port *, unsigned int, int); | 342 | qla24xx_abort_target(struct fc_port *, uint64_t, int); |
| 343 | extern int | 343 | extern int |
| 344 | qla24xx_lun_reset(struct fc_port *, unsigned int, int); | 344 | qla24xx_lun_reset(struct fc_port *, uint64_t, int); |
| 345 | extern int | 345 | extern int |
| 346 | qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *, unsigned int, | 346 | qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *, unsigned int, |
| 347 | unsigned int, enum nexus_wait_type); | 347 | uint64_t, enum nexus_wait_type); |
| 348 | extern int | 348 | extern int |
| 349 | qla2x00_system_error(scsi_qla_host_t *); | 349 | qla2x00_system_error(scsi_qla_host_t *); |
| 350 | 350 | ||
| @@ -617,8 +617,8 @@ extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *); | |||
| 617 | extern irqreturn_t qlafx00_intr_handler(int, void *); | 617 | extern irqreturn_t qlafx00_intr_handler(int, void *); |
| 618 | extern void qlafx00_enable_intrs(struct qla_hw_data *); | 618 | extern void qlafx00_enable_intrs(struct qla_hw_data *); |
| 619 | extern void qlafx00_disable_intrs(struct qla_hw_data *); | 619 | extern void qlafx00_disable_intrs(struct qla_hw_data *); |
| 620 | extern int qlafx00_abort_target(fc_port_t *, unsigned int, int); | 620 | extern int qlafx00_abort_target(fc_port_t *, uint64_t, int); |
| 621 | extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int); | 621 | extern int qlafx00_lun_reset(fc_port_t *, uint64_t, int); |
| 622 | extern int qlafx00_start_scsi(srb_t *); | 622 | extern int qlafx00_start_scsi(srb_t *); |
| 623 | extern int qlafx00_abort_isp(scsi_qla_host_t *); | 623 | extern int qlafx00_abort_isp(scsi_qla_host_t *); |
| 624 | extern int qlafx00_iospace_config(struct qla_hw_data *); | 624 | extern int qlafx00_iospace_config(struct qla_hw_data *); |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index e2184412617d..46990f4ceb40 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
| @@ -1526,8 +1526,8 @@ try_fce: | |||
| 1526 | FCE_SIZE, ha->fce, ha->fce_dma); | 1526 | FCE_SIZE, ha->fce, ha->fce_dma); |
| 1527 | 1527 | ||
| 1528 | /* Allocate memory for Fibre Channel Event Buffer. */ | 1528 | /* Allocate memory for Fibre Channel Event Buffer. */ |
| 1529 | tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, | 1529 | tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, |
| 1530 | GFP_KERNEL); | 1530 | GFP_KERNEL); |
| 1531 | if (!tc) { | 1531 | if (!tc) { |
| 1532 | ql_log(ql_log_warn, vha, 0x00be, | 1532 | ql_log(ql_log_warn, vha, 0x00be, |
| 1533 | "Unable to allocate (%d KB) for FCE.\n", | 1533 | "Unable to allocate (%d KB) for FCE.\n", |
| @@ -1535,7 +1535,6 @@ try_fce: | |||
| 1535 | goto try_eft; | 1535 | goto try_eft; |
| 1536 | } | 1536 | } |
| 1537 | 1537 | ||
| 1538 | memset(tc, 0, FCE_SIZE); | ||
| 1539 | rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, | 1538 | rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, |
| 1540 | ha->fce_mb, &ha->fce_bufs); | 1539 | ha->fce_mb, &ha->fce_bufs); |
| 1541 | if (rval) { | 1540 | if (rval) { |
| @@ -1560,8 +1559,8 @@ try_eft: | |||
| 1560 | EFT_SIZE, ha->eft, ha->eft_dma); | 1559 | EFT_SIZE, ha->eft, ha->eft_dma); |
| 1561 | 1560 | ||
| 1562 | /* Allocate memory for Extended Trace Buffer. */ | 1561 | /* Allocate memory for Extended Trace Buffer. */ |
| 1563 | tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, | 1562 | tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, |
| 1564 | GFP_KERNEL); | 1563 | GFP_KERNEL); |
| 1565 | if (!tc) { | 1564 | if (!tc) { |
| 1566 | ql_log(ql_log_warn, vha, 0x00c1, | 1565 | ql_log(ql_log_warn, vha, 0x00c1, |
| 1567 | "Unable to allocate (%d KB) for EFT.\n", | 1566 | "Unable to allocate (%d KB) for EFT.\n", |
| @@ -1569,7 +1568,6 @@ try_eft: | |||
| 1569 | goto cont_alloc; | 1568 | goto cont_alloc; |
| 1570 | } | 1569 | } |
| 1571 | 1570 | ||
| 1572 | memset(tc, 0, EFT_SIZE); | ||
| 1573 | rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); | 1571 | rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); |
| 1574 | if (rval) { | 1572 | if (rval) { |
| 1575 | ql_log(ql_log_warn, vha, 0x00c2, | 1573 | ql_log(ql_log_warn, vha, 0x00c2, |
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 760931529592..150529d98db4 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
| @@ -520,7 +520,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) | |||
| 520 | static int | 520 | static int |
| 521 | __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, | 521 | __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, |
| 522 | struct rsp_que *rsp, uint16_t loop_id, | 522 | struct rsp_que *rsp, uint16_t loop_id, |
| 523 | uint16_t lun, uint8_t type) | 523 | uint64_t lun, uint8_t type) |
| 524 | { | 524 | { |
| 525 | mrk_entry_t *mrk; | 525 | mrk_entry_t *mrk; |
| 526 | struct mrk_entry_24xx *mrk24 = NULL; | 526 | struct mrk_entry_24xx *mrk24 = NULL; |
| @@ -543,14 +543,13 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, | |||
| 543 | if (IS_FWI2_CAPABLE(ha)) { | 543 | if (IS_FWI2_CAPABLE(ha)) { |
| 544 | mrk24 = (struct mrk_entry_24xx *) mrk; | 544 | mrk24 = (struct mrk_entry_24xx *) mrk; |
| 545 | mrk24->nport_handle = cpu_to_le16(loop_id); | 545 | mrk24->nport_handle = cpu_to_le16(loop_id); |
| 546 | mrk24->lun[1] = LSB(lun); | 546 | int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun); |
| 547 | mrk24->lun[2] = MSB(lun); | ||
| 548 | host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); | 547 | host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); |
| 549 | mrk24->vp_index = vha->vp_idx; | 548 | mrk24->vp_index = vha->vp_idx; |
| 550 | mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle); | 549 | mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle); |
| 551 | } else { | 550 | } else { |
| 552 | SET_TARGET_ID(ha, mrk->target, loop_id); | 551 | SET_TARGET_ID(ha, mrk->target, loop_id); |
| 553 | mrk->lun = cpu_to_le16(lun); | 552 | mrk->lun = cpu_to_le16((uint16_t)lun); |
| 554 | } | 553 | } |
| 555 | } | 554 | } |
| 556 | wmb(); | 555 | wmb(); |
| @@ -562,7 +561,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, | |||
| 562 | 561 | ||
| 563 | int | 562 | int |
| 564 | qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, | 563 | qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, |
| 565 | struct rsp_que *rsp, uint16_t loop_id, uint16_t lun, | 564 | struct rsp_que *rsp, uint16_t loop_id, uint64_t lun, |
| 566 | uint8_t type) | 565 | uint8_t type) |
| 567 | { | 566 | { |
| 568 | int ret; | 567 | int ret; |
| @@ -2047,7 +2046,7 @@ static void | |||
| 2047 | qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) | 2046 | qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) |
| 2048 | { | 2047 | { |
| 2049 | uint32_t flags; | 2048 | uint32_t flags; |
| 2050 | unsigned int lun; | 2049 | uint64_t lun; |
| 2051 | struct fc_port *fcport = sp->fcport; | 2050 | struct fc_port *fcport = sp->fcport; |
| 2052 | scsi_qla_host_t *vha = fcport->vha; | 2051 | scsi_qla_host_t *vha = fcport->vha; |
| 2053 | struct qla_hw_data *ha = vha->hw; | 2052 | struct qla_hw_data *ha = vha->hw; |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index a56825c73c31..550a4a31f51a 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
| @@ -1659,7 +1659,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, | |||
| 1659 | 1659 | ||
| 1660 | if (sense_len) { | 1660 | if (sense_len) { |
| 1661 | ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, | 1661 | ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, |
| 1662 | "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n", | 1662 | "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", |
| 1663 | sp->fcport->vha->host_no, cp->device->id, cp->device->lun, | 1663 | sp->fcport->vha->host_no, cp->device->id, cp->device->lun, |
| 1664 | cp); | 1664 | cp); |
| 1665 | ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, | 1665 | ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, |
| @@ -2281,7 +2281,7 @@ check_scsi_status: | |||
| 2281 | out: | 2281 | out: |
| 2282 | if (logit) | 2282 | if (logit) |
| 2283 | ql_dbg(ql_dbg_io, fcport->vha, 0x3022, | 2283 | ql_dbg(ql_dbg_io, fcport->vha, 0x3022, |
| 2284 | "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d " | 2284 | "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu " |
| 2285 | "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x " | 2285 | "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x " |
| 2286 | "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", | 2286 | "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", |
| 2287 | comp_status, scsi_status, res, vha->host_no, | 2287 | comp_status, scsi_status, res, vha->host_no, |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 1c33a77db5c2..d9aafc003be2 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
| @@ -947,7 +947,7 @@ qla2x00_abort_command(srb_t *sp) | |||
| 947 | } | 947 | } |
| 948 | 948 | ||
| 949 | int | 949 | int |
| 950 | qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag) | 950 | qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag) |
| 951 | { | 951 | { |
| 952 | int rval, rval2; | 952 | int rval, rval2; |
| 953 | mbx_cmd_t mc; | 953 | mbx_cmd_t mc; |
| @@ -1000,7 +1000,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag) | |||
| 1000 | } | 1000 | } |
| 1001 | 1001 | ||
| 1002 | int | 1002 | int |
| 1003 | qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag) | 1003 | qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag) |
| 1004 | { | 1004 | { |
| 1005 | int rval, rval2; | 1005 | int rval, rval2; |
| 1006 | mbx_cmd_t mc; | 1006 | mbx_cmd_t mc; |
| @@ -1022,7 +1022,7 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag) | |||
| 1022 | mcp->mb[1] = fcport->loop_id; | 1022 | mcp->mb[1] = fcport->loop_id; |
| 1023 | else | 1023 | else |
| 1024 | mcp->mb[1] = fcport->loop_id << 8; | 1024 | mcp->mb[1] = fcport->loop_id << 8; |
| 1025 | mcp->mb[2] = l; | 1025 | mcp->mb[2] = (u32)l; |
| 1026 | mcp->mb[3] = 0; | 1026 | mcp->mb[3] = 0; |
| 1027 | mcp->mb[9] = vha->vp_idx; | 1027 | mcp->mb[9] = vha->vp_idx; |
| 1028 | 1028 | ||
| @@ -2666,7 +2666,7 @@ struct tsk_mgmt_cmd { | |||
| 2666 | 2666 | ||
| 2667 | static int | 2667 | static int |
| 2668 | __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, | 2668 | __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, |
| 2669 | unsigned int l, int tag) | 2669 | uint64_t l, int tag) |
| 2670 | { | 2670 | { |
| 2671 | int rval, rval2; | 2671 | int rval, rval2; |
| 2672 | struct tsk_mgmt_cmd *tsk; | 2672 | struct tsk_mgmt_cmd *tsk; |
| @@ -2760,7 +2760,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, | |||
| 2760 | } | 2760 | } |
| 2761 | 2761 | ||
| 2762 | int | 2762 | int |
| 2763 | qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag) | 2763 | qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) |
| 2764 | { | 2764 | { |
| 2765 | struct qla_hw_data *ha = fcport->vha->hw; | 2765 | struct qla_hw_data *ha = fcport->vha->hw; |
| 2766 | 2766 | ||
| @@ -2771,7 +2771,7 @@ qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag) | |||
| 2771 | } | 2771 | } |
| 2772 | 2772 | ||
| 2773 | int | 2773 | int |
| 2774 | qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag) | 2774 | qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) |
| 2775 | { | 2775 | { |
| 2776 | struct qla_hw_data *ha = fcport->vha->hw; | 2776 | struct qla_hw_data *ha = fcport->vha->hw; |
| 2777 | 2777 | ||
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index abeb3901498b..4775baa8b6a0 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c | |||
| @@ -726,13 +726,13 @@ qlafx00_disable_intrs(struct qla_hw_data *ha) | |||
| 726 | } | 726 | } |
| 727 | 727 | ||
| 728 | int | 728 | int |
| 729 | qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag) | 729 | qlafx00_abort_target(fc_port_t *fcport, uint64_t l, int tag) |
| 730 | { | 730 | { |
| 731 | return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); | 731 | return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); |
| 732 | } | 732 | } |
| 733 | 733 | ||
| 734 | int | 734 | int |
| 735 | qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag) | 735 | qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag) |
| 736 | { | 736 | { |
| 737 | return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); | 737 | return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); |
| 738 | } | 738 | } |
| @@ -2159,7 +2159,7 @@ qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, | |||
| 2159 | 2159 | ||
| 2160 | if (sense_len) { | 2160 | if (sense_len) { |
| 2161 | ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039, | 2161 | ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039, |
| 2162 | "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n", | 2162 | "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", |
| 2163 | sp->fcport->vha->host_no, cp->device->id, cp->device->lun, | 2163 | sp->fcport->vha->host_no, cp->device->id, cp->device->lun, |
| 2164 | cp); | 2164 | cp); |
| 2165 | ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049, | 2165 | ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049, |
| @@ -2524,7 +2524,7 @@ check_scsi_status: | |||
| 2524 | 2524 | ||
| 2525 | if (logit) | 2525 | if (logit) |
| 2526 | ql_dbg(ql_dbg_io, fcport->vha, 0x3058, | 2526 | ql_dbg(ql_dbg_io, fcport->vha, 0x3058, |
| 2527 | "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d " | 2527 | "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu " |
| 2528 | "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x " | 2528 | "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x " |
| 2529 | "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, " | 2529 | "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, " |
| 2530 | "par_sense_len=0x%x, rsp_info_len=0x%x\n", | 2530 | "par_sense_len=0x%x, rsp_info_len=0x%x\n", |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index d96bfb55e57b..be9698d920c2 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
| @@ -202,8 +202,8 @@ MODULE_PARM_DESC(ql2xdontresethba, | |||
| 202 | " 0 (Default) -- Reset on failure.\n" | 202 | " 0 (Default) -- Reset on failure.\n" |
| 203 | " 1 -- Do not reset on failure.\n"); | 203 | " 1 -- Do not reset on failure.\n"); |
| 204 | 204 | ||
| 205 | uint ql2xmaxlun = MAX_LUNS; | 205 | uint64_t ql2xmaxlun = MAX_LUNS; |
| 206 | module_param(ql2xmaxlun, uint, S_IRUGO); | 206 | module_param(ql2xmaxlun, ullong, S_IRUGO); |
| 207 | MODULE_PARM_DESC(ql2xmaxlun, | 207 | MODULE_PARM_DESC(ql2xmaxlun, |
| 208 | "Defines the maximum LU number to register with the SCSI " | 208 | "Defines the maximum LU number to register with the SCSI " |
| 209 | "midlayer. Default is 65535."); | 209 | "midlayer. Default is 65535."); |
| @@ -920,7 +920,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
| 920 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); | 920 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); |
| 921 | srb_t *sp; | 921 | srb_t *sp; |
| 922 | int ret; | 922 | int ret; |
| 923 | unsigned int id, lun; | 923 | unsigned int id; |
| 924 | uint64_t lun; | ||
| 924 | unsigned long flags; | 925 | unsigned long flags; |
| 925 | int rval, wait = 0; | 926 | int rval, wait = 0; |
| 926 | struct qla_hw_data *ha = vha->hw; | 927 | struct qla_hw_data *ha = vha->hw; |
| @@ -944,7 +945,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
| 944 | } | 945 | } |
| 945 | 946 | ||
| 946 | ql_dbg(ql_dbg_taskm, vha, 0x8002, | 947 | ql_dbg(ql_dbg_taskm, vha, 0x8002, |
| 947 | "Aborting from RISC nexus=%ld:%d:%d sp=%p cmd=%p\n", | 948 | "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p\n", |
| 948 | vha->host_no, id, lun, sp, cmd); | 949 | vha->host_no, id, lun, sp, cmd); |
| 949 | 950 | ||
| 950 | /* Get a reference to the sp and drop the lock.*/ | 951 | /* Get a reference to the sp and drop the lock.*/ |
| @@ -995,7 +996,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
| 995 | } | 996 | } |
| 996 | 997 | ||
| 997 | ql_log(ql_log_info, vha, 0x801c, | 998 | ql_log(ql_log_info, vha, 0x801c, |
| 998 | "Abort command issued nexus=%ld:%d:%d -- %d %x.\n", | 999 | "Abort command issued nexus=%ld:%d:%llu -- %d %x.\n", |
| 999 | vha->host_no, id, lun, wait, ret); | 1000 | vha->host_no, id, lun, wait, ret); |
| 1000 | 1001 | ||
| 1001 | return ret; | 1002 | return ret; |
| @@ -1003,7 +1004,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
| 1003 | 1004 | ||
| 1004 | int | 1005 | int |
| 1005 | qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, | 1006 | qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, |
| 1006 | unsigned int l, enum nexus_wait_type type) | 1007 | uint64_t l, enum nexus_wait_type type) |
| 1007 | { | 1008 | { |
| 1008 | int cnt, match, status; | 1009 | int cnt, match, status; |
| 1009 | unsigned long flags; | 1010 | unsigned long flags; |
| @@ -1060,7 +1061,7 @@ static char *reset_errors[] = { | |||
| 1060 | 1061 | ||
| 1061 | static int | 1062 | static int |
| 1062 | __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, | 1063 | __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, |
| 1063 | struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int)) | 1064 | struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int)) |
| 1064 | { | 1065 | { |
| 1065 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); | 1066 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); |
| 1066 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; | 1067 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; |
| @@ -1075,7 +1076,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, | |||
| 1075 | return err; | 1076 | return err; |
| 1076 | 1077 | ||
| 1077 | ql_log(ql_log_info, vha, 0x8009, | 1078 | ql_log(ql_log_info, vha, 0x8009, |
| 1078 | "%s RESET ISSUED nexus=%ld:%d:%d cmd=%p.\n", name, vha->host_no, | 1079 | "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no, |
| 1079 | cmd->device->id, cmd->device->lun, cmd); | 1080 | cmd->device->id, cmd->device->lun, cmd); |
| 1080 | 1081 | ||
| 1081 | err = 0; | 1082 | err = 0; |
| @@ -1100,14 +1101,14 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, | |||
| 1100 | } | 1101 | } |
| 1101 | 1102 | ||
| 1102 | ql_log(ql_log_info, vha, 0x800e, | 1103 | ql_log(ql_log_info, vha, 0x800e, |
| 1103 | "%s RESET SUCCEEDED nexus:%ld:%d:%d cmd=%p.\n", name, | 1104 | "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name, |
| 1104 | vha->host_no, cmd->device->id, cmd->device->lun, cmd); | 1105 | vha->host_no, cmd->device->id, cmd->device->lun, cmd); |
| 1105 | 1106 | ||
| 1106 | return SUCCESS; | 1107 | return SUCCESS; |
| 1107 | 1108 | ||
| 1108 | eh_reset_failed: | 1109 | eh_reset_failed: |
| 1109 | ql_log(ql_log_info, vha, 0x800f, | 1110 | ql_log(ql_log_info, vha, 0x800f, |
| 1110 | "%s RESET FAILED: %s nexus=%ld:%d:%d cmd=%p.\n", name, | 1111 | "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name, |
| 1111 | reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun, | 1112 | reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun, |
| 1112 | cmd); | 1113 | cmd); |
| 1113 | return FAILED; | 1114 | return FAILED; |
| @@ -1154,7 +1155,8 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) | |||
| 1154 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); | 1155 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); |
| 1155 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; | 1156 | fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; |
| 1156 | int ret = FAILED; | 1157 | int ret = FAILED; |
| 1157 | unsigned int id, lun; | 1158 | unsigned int id; |
| 1159 | uint64_t lun; | ||
| 1158 | 1160 | ||
| 1159 | id = cmd->device->id; | 1161 | id = cmd->device->id; |
| 1160 | lun = cmd->device->lun; | 1162 | lun = cmd->device->lun; |
| @@ -1169,7 +1171,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) | |||
| 1169 | ret = FAILED; | 1171 | ret = FAILED; |
| 1170 | 1172 | ||
| 1171 | ql_log(ql_log_info, vha, 0x8012, | 1173 | ql_log(ql_log_info, vha, 0x8012, |
| 1172 | "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun); | 1174 | "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun); |
| 1173 | 1175 | ||
| 1174 | if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { | 1176 | if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { |
| 1175 | ql_log(ql_log_fatal, vha, 0x8013, | 1177 | ql_log(ql_log_fatal, vha, 0x8013, |
| @@ -1193,7 +1195,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) | |||
| 1193 | 1195 | ||
| 1194 | eh_bus_reset_done: | 1196 | eh_bus_reset_done: |
| 1195 | ql_log(ql_log_warn, vha, 0x802b, | 1197 | ql_log(ql_log_warn, vha, 0x802b, |
| 1196 | "BUS RESET %s nexus=%ld:%d:%d.\n", | 1198 | "BUS RESET %s nexus=%ld:%d:%llu.\n", |
| 1197 | (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun); | 1199 | (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun); |
| 1198 | 1200 | ||
| 1199 | return ret; | 1201 | return ret; |
| @@ -1220,14 +1222,15 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
| 1220 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); | 1222 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); |
| 1221 | struct qla_hw_data *ha = vha->hw; | 1223 | struct qla_hw_data *ha = vha->hw; |
| 1222 | int ret = FAILED; | 1224 | int ret = FAILED; |
| 1223 | unsigned int id, lun; | 1225 | unsigned int id; |
| 1226 | uint64_t lun; | ||
| 1224 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); | 1227 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); |
| 1225 | 1228 | ||
| 1226 | id = cmd->device->id; | 1229 | id = cmd->device->id; |
| 1227 | lun = cmd->device->lun; | 1230 | lun = cmd->device->lun; |
| 1228 | 1231 | ||
| 1229 | ql_log(ql_log_info, vha, 0x8018, | 1232 | ql_log(ql_log_info, vha, 0x8018, |
| 1230 | "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun); | 1233 | "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun); |
| 1231 | 1234 | ||
| 1232 | /* | 1235 | /* |
| 1233 | * No point in issuing another reset if one is active. Also do not | 1236 | * No point in issuing another reset if one is active. Also do not |
| @@ -1273,7 +1276,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
| 1273 | 1276 | ||
| 1274 | eh_host_reset_lock: | 1277 | eh_host_reset_lock: |
| 1275 | ql_log(ql_log_info, vha, 0x8017, | 1278 | ql_log(ql_log_info, vha, 0x8017, |
| 1276 | "ADAPTER RESET %s nexus=%ld:%d:%d.\n", | 1279 | "ADAPTER RESET %s nexus=%ld:%d:%llu.\n", |
| 1277 | (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun); | 1280 | (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun); |
| 1278 | 1281 | ||
| 1279 | return ret; | 1282 | return ret; |
| @@ -1409,7 +1412,7 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth) | |||
| 1409 | return; | 1412 | return; |
| 1410 | 1413 | ||
| 1411 | ql_dbg(ql_dbg_io, fcport->vha, 0x3029, | 1414 | ql_dbg(ql_dbg_io, fcport->vha, 0x3029, |
| 1412 | "Queue depth adjusted-down to %d for nexus=%ld:%d:%d.\n", | 1415 | "Queue depth adjusted-down to %d for nexus=%ld:%d:%llu.\n", |
| 1413 | sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun); | 1416 | sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun); |
| 1414 | } | 1417 | } |
| 1415 | 1418 | ||
| @@ -1432,7 +1435,7 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth) | |||
| 1432 | scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth); | 1435 | scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth); |
| 1433 | 1436 | ||
| 1434 | ql_dbg(ql_dbg_io, vha, 0x302a, | 1437 | ql_dbg(ql_dbg_io, vha, 0x302a, |
| 1435 | "Queue depth adjusted-up to %d for nexus=%ld:%d:%d.\n", | 1438 | "Queue depth adjusted-up to %d for nexus=%ld:%d:%llu.\n", |
| 1436 | sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun); | 1439 | sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun); |
| 1437 | } | 1440 | } |
| 1438 | 1441 | ||
| @@ -2661,14 +2664,19 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2661 | else | 2664 | else |
| 2662 | host->max_cmd_len = MAX_CMDSZ; | 2665 | host->max_cmd_len = MAX_CMDSZ; |
| 2663 | host->max_channel = MAX_BUSES - 1; | 2666 | host->max_channel = MAX_BUSES - 1; |
| 2664 | host->max_lun = ql2xmaxlun; | 2667 | /* Older HBAs support only 16-bit LUNs */ |
| 2668 | if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) && | ||
| 2669 | ql2xmaxlun > 0xffff) | ||
| 2670 | host->max_lun = 0xffff; | ||
| 2671 | else | ||
| 2672 | host->max_lun = ql2xmaxlun; | ||
| 2665 | host->transportt = qla2xxx_transport_template; | 2673 | host->transportt = qla2xxx_transport_template; |
| 2666 | sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); | 2674 | sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); |
| 2667 | 2675 | ||
| 2668 | ql_dbg(ql_dbg_init, base_vha, 0x0033, | 2676 | ql_dbg(ql_dbg_init, base_vha, 0x0033, |
| 2669 | "max_id=%d this_id=%d " | 2677 | "max_id=%d this_id=%d " |
| 2670 | "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d " | 2678 | "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d " |
| 2671 | "max_lun=%d transportt=%p, vendor_id=%llu.\n", host->max_id, | 2679 | "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id, |
| 2672 | host->this_id, host->cmd_per_lun, host->unique_id, | 2680 | host->this_id, host->cmd_per_lun, host->unique_id, |
| 2673 | host->max_cmd_len, host->max_channel, host->max_lun, | 2681 | host->max_cmd_len, host->max_channel, host->max_lun, |
| 2674 | host->transportt, sht->vendor_id); | 2682 | host->transportt, sht->vendor_id); |
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index 5f58b451327e..2559144f5475 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h | |||
| @@ -23,7 +23,7 @@ void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen); | |||
| 23 | int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha); | 23 | int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha); |
| 24 | int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb); | 24 | int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb); |
| 25 | int qla4xxx_reset_lun(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry, | 25 | int qla4xxx_reset_lun(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry, |
| 26 | int lun); | 26 | uint64_t lun); |
| 27 | int qla4xxx_reset_target(struct scsi_qla_host *ha, | 27 | int qla4xxx_reset_target(struct scsi_qla_host *ha, |
| 28 | struct ddb_entry *ddb_entry); | 28 | struct ddb_entry *ddb_entry); |
| 29 | int qla4xxx_get_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr, | 29 | int qla4xxx_get_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr, |
| @@ -76,7 +76,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, | |||
| 76 | uint32_t state, uint32_t conn_error); | 76 | uint32_t state, uint32_t conn_error); |
| 77 | void qla4xxx_dump_buffer(void *b, uint32_t size); | 77 | void qla4xxx_dump_buffer(void *b, uint32_t size); |
| 78 | int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha, | 78 | int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha, |
| 79 | struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod); | 79 | struct ddb_entry *ddb_entry, uint64_t lun, uint16_t mrkr_mod); |
| 80 | int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr, | 80 | int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr, |
| 81 | uint32_t offset, uint32_t length, uint32_t options); | 81 | uint32_t offset, uint32_t length, uint32_t options); |
| 82 | int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, | 82 | int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, |
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 6f12f859b11d..4180d6d9fe78 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c | |||
| @@ -334,6 +334,12 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha) | |||
| 334 | /* Allocate memory for saving the template */ | 334 | /* Allocate memory for saving the template */ |
| 335 | md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size, | 335 | md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size, |
| 336 | &md_tmp_dma, GFP_KERNEL); | 336 | &md_tmp_dma, GFP_KERNEL); |
| 337 | if (!md_tmp) { | ||
| 338 | ql4_printk(KERN_INFO, ha, | ||
| 339 | "scsi%ld: Failed to allocate DMA memory\n", | ||
| 340 | ha->host_no); | ||
| 341 | return; | ||
| 342 | } | ||
| 337 | 343 | ||
| 338 | /* Request template */ | 344 | /* Request template */ |
| 339 | status = qla4xxx_get_minidump_template(ha, md_tmp_dma); | 345 | status = qla4xxx_get_minidump_template(ha, md_tmp_dma); |
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c index e5697ab144d2..08ab6dac226d 100644 --- a/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/drivers/scsi/qla4xxx/ql4_iocb.c | |||
| @@ -83,7 +83,7 @@ static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha, | |||
| 83 | * This routine issues a marker IOCB. | 83 | * This routine issues a marker IOCB. |
| 84 | **/ | 84 | **/ |
| 85 | int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha, | 85 | int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha, |
| 86 | struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod) | 86 | struct ddb_entry *ddb_entry, uint64_t lun, uint16_t mrkr_mod) |
| 87 | { | 87 | { |
| 88 | struct qla4_marker_entry *marker_entry; | 88 | struct qla4_marker_entry *marker_entry; |
| 89 | unsigned long flags = 0; | 89 | unsigned long flags = 0; |
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 081b6b78d2c6..4f9c0f2be89d 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c | |||
| @@ -26,7 +26,7 @@ static void qla4xxx_copy_sense(struct scsi_qla_host *ha, | |||
| 26 | memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); | 26 | memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); |
| 27 | sense_len = le16_to_cpu(sts_entry->senseDataByteCnt); | 27 | sense_len = le16_to_cpu(sts_entry->senseDataByteCnt); |
| 28 | if (sense_len == 0) { | 28 | if (sense_len == 0) { |
| 29 | DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: %s:" | 29 | DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%llu: %s:" |
| 30 | " sense len 0\n", ha->host_no, | 30 | " sense len 0\n", ha->host_no, |
| 31 | cmd->device->channel, cmd->device->id, | 31 | cmd->device->channel, cmd->device->id, |
| 32 | cmd->device->lun, __func__)); | 32 | cmd->device->lun, __func__)); |
| @@ -43,7 +43,7 @@ static void qla4xxx_copy_sense(struct scsi_qla_host *ha, | |||
| 43 | sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN); | 43 | sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN); |
| 44 | memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len); | 44 | memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len); |
| 45 | 45 | ||
| 46 | DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: %s: sense key = %x, " | 46 | DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: %s: sense key = %x, " |
| 47 | "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no, | 47 | "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no, |
| 48 | cmd->device->channel, cmd->device->id, | 48 | cmd->device->channel, cmd->device->id, |
| 49 | cmd->device->lun, __func__, | 49 | cmd->device->lun, __func__, |
| @@ -169,7 +169,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
| 169 | 169 | ||
| 170 | cmd->result = DID_ERROR << 16; | 170 | cmd->result = DID_ERROR << 16; |
| 171 | 171 | ||
| 172 | DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " | 172 | DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: " |
| 173 | "Mid-layer Data underrun0, " | 173 | "Mid-layer Data underrun0, " |
| 174 | "xferlen = 0x%x, " | 174 | "xferlen = 0x%x, " |
| 175 | "residual = 0x%x\n", ha->host_no, | 175 | "residual = 0x%x\n", ha->host_no, |
| @@ -197,7 +197,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
| 197 | break; | 197 | break; |
| 198 | 198 | ||
| 199 | case SCS_RESET_OCCURRED: | 199 | case SCS_RESET_OCCURRED: |
| 200 | DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n", | 200 | DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Device RESET occurred\n", |
| 201 | ha->host_no, cmd->device->channel, | 201 | ha->host_no, cmd->device->channel, |
| 202 | cmd->device->id, cmd->device->lun, __func__)); | 202 | cmd->device->id, cmd->device->lun, __func__)); |
| 203 | 203 | ||
| @@ -205,7 +205,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
| 205 | break; | 205 | break; |
| 206 | 206 | ||
| 207 | case SCS_ABORTED: | 207 | case SCS_ABORTED: |
| 208 | DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n", | 208 | DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Abort occurred\n", |
| 209 | ha->host_no, cmd->device->channel, | 209 | ha->host_no, cmd->device->channel, |
| 210 | cmd->device->id, cmd->device->lun, __func__)); | 210 | cmd->device->id, cmd->device->lun, __func__)); |
| 211 | 211 | ||
| @@ -213,7 +213,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
| 213 | break; | 213 | break; |
| 214 | 214 | ||
| 215 | case SCS_TIMEOUT: | 215 | case SCS_TIMEOUT: |
| 216 | DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n", | 216 | DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: Timeout\n", |
| 217 | ha->host_no, cmd->device->channel, | 217 | ha->host_no, cmd->device->channel, |
| 218 | cmd->device->id, cmd->device->lun)); | 218 | cmd->device->id, cmd->device->lun)); |
| 219 | 219 | ||
| @@ -232,7 +232,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
| 232 | case SCS_DATA_OVERRUN: | 232 | case SCS_DATA_OVERRUN: |
| 233 | if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) || | 233 | if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) || |
| 234 | (sts_entry->completionStatus == SCS_DATA_OVERRUN)) { | 234 | (sts_entry->completionStatus == SCS_DATA_OVERRUN)) { |
| 235 | DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun\n", | 235 | DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: " "Data overrun\n", |
| 236 | ha->host_no, | 236 | ha->host_no, |
| 237 | cmd->device->channel, cmd->device->id, | 237 | cmd->device->channel, cmd->device->id, |
| 238 | cmd->device->lun, __func__)); | 238 | cmd->device->lun, __func__)); |
| @@ -259,7 +259,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
| 259 | if (!scsi_status && (scsi_bufflen(cmd) - residual) < | 259 | if (!scsi_status && (scsi_bufflen(cmd) - residual) < |
| 260 | cmd->underflow) { | 260 | cmd->underflow) { |
| 261 | DEBUG2(ql4_printk(KERN_INFO, ha, | 261 | DEBUG2(ql4_printk(KERN_INFO, ha, |
| 262 | "scsi%ld:%d:%d:%d: %s: Mid-layer Data underrun, xferlen = 0x%x,residual = 0x%x\n", | 262 | "scsi%ld:%d:%d:%llu: %s: Mid-layer Data underrun, xferlen = 0x%x,residual = 0x%x\n", |
| 263 | ha->host_no, | 263 | ha->host_no, |
| 264 | cmd->device->channel, | 264 | cmd->device->channel, |
| 265 | cmd->device->id, | 265 | cmd->device->id, |
| @@ -291,7 +291,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
| 291 | */ | 291 | */ |
| 292 | 292 | ||
| 293 | DEBUG2(ql4_printk(KERN_INFO, ha, | 293 | DEBUG2(ql4_printk(KERN_INFO, ha, |
| 294 | "scsi%ld:%d:%d:%d: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n", | 294 | "scsi%ld:%d:%d:%llu: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n", |
| 295 | ha->host_no, | 295 | ha->host_no, |
| 296 | cmd->device->channel, | 296 | cmd->device->channel, |
| 297 | cmd->device->id, | 297 | cmd->device->id, |
| @@ -313,7 +313,7 @@ check_scsi_status: | |||
| 313 | 313 | ||
| 314 | case SCS_DEVICE_LOGGED_OUT: | 314 | case SCS_DEVICE_LOGGED_OUT: |
| 315 | case SCS_DEVICE_UNAVAILABLE: | 315 | case SCS_DEVICE_UNAVAILABLE: |
| 316 | DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: SCS_DEVICE " | 316 | DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: SCS_DEVICE " |
| 317 | "state: 0x%x\n", ha->host_no, | 317 | "state: 0x%x\n", ha->host_no, |
| 318 | cmd->device->channel, cmd->device->id, | 318 | cmd->device->channel, cmd->device->id, |
| 319 | cmd->device->lun, sts_entry->completionStatus)); | 319 | cmd->device->lun, sts_entry->completionStatus)); |
| @@ -333,7 +333,7 @@ check_scsi_status: | |||
| 333 | * SCSI Mid-Layer handles device queue full | 333 | * SCSI Mid-Layer handles device queue full |
| 334 | */ | 334 | */ |
| 335 | cmd->result = DID_OK << 16 | sts_entry->scsiStatus; | 335 | cmd->result = DID_OK << 16 | sts_entry->scsiStatus; |
| 336 | DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected " | 336 | DEBUG2(printk("scsi%ld:%d:%llu: %s: QUEUE FULL detected " |
| 337 | "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x," | 337 | "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x," |
| 338 | " iResp=%02x\n", ha->host_no, cmd->device->id, | 338 | " iResp=%02x\n", ha->host_no, cmd->device->id, |
| 339 | cmd->device->lun, __func__, | 339 | cmd->device->lun, __func__, |
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 0a3312c6dd6d..c291fdff1b33 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c | |||
| @@ -1205,7 +1205,7 @@ int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb) | |||
| 1205 | if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) { | 1205 | if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) { |
| 1206 | status = QLA_ERROR; | 1206 | status = QLA_ERROR; |
| 1207 | 1207 | ||
| 1208 | DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%d: abort task FAILED: " | 1208 | DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%llu: abort task FAILED: " |
| 1209 | "mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n", | 1209 | "mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n", |
| 1210 | ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0], | 1210 | ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0], |
| 1211 | mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4])); | 1211 | mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4])); |
| @@ -1225,14 +1225,14 @@ int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb) | |||
| 1225 | * are valid before calling this routine. | 1225 | * are valid before calling this routine. |
| 1226 | **/ | 1226 | **/ |
| 1227 | int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry, | 1227 | int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry, |
| 1228 | int lun) | 1228 | uint64_t lun) |
| 1229 | { | 1229 | { |
| 1230 | uint32_t mbox_cmd[MBOX_REG_COUNT]; | 1230 | uint32_t mbox_cmd[MBOX_REG_COUNT]; |
| 1231 | uint32_t mbox_sts[MBOX_REG_COUNT]; | 1231 | uint32_t mbox_sts[MBOX_REG_COUNT]; |
| 1232 | uint32_t scsi_lun[2]; | 1232 | uint32_t scsi_lun[2]; |
| 1233 | int status = QLA_SUCCESS; | 1233 | int status = QLA_SUCCESS; |
| 1234 | 1234 | ||
| 1235 | DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no, | 1235 | DEBUG2(printk("scsi%ld:%d:%llu: lun reset issued\n", ha->host_no, |
| 1236 | ddb_entry->fw_ddb_index, lun)); | 1236 | ddb_entry->fw_ddb_index, lun)); |
| 1237 | 1237 | ||
| 1238 | /* | 1238 | /* |
| @@ -1620,8 +1620,8 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password, | |||
| 1620 | goto exit_get_chap; | 1620 | goto exit_get_chap; |
| 1621 | } | 1621 | } |
| 1622 | 1622 | ||
| 1623 | strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); | 1623 | strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); |
| 1624 | strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); | 1624 | strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); |
| 1625 | chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE); | 1625 | chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE); |
| 1626 | 1626 | ||
| 1627 | exit_get_chap: | 1627 | exit_get_chap: |
| @@ -1663,8 +1663,8 @@ int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, | |||
| 1663 | else | 1663 | else |
| 1664 | chap_table->flags |= BIT_7; /* local */ | 1664 | chap_table->flags |= BIT_7; /* local */ |
| 1665 | chap_table->secret_len = strlen(password); | 1665 | chap_table->secret_len = strlen(password); |
| 1666 | strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN); | 1666 | strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1); |
| 1667 | strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN); | 1667 | strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1); |
| 1668 | chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE); | 1668 | chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE); |
| 1669 | 1669 | ||
| 1670 | if (is_qla40XX(ha)) { | 1670 | if (is_qla40XX(ha)) { |
| @@ -1742,8 +1742,8 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username, | |||
| 1742 | goto exit_unlock_uni_chap; | 1742 | goto exit_unlock_uni_chap; |
| 1743 | } | 1743 | } |
| 1744 | 1744 | ||
| 1745 | strncpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN); | 1745 | strlcpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN); |
| 1746 | strncpy(username, chap_table->name, MAX_CHAP_NAME_LEN); | 1746 | strlcpy(username, chap_table->name, MAX_CHAP_NAME_LEN); |
| 1747 | 1747 | ||
| 1748 | rval = QLA_SUCCESS; | 1748 | rval = QLA_SUCCESS; |
| 1749 | 1749 | ||
| @@ -2295,7 +2295,7 @@ int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param) | |||
| 2295 | if (param == SET_DRVR_VERSION) { | 2295 | if (param == SET_DRVR_VERSION) { |
| 2296 | mbox_cmd[1] = SET_DRVR_VERSION; | 2296 | mbox_cmd[1] = SET_DRVR_VERSION; |
| 2297 | strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION, | 2297 | strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION, |
| 2298 | MAX_DRVR_VER_LEN); | 2298 | MAX_DRVR_VER_LEN - 1); |
| 2299 | } else { | 2299 | } else { |
| 2300 | ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n", | 2300 | ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n", |
| 2301 | __func__, param); | 2301 | __func__, param); |
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 9dbdb4be2d8f..7c3365864242 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c | |||
| @@ -4221,7 +4221,7 @@ qla4_8xxx_enable_msix(struct scsi_qla_host *ha) | |||
| 4221 | for (i = 0; i < QLA_MSIX_ENTRIES; i++) | 4221 | for (i = 0; i < QLA_MSIX_ENTRIES; i++) |
| 4222 | entries[i].entry = qla4_8xxx_msix_entries[i].entry; | 4222 | entries[i].entry = qla4_8xxx_msix_entries[i].entry; |
| 4223 | 4223 | ||
| 4224 | ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries)); | 4224 | ret = pci_enable_msix_exact(ha->pdev, entries, ARRAY_SIZE(entries)); |
| 4225 | if (ret) { | 4225 | if (ret) { |
| 4226 | ql4_printk(KERN_WARNING, ha, | 4226 | ql4_printk(KERN_WARNING, ha, |
| 4227 | "MSI-X: Failed to enable support -- %d/%d\n", | 4227 | "MSI-X: Failed to enable support -- %d/%d\n", |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 320206376206..199fcf79a051 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
| @@ -756,9 +756,9 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, | |||
| 756 | continue; | 756 | continue; |
| 757 | 757 | ||
| 758 | chap_rec->chap_tbl_idx = i; | 758 | chap_rec->chap_tbl_idx = i; |
| 759 | strncpy(chap_rec->username, chap_table->name, | 759 | strlcpy(chap_rec->username, chap_table->name, |
| 760 | ISCSI_CHAP_AUTH_NAME_MAX_LEN); | 760 | ISCSI_CHAP_AUTH_NAME_MAX_LEN); |
| 761 | strncpy(chap_rec->password, chap_table->secret, | 761 | strlcpy(chap_rec->password, chap_table->secret, |
| 762 | QL4_CHAP_MAX_SECRET_LEN); | 762 | QL4_CHAP_MAX_SECRET_LEN); |
| 763 | chap_rec->password_length = chap_table->secret_len; | 763 | chap_rec->password_length = chap_table->secret_len; |
| 764 | 764 | ||
| @@ -1050,6 +1050,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) | |||
| 1050 | if (!ql_iscsi_stats) { | 1050 | if (!ql_iscsi_stats) { |
| 1051 | ql4_printk(KERN_ERR, ha, | 1051 | ql4_printk(KERN_ERR, ha, |
| 1052 | "Unable to allocate memory for iscsi stats\n"); | 1052 | "Unable to allocate memory for iscsi stats\n"); |
| 1053 | ret = -ENOMEM; | ||
| 1053 | goto exit_host_stats; | 1054 | goto exit_host_stats; |
| 1054 | } | 1055 | } |
| 1055 | 1056 | ||
| @@ -1058,6 +1059,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) | |||
| 1058 | if (ret != QLA_SUCCESS) { | 1059 | if (ret != QLA_SUCCESS) { |
| 1059 | ql4_printk(KERN_ERR, ha, | 1060 | ql4_printk(KERN_ERR, ha, |
| 1060 | "Unable to retrieve iscsi stats\n"); | 1061 | "Unable to retrieve iscsi stats\n"); |
| 1062 | ret = -EIO; | ||
| 1061 | goto exit_host_stats; | 1063 | goto exit_host_stats; |
| 1062 | } | 1064 | } |
| 1063 | host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames); | 1065 | host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames); |
| @@ -6027,8 +6029,8 @@ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, | |||
| 6027 | if (!(chap_table->flags & BIT_6)) /* Not BIDI */ | 6029 | if (!(chap_table->flags & BIT_6)) /* Not BIDI */ |
| 6028 | continue; | 6030 | continue; |
| 6029 | 6031 | ||
| 6030 | strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); | 6032 | strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); |
| 6031 | strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); | 6033 | strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); |
| 6032 | ret = 0; | 6034 | ret = 0; |
| 6033 | break; | 6035 | break; |
| 6034 | } | 6036 | } |
| @@ -6258,8 +6260,8 @@ static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, | |||
| 6258 | 6260 | ||
| 6259 | tddb->tpgt = sess->tpgt; | 6261 | tddb->tpgt = sess->tpgt; |
| 6260 | tddb->port = conn->persistent_port; | 6262 | tddb->port = conn->persistent_port; |
| 6261 | strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); | 6263 | strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); |
| 6262 | strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); | 6264 | strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); |
| 6263 | } | 6265 | } |
| 6264 | 6266 | ||
| 6265 | static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, | 6267 | static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, |
| @@ -7764,7 +7766,7 @@ static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, | |||
| 7764 | goto exit_ddb_logout; | 7766 | goto exit_ddb_logout; |
| 7765 | } | 7767 | } |
| 7766 | 7768 | ||
| 7767 | strncpy(flash_tddb->iscsi_name, fnode_sess->targetname, | 7769 | strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname, |
| 7768 | ISCSI_NAME_SIZE); | 7770 | ISCSI_NAME_SIZE); |
| 7769 | 7771 | ||
| 7770 | if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) | 7772 | if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) |
| @@ -9223,20 +9225,20 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) | |||
| 9223 | { | 9225 | { |
| 9224 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); | 9226 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); |
| 9225 | unsigned int id = cmd->device->id; | 9227 | unsigned int id = cmd->device->id; |
| 9226 | unsigned int lun = cmd->device->lun; | 9228 | uint64_t lun = cmd->device->lun; |
| 9227 | unsigned long flags; | 9229 | unsigned long flags; |
| 9228 | struct srb *srb = NULL; | 9230 | struct srb *srb = NULL; |
| 9229 | int ret = SUCCESS; | 9231 | int ret = SUCCESS; |
| 9230 | int wait = 0; | 9232 | int wait = 0; |
| 9231 | 9233 | ||
| 9232 | ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Abort command issued cmd=%p, cdb=0x%x\n", | 9234 | ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", |
| 9233 | ha->host_no, id, lun, cmd, cmd->cmnd[0]); | 9235 | ha->host_no, id, lun, cmd, cmd->cmnd[0]); |
| 9234 | 9236 | ||
| 9235 | spin_lock_irqsave(&ha->hardware_lock, flags); | 9237 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 9236 | srb = (struct srb *) CMD_SP(cmd); | 9238 | srb = (struct srb *) CMD_SP(cmd); |
| 9237 | if (!srb) { | 9239 | if (!srb) { |
| 9238 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 9240 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 9239 | ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Specified command has already completed.\n", | 9241 | ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n", |
| 9240 | ha->host_no, id, lun); | 9242 | ha->host_no, id, lun); |
| 9241 | return SUCCESS; | 9243 | return SUCCESS; |
| 9242 | } | 9244 | } |
| @@ -9244,11 +9246,11 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) | |||
| 9244 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 9246 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 9245 | 9247 | ||
| 9246 | if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) { | 9248 | if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) { |
| 9247 | DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n", | 9249 | DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n", |
| 9248 | ha->host_no, id, lun)); | 9250 | ha->host_no, id, lun)); |
| 9249 | ret = FAILED; | 9251 | ret = FAILED; |
| 9250 | } else { | 9252 | } else { |
| 9251 | DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n", | 9253 | DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n", |
| 9252 | ha->host_no, id, lun)); | 9254 | ha->host_no, id, lun)); |
| 9253 | wait = 1; | 9255 | wait = 1; |
| 9254 | } | 9256 | } |
| @@ -9258,14 +9260,14 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) | |||
| 9258 | /* Wait for command to complete */ | 9260 | /* Wait for command to complete */ |
| 9259 | if (wait) { | 9261 | if (wait) { |
| 9260 | if (!qla4xxx_eh_wait_on_command(ha, cmd)) { | 9262 | if (!qla4xxx_eh_wait_on_command(ha, cmd)) { |
| 9261 | DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n", | 9263 | DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n", |
| 9262 | ha->host_no, id, lun)); | 9264 | ha->host_no, id, lun)); |
| 9263 | ret = FAILED; | 9265 | ret = FAILED; |
| 9264 | } | 9266 | } |
| 9265 | } | 9267 | } |
| 9266 | 9268 | ||
| 9267 | ql4_printk(KERN_INFO, ha, | 9269 | ql4_printk(KERN_INFO, ha, |
| 9268 | "scsi%ld:%d:%d: Abort command - %s\n", | 9270 | "scsi%ld:%d:%llu: Abort command - %s\n", |
| 9269 | ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed"); | 9271 | ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed"); |
| 9270 | 9272 | ||
| 9271 | return ret; | 9273 | return ret; |
| @@ -9293,7 +9295,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) | |||
| 9293 | ret = FAILED; | 9295 | ret = FAILED; |
| 9294 | 9296 | ||
| 9295 | ql4_printk(KERN_INFO, ha, | 9297 | ql4_printk(KERN_INFO, ha, |
| 9296 | "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no, | 9298 | "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no, |
| 9297 | cmd->device->channel, cmd->device->id, cmd->device->lun); | 9299 | cmd->device->channel, cmd->device->id, cmd->device->lun); |
| 9298 | 9300 | ||
| 9299 | DEBUG2(printk(KERN_INFO | 9301 | DEBUG2(printk(KERN_INFO |
| @@ -9323,7 +9325,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) | |||
| 9323 | goto eh_dev_reset_done; | 9325 | goto eh_dev_reset_done; |
| 9324 | 9326 | ||
| 9325 | ql4_printk(KERN_INFO, ha, | 9327 | ql4_printk(KERN_INFO, ha, |
| 9326 | "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n", | 9328 | "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n", |
| 9327 | ha->host_no, cmd->device->channel, cmd->device->id, | 9329 | ha->host_no, cmd->device->channel, cmd->device->id, |
| 9328 | cmd->device->lun); | 9330 | cmd->device->lun); |
| 9329 | 9331 | ||
| @@ -9440,7 +9442,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
| 9440 | } | 9442 | } |
| 9441 | 9443 | ||
| 9442 | ql4_printk(KERN_INFO, ha, | 9444 | ql4_printk(KERN_INFO, ha, |
| 9443 | "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no, | 9445 | "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no, |
| 9444 | cmd->device->channel, cmd->device->id, cmd->device->lun); | 9446 | cmd->device->channel, cmd->device->id, cmd->device->lun); |
| 9445 | 9447 | ||
| 9446 | if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { | 9448 | if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { |
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c index 13d628b56ff7..a22bb1b40ce2 100644 --- a/drivers/scsi/qlogicfas.c +++ b/drivers/scsi/qlogicfas.c | |||
| @@ -171,8 +171,6 @@ static int qlogicfas_release(struct Scsi_Host *shost) | |||
| 171 | qlogicfas408_disable_ints(priv); | 171 | qlogicfas408_disable_ints(priv); |
| 172 | free_irq(shost->irq, shost); | 172 | free_irq(shost->irq, shost); |
| 173 | } | 173 | } |
| 174 | if (shost->dma_channel != 0xff) | ||
| 175 | free_dma(shost->dma_channel); | ||
| 176 | if (shost->io_port && shost->n_io_port) | 174 | if (shost->io_port && shost->n_io_port) |
| 177 | release_region(shost->io_port, shost->n_io_port); | 175 | release_region(shost->io_port, shost->n_io_port); |
| 178 | scsi_host_put(shost); | 176 | scsi_host_put(shost); |
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 6d48d30bed05..740ae495aa77 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
| @@ -959,7 +959,7 @@ static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int | |||
| 959 | /* Temporary workaround until bug is found and fixed (one bug has been found | 959 | /* Temporary workaround until bug is found and fixed (one bug has been found |
| 960 | already, but fixing it makes things even worse) -jj */ | 960 | already, but fixing it makes things even worse) -jj */ |
| 961 | int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64; | 961 | int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64; |
| 962 | host->can_queue = host->host_busy + num_free; | 962 | host->can_queue = atomic_read(&host->host_busy) + num_free; |
| 963 | host->sg_tablesize = QLOGICPTI_MAX_SG(num_free); | 963 | host->sg_tablesize = QLOGICPTI_MAX_SG(num_free); |
| 964 | } | 964 | } |
| 965 | 965 | ||
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 88d46fe6bf98..d81f3cc43ff1 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
| @@ -72,8 +72,6 @@ | |||
| 72 | #define CREATE_TRACE_POINTS | 72 | #define CREATE_TRACE_POINTS |
| 73 | #include <trace/events/scsi.h> | 73 | #include <trace/events/scsi.h> |
| 74 | 74 | ||
| 75 | static void scsi_done(struct scsi_cmnd *cmd); | ||
| 76 | |||
| 77 | /* | 75 | /* |
| 78 | * Definitions and constants. | 76 | * Definitions and constants. |
| 79 | */ | 77 | */ |
| @@ -124,6 +122,8 @@ static const char *const scsi_device_types[] = { | |||
| 124 | "Bridge controller", | 122 | "Bridge controller", |
| 125 | "Object storage ", | 123 | "Object storage ", |
| 126 | "Automation/Drive ", | 124 | "Automation/Drive ", |
| 125 | "Security Manager ", | ||
| 126 | "Direct-Access-ZBC", | ||
| 127 | }; | 127 | }; |
| 128 | 128 | ||
| 129 | /** | 129 | /** |
| @@ -235,7 +235,8 @@ fail: | |||
| 235 | * Description: allocate a struct scsi_cmd from host's slab, recycling from the | 235 | * Description: allocate a struct scsi_cmd from host's slab, recycling from the |
| 236 | * host's free_list if necessary. | 236 | * host's free_list if necessary. |
| 237 | */ | 237 | */ |
| 238 | struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask) | 238 | static struct scsi_cmnd * |
| 239 | __scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask) | ||
| 239 | { | 240 | { |
| 240 | struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask); | 241 | struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask); |
| 241 | 242 | ||
| @@ -265,7 +266,6 @@ struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask) | |||
| 265 | 266 | ||
| 266 | return cmd; | 267 | return cmd; |
| 267 | } | 268 | } |
| 268 | EXPORT_SYMBOL_GPL(__scsi_get_command); | ||
| 269 | 269 | ||
| 270 | /** | 270 | /** |
| 271 | * scsi_get_command - Allocate and setup a scsi command block | 271 | * scsi_get_command - Allocate and setup a scsi command block |
| @@ -291,14 +291,13 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask) | |||
| 291 | cmd->jiffies_at_alloc = jiffies; | 291 | cmd->jiffies_at_alloc = jiffies; |
| 292 | return cmd; | 292 | return cmd; |
| 293 | } | 293 | } |
| 294 | EXPORT_SYMBOL(scsi_get_command); | ||
| 295 | 294 | ||
| 296 | /** | 295 | /** |
| 297 | * __scsi_put_command - Free a struct scsi_cmnd | 296 | * __scsi_put_command - Free a struct scsi_cmnd |
| 298 | * @shost: dev->host | 297 | * @shost: dev->host |
| 299 | * @cmd: Command to free | 298 | * @cmd: Command to free |
| 300 | */ | 299 | */ |
| 301 | void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd) | 300 | static void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd) |
| 302 | { | 301 | { |
| 303 | unsigned long flags; | 302 | unsigned long flags; |
| 304 | 303 | ||
| @@ -314,7 +313,6 @@ void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd) | |||
| 314 | if (likely(cmd != NULL)) | 313 | if (likely(cmd != NULL)) |
| 315 | scsi_host_free_command(shost, cmd); | 314 | scsi_host_free_command(shost, cmd); |
| 316 | } | 315 | } |
| 317 | EXPORT_SYMBOL(__scsi_put_command); | ||
| 318 | 316 | ||
| 319 | /** | 317 | /** |
| 320 | * scsi_put_command - Free a scsi command block | 318 | * scsi_put_command - Free a scsi command block |
| @@ -334,11 +332,10 @@ void scsi_put_command(struct scsi_cmnd *cmd) | |||
| 334 | list_del_init(&cmd->list); | 332 | list_del_init(&cmd->list); |
| 335 | spin_unlock_irqrestore(&cmd->device->list_lock, flags); | 333 | spin_unlock_irqrestore(&cmd->device->list_lock, flags); |
| 336 | 334 | ||
| 337 | cancel_delayed_work(&cmd->abort_work); | 335 | BUG_ON(delayed_work_pending(&cmd->abort_work)); |
| 338 | 336 | ||
| 339 | __scsi_put_command(cmd->device->host, cmd); | 337 | __scsi_put_command(cmd->device->host, cmd); |
| 340 | } | 338 | } |
| 341 | EXPORT_SYMBOL(scsi_put_command); | ||
| 342 | 339 | ||
| 343 | static struct scsi_host_cmd_pool * | 340 | static struct scsi_host_cmd_pool * |
| 344 | scsi_find_host_cmd_pool(struct Scsi_Host *shost) | 341 | scsi_find_host_cmd_pool(struct Scsi_Host *shost) |
| @@ -368,8 +365,8 @@ scsi_alloc_host_cmd_pool(struct Scsi_Host *shost) | |||
| 368 | if (!pool) | 365 | if (!pool) |
| 369 | return NULL; | 366 | return NULL; |
| 370 | 367 | ||
| 371 | pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->name); | 368 | pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->proc_name); |
| 372 | pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->name); | 369 | pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->proc_name); |
| 373 | if (!pool->cmd_name || !pool->sense_name) { | 370 | if (!pool->cmd_name || !pool->sense_name) { |
| 374 | scsi_free_host_cmd_pool(pool); | 371 | scsi_free_host_cmd_pool(pool); |
| 375 | return NULL; | 372 | return NULL; |
| @@ -380,6 +377,10 @@ scsi_alloc_host_cmd_pool(struct Scsi_Host *shost) | |||
| 380 | pool->slab_flags |= SLAB_CACHE_DMA; | 377 | pool->slab_flags |= SLAB_CACHE_DMA; |
| 381 | pool->gfp_mask = __GFP_DMA; | 378 | pool->gfp_mask = __GFP_DMA; |
| 382 | } | 379 | } |
| 380 | |||
| 381 | if (hostt->cmd_size) | ||
| 382 | hostt->cmd_pool = pool; | ||
| 383 | |||
| 383 | return pool; | 384 | return pool; |
| 384 | } | 385 | } |
| 385 | 386 | ||
| @@ -424,8 +425,10 @@ out: | |||
| 424 | out_free_slab: | 425 | out_free_slab: |
| 425 | kmem_cache_destroy(pool->cmd_slab); | 426 | kmem_cache_destroy(pool->cmd_slab); |
| 426 | out_free_pool: | 427 | out_free_pool: |
| 427 | if (hostt->cmd_size) | 428 | if (hostt->cmd_size) { |
| 428 | scsi_free_host_cmd_pool(pool); | 429 | scsi_free_host_cmd_pool(pool); |
| 430 | hostt->cmd_pool = NULL; | ||
| 431 | } | ||
| 429 | goto out; | 432 | goto out; |
| 430 | } | 433 | } |
| 431 | 434 | ||
| @@ -447,8 +450,10 @@ static void scsi_put_host_cmd_pool(struct Scsi_Host *shost) | |||
| 447 | if (!--pool->users) { | 450 | if (!--pool->users) { |
| 448 | kmem_cache_destroy(pool->cmd_slab); | 451 | kmem_cache_destroy(pool->cmd_slab); |
| 449 | kmem_cache_destroy(pool->sense_slab); | 452 | kmem_cache_destroy(pool->sense_slab); |
| 450 | if (hostt->cmd_size) | 453 | if (hostt->cmd_size) { |
| 451 | scsi_free_host_cmd_pool(pool); | 454 | scsi_free_host_cmd_pool(pool); |
| 455 | hostt->cmd_pool = NULL; | ||
| 456 | } | ||
| 452 | } | 457 | } |
| 453 | mutex_unlock(&host_cmd_pool_mutex); | 458 | mutex_unlock(&host_cmd_pool_mutex); |
| 454 | } | 459 | } |
| @@ -605,7 +610,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) | |||
| 605 | if (level > 3) | 610 | if (level > 3) |
| 606 | scmd_printk(KERN_INFO, cmd, | 611 | scmd_printk(KERN_INFO, cmd, |
| 607 | "scsi host busy %d failed %d\n", | 612 | "scsi host busy %d failed %d\n", |
| 608 | cmd->device->host->host_busy, | 613 | atomic_read(&cmd->device->host->host_busy), |
| 609 | cmd->device->host->host_failed); | 614 | cmd->device->host->host_failed); |
| 610 | } | 615 | } |
| 611 | } | 616 | } |
| @@ -648,33 +653,24 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
| 648 | * returns an immediate error upwards, and signals | 653 | * returns an immediate error upwards, and signals |
| 649 | * that the device is no longer present */ | 654 | * that the device is no longer present */ |
| 650 | cmd->result = DID_NO_CONNECT << 16; | 655 | cmd->result = DID_NO_CONNECT << 16; |
| 651 | scsi_done(cmd); | 656 | goto done; |
| 652 | /* return 0 (because the command has been processed) */ | ||
| 653 | goto out; | ||
| 654 | } | 657 | } |
| 655 | 658 | ||
| 656 | /* Check to see if the scsi lld made this device blocked. */ | 659 | /* Check to see if the scsi lld made this device blocked. */ |
| 657 | if (unlikely(scsi_device_blocked(cmd->device))) { | 660 | if (unlikely(scsi_device_blocked(cmd->device))) { |
| 658 | /* | 661 | /* |
| 659 | * in blocked state, the command is just put back on | 662 | * in blocked state, the command is just put back on |
| 660 | * the device queue. The suspend state has already | 663 | * the device queue. The suspend state has already |
| 661 | * blocked the queue so future requests should not | 664 | * blocked the queue so future requests should not |
| 662 | * occur until the device transitions out of the | 665 | * occur until the device transitions out of the |
| 663 | * suspend state. | 666 | * suspend state. |
| 664 | */ | 667 | */ |
| 665 | 668 | SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, | |
| 666 | scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); | 669 | "queuecommand : device blocked\n")); |
| 667 | 670 | return SCSI_MLQUEUE_DEVICE_BUSY; | |
| 668 | SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n")); | ||
| 669 | |||
| 670 | /* | ||
| 671 | * NOTE: rtn is still zero here because we don't need the | ||
| 672 | * queue to be plugged on return (it's already stopped) | ||
| 673 | */ | ||
| 674 | goto out; | ||
| 675 | } | 671 | } |
| 676 | 672 | ||
| 677 | /* | 673 | /* |
| 678 | * If SCSI-2 or lower, store the LUN value in cmnd. | 674 | * If SCSI-2 or lower, store the LUN value in cmnd. |
| 679 | */ | 675 | */ |
| 680 | if (cmd->device->scsi_level <= SCSI_2 && | 676 | if (cmd->device->scsi_level <= SCSI_2 && |
| @@ -690,57 +686,36 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
| 690 | * length exceeds what the host adapter can handle. | 686 | * length exceeds what the host adapter can handle. |
| 691 | */ | 687 | */ |
| 692 | if (cmd->cmd_len > cmd->device->host->max_cmd_len) { | 688 | if (cmd->cmd_len > cmd->device->host->max_cmd_len) { |
| 693 | SCSI_LOG_MLQUEUE(3, | 689 | SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, |
| 694 | printk("queuecommand : command too long. " | 690 | "queuecommand : command too long. " |
| 695 | "cdb_size=%d host->max_cmd_len=%d\n", | 691 | "cdb_size=%d host->max_cmd_len=%d\n", |
| 696 | cmd->cmd_len, cmd->device->host->max_cmd_len)); | 692 | cmd->cmd_len, cmd->device->host->max_cmd_len)); |
| 697 | cmd->result = (DID_ABORT << 16); | 693 | cmd->result = (DID_ABORT << 16); |
| 698 | 694 | goto done; | |
| 699 | scsi_done(cmd); | ||
| 700 | goto out; | ||
| 701 | } | 695 | } |
| 702 | 696 | ||
| 703 | if (unlikely(host->shost_state == SHOST_DEL)) { | 697 | if (unlikely(host->shost_state == SHOST_DEL)) { |
| 704 | cmd->result = (DID_NO_CONNECT << 16); | 698 | cmd->result = (DID_NO_CONNECT << 16); |
| 705 | scsi_done(cmd); | 699 | goto done; |
| 706 | } else { | 700 | |
| 707 | trace_scsi_dispatch_cmd_start(cmd); | ||
| 708 | cmd->scsi_done = scsi_done; | ||
| 709 | rtn = host->hostt->queuecommand(host, cmd); | ||
| 710 | } | 701 | } |
| 711 | 702 | ||
| 703 | trace_scsi_dispatch_cmd_start(cmd); | ||
| 704 | rtn = host->hostt->queuecommand(host, cmd); | ||
| 712 | if (rtn) { | 705 | if (rtn) { |
| 713 | trace_scsi_dispatch_cmd_error(cmd, rtn); | 706 | trace_scsi_dispatch_cmd_error(cmd, rtn); |
| 714 | if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && | 707 | if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && |
| 715 | rtn != SCSI_MLQUEUE_TARGET_BUSY) | 708 | rtn != SCSI_MLQUEUE_TARGET_BUSY) |
| 716 | rtn = SCSI_MLQUEUE_HOST_BUSY; | 709 | rtn = SCSI_MLQUEUE_HOST_BUSY; |
| 717 | 710 | ||
| 718 | scsi_queue_insert(cmd, rtn); | 711 | SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, |
| 719 | 712 | "queuecommand : request rejected\n")); | |
| 720 | SCSI_LOG_MLQUEUE(3, | ||
| 721 | printk("queuecommand : request rejected\n")); | ||
| 722 | } | 713 | } |
| 723 | 714 | ||
| 724 | out: | ||
| 725 | SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n")); | ||
| 726 | return rtn; | 715 | return rtn; |
| 727 | } | 716 | done: |
| 728 | 717 | cmd->scsi_done(cmd); | |
| 729 | /** | 718 | return 0; |
| 730 | * scsi_done - Invoke completion on finished SCSI command. | ||
| 731 | * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives | ||
| 732 | * ownership back to SCSI Core -- i.e. the LLDD has finished with it. | ||
| 733 | * | ||
| 734 | * Description: This function is the mid-level's (SCSI Core) interrupt routine, | ||
| 735 | * which regains ownership of the SCSI command (de facto) from a LLDD, and | ||
| 736 | * calls blk_complete_request() for further processing. | ||
| 737 | * | ||
| 738 | * This function is interrupt context safe. | ||
| 739 | */ | ||
| 740 | static void scsi_done(struct scsi_cmnd *cmd) | ||
| 741 | { | ||
| 742 | trace_scsi_dispatch_cmd_done(cmd); | ||
| 743 | blk_complete_request(cmd->request); | ||
| 744 | } | 719 | } |
| 745 | 720 | ||
| 746 | /** | 721 | /** |
| @@ -761,17 +736,16 @@ void scsi_finish_command(struct scsi_cmnd *cmd) | |||
| 761 | 736 | ||
| 762 | scsi_device_unbusy(sdev); | 737 | scsi_device_unbusy(sdev); |
| 763 | 738 | ||
| 764 | /* | 739 | /* |
| 765 | * Clear the flags which say that the device/host is no longer | 740 | * Clear the flags that say that the device/target/host is no longer |
| 766 | * capable of accepting new commands. These are set in scsi_queue.c | 741 | * capable of accepting new commands. |
| 767 | * for both the queue full condition on a device, and for a | 742 | */ |
| 768 | * host full condition on the host. | 743 | if (atomic_read(&shost->host_blocked)) |
| 769 | * | 744 | atomic_set(&shost->host_blocked, 0); |
| 770 | * XXX(hch): What about locking? | 745 | if (atomic_read(&starget->target_blocked)) |
| 771 | */ | 746 | atomic_set(&starget->target_blocked, 0); |
| 772 | shost->host_blocked = 0; | 747 | if (atomic_read(&sdev->device_blocked)) |
| 773 | starget->target_blocked = 0; | 748 | atomic_set(&sdev->device_blocked, 0); |
| 774 | sdev->device_blocked = 0; | ||
| 775 | 749 | ||
| 776 | /* | 750 | /* |
| 777 | * If we have valid sense information, then some kind of recovery | 751 | * If we have valid sense information, then some kind of recovery |
| @@ -801,7 +775,6 @@ void scsi_finish_command(struct scsi_cmnd *cmd) | |||
| 801 | } | 775 | } |
| 802 | scsi_io_completion(cmd, good_bytes); | 776 | scsi_io_completion(cmd, good_bytes); |
| 803 | } | 777 | } |
| 804 | EXPORT_SYMBOL(scsi_finish_command); | ||
| 805 | 778 | ||
| 806 | /** | 779 | /** |
| 807 | * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth | 780 | * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth |
| @@ -842,7 +815,7 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags) | |||
| 842 | * is more IO than the LLD's can_queue (so there are not enuogh | 815 | * is more IO than the LLD's can_queue (so there are not enuogh |
| 843 | * tags) request_fn's host queue ready check will handle it. | 816 | * tags) request_fn's host queue ready check will handle it. |
| 844 | */ | 817 | */ |
| 845 | if (!sdev->host->bqt) { | 818 | if (!shost_use_blk_mq(sdev->host) && !sdev->host->bqt) { |
| 846 | if (blk_queue_tagged(sdev->request_queue) && | 819 | if (blk_queue_tagged(sdev->request_queue) && |
| 847 | blk_queue_resize_tags(sdev->request_queue, tags) != 0) | 820 | blk_queue_resize_tags(sdev->request_queue, tags) != 0) |
| 848 | goto out; | 821 | goto out; |
| @@ -850,6 +823,10 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags) | |||
| 850 | 823 | ||
| 851 | sdev->queue_depth = tags; | 824 | sdev->queue_depth = tags; |
| 852 | switch (tagged) { | 825 | switch (tagged) { |
| 826 | case 0: | ||
| 827 | sdev->ordered_tags = 0; | ||
| 828 | sdev->simple_tags = 0; | ||
| 829 | break; | ||
| 853 | case MSG_ORDERED_TAG: | 830 | case MSG_ORDERED_TAG: |
| 854 | sdev->ordered_tags = 1; | 831 | sdev->ordered_tags = 1; |
| 855 | sdev->simple_tags = 1; | 832 | sdev->simple_tags = 1; |
| @@ -859,13 +836,11 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags) | |||
| 859 | sdev->simple_tags = 1; | 836 | sdev->simple_tags = 1; |
| 860 | break; | 837 | break; |
| 861 | default: | 838 | default: |
| 839 | sdev->ordered_tags = 0; | ||
| 840 | sdev->simple_tags = 0; | ||
| 862 | sdev_printk(KERN_WARNING, sdev, | 841 | sdev_printk(KERN_WARNING, sdev, |
| 863 | "scsi_adjust_queue_depth, bad queue type, " | 842 | "scsi_adjust_queue_depth, bad queue type, " |
| 864 | "disabled\n"); | 843 | "disabled\n"); |
| 865 | case 0: | ||
| 866 | sdev->ordered_tags = sdev->simple_tags = 0; | ||
| 867 | sdev->queue_depth = tags; | ||
| 868 | break; | ||
| 869 | } | 844 | } |
| 870 | out: | 845 | out: |
| 871 | spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); | 846 | spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); |
| @@ -1291,7 +1266,7 @@ EXPORT_SYMBOL(__starget_for_each_device); | |||
| 1291 | * really want to use scsi_device_lookup_by_target instead. | 1266 | * really want to use scsi_device_lookup_by_target instead. |
| 1292 | **/ | 1267 | **/ |
| 1293 | struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, | 1268 | struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, |
| 1294 | uint lun) | 1269 | u64 lun) |
| 1295 | { | 1270 | { |
| 1296 | struct scsi_device *sdev; | 1271 | struct scsi_device *sdev; |
| 1297 | 1272 | ||
| @@ -1316,7 +1291,7 @@ EXPORT_SYMBOL(__scsi_device_lookup_by_target); | |||
| 1316 | * needs to be released with scsi_device_put once you're done with it. | 1291 | * needs to be released with scsi_device_put once you're done with it. |
| 1317 | **/ | 1292 | **/ |
| 1318 | struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, | 1293 | struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, |
| 1319 | uint lun) | 1294 | u64 lun) |
| 1320 | { | 1295 | { |
| 1321 | struct scsi_device *sdev; | 1296 | struct scsi_device *sdev; |
| 1322 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | 1297 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
| @@ -1349,7 +1324,7 @@ EXPORT_SYMBOL(scsi_device_lookup_by_target); | |||
| 1349 | * really want to use scsi_device_lookup instead. | 1324 | * really want to use scsi_device_lookup instead. |
| 1350 | **/ | 1325 | **/ |
| 1351 | struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost, | 1326 | struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost, |
| 1352 | uint channel, uint id, uint lun) | 1327 | uint channel, uint id, u64 lun) |
| 1353 | { | 1328 | { |
| 1354 | struct scsi_device *sdev; | 1329 | struct scsi_device *sdev; |
| 1355 | 1330 | ||
| @@ -1375,7 +1350,7 @@ EXPORT_SYMBOL(__scsi_device_lookup); | |||
| 1375 | * needs to be released with scsi_device_put once you're done with it. | 1350 | * needs to be released with scsi_device_put once you're done with it. |
| 1376 | **/ | 1351 | **/ |
| 1377 | struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost, | 1352 | struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost, |
| 1378 | uint channel, uint id, uint lun) | 1353 | uint channel, uint id, u64 lun) |
| 1379 | { | 1354 | { |
| 1380 | struct scsi_device *sdev; | 1355 | struct scsi_device *sdev; |
| 1381 | unsigned long flags; | 1356 | unsigned long flags; |
| @@ -1396,6 +1371,9 @@ MODULE_LICENSE("GPL"); | |||
| 1396 | module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); | 1371 | module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); |
| 1397 | MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); | 1372 | MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); |
| 1398 | 1373 | ||
| 1374 | bool scsi_use_blk_mq = false; | ||
| 1375 | module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO); | ||
| 1376 | |||
| 1399 | static int __init init_scsi(void) | 1377 | static int __init init_scsi(void) |
| 1400 | { | 1378 | { |
| 1401 | int error; | 1379 | int error; |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 1328a2621070..d19c0e3c7f48 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
| @@ -42,6 +42,10 @@ | |||
| 42 | #include <linux/scatterlist.h> | 42 | #include <linux/scatterlist.h> |
| 43 | #include <linux/blkdev.h> | 43 | #include <linux/blkdev.h> |
| 44 | #include <linux/crc-t10dif.h> | 44 | #include <linux/crc-t10dif.h> |
| 45 | #include <linux/spinlock.h> | ||
| 46 | #include <linux/interrupt.h> | ||
| 47 | #include <linux/atomic.h> | ||
| 48 | #include <linux/hrtimer.h> | ||
| 45 | 49 | ||
| 46 | #include <net/checksum.h> | 50 | #include <net/checksum.h> |
| 47 | 51 | ||
| @@ -53,13 +57,16 @@ | |||
| 53 | #include <scsi/scsi_host.h> | 57 | #include <scsi/scsi_host.h> |
| 54 | #include <scsi/scsicam.h> | 58 | #include <scsi/scsicam.h> |
| 55 | #include <scsi/scsi_eh.h> | 59 | #include <scsi/scsi_eh.h> |
| 60 | #include <scsi/scsi_tcq.h> | ||
| 56 | #include <scsi/scsi_dbg.h> | 61 | #include <scsi/scsi_dbg.h> |
| 57 | 62 | ||
| 58 | #include "sd.h" | 63 | #include "sd.h" |
| 59 | #include "scsi_logging.h" | 64 | #include "scsi_logging.h" |
| 60 | 65 | ||
| 61 | #define SCSI_DEBUG_VERSION "1.82" | 66 | #define SCSI_DEBUG_VERSION "1.84" |
| 62 | static const char * scsi_debug_version_date = "20100324"; | 67 | static const char *scsi_debug_version_date = "20140706"; |
| 68 | |||
| 69 | #define MY_NAME "scsi_debug" | ||
| 63 | 70 | ||
| 64 | /* Additional Sense Code (ASC) */ | 71 | /* Additional Sense Code (ASC) */ |
| 65 | #define NO_ADDITIONAL_SENSE 0x0 | 72 | #define NO_ADDITIONAL_SENSE 0x0 |
| @@ -72,7 +79,11 @@ static const char * scsi_debug_version_date = "20100324"; | |||
| 72 | #define INVALID_COMMAND_OPCODE 0x20 | 79 | #define INVALID_COMMAND_OPCODE 0x20 |
| 73 | #define INVALID_FIELD_IN_CDB 0x24 | 80 | #define INVALID_FIELD_IN_CDB 0x24 |
| 74 | #define INVALID_FIELD_IN_PARAM_LIST 0x26 | 81 | #define INVALID_FIELD_IN_PARAM_LIST 0x26 |
| 75 | #define POWERON_RESET 0x29 | 82 | #define UA_RESET_ASC 0x29 |
| 83 | #define UA_CHANGED_ASC 0x2a | ||
| 84 | #define POWER_ON_RESET_ASCQ 0x0 | ||
| 85 | #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */ | ||
| 86 | #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */ | ||
| 76 | #define SAVING_PARAMS_UNSUP 0x39 | 87 | #define SAVING_PARAMS_UNSUP 0x39 |
| 77 | #define TRANSPORT_PROBLEM 0x4b | 88 | #define TRANSPORT_PROBLEM 0x4b |
| 78 | #define THRESHOLD_EXCEEDED 0x5d | 89 | #define THRESHOLD_EXCEEDED 0x5d |
| @@ -81,7 +92,6 @@ static const char * scsi_debug_version_date = "20100324"; | |||
| 81 | /* Additional Sense Code Qualifier (ASCQ) */ | 92 | /* Additional Sense Code Qualifier (ASCQ) */ |
| 82 | #define ACK_NAK_TO 0x3 | 93 | #define ACK_NAK_TO 0x3 |
| 83 | 94 | ||
| 84 | #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */ | ||
| 85 | 95 | ||
| 86 | /* Default values for driver parameters */ | 96 | /* Default values for driver parameters */ |
| 87 | #define DEF_NUM_HOST 1 | 97 | #define DEF_NUM_HOST 1 |
| @@ -91,7 +101,7 @@ static const char * scsi_debug_version_date = "20100324"; | |||
| 91 | * (id 0) containing 1 logical unit (lun 0). That is 1 device. | 101 | * (id 0) containing 1 logical unit (lun 0). That is 1 device. |
| 92 | */ | 102 | */ |
| 93 | #define DEF_ATO 1 | 103 | #define DEF_ATO 1 |
| 94 | #define DEF_DELAY 1 | 104 | #define DEF_DELAY 1 /* if > 0 unit is a jiffy */ |
| 95 | #define DEF_DEV_SIZE_MB 8 | 105 | #define DEF_DEV_SIZE_MB 8 |
| 96 | #define DEF_DIF 0 | 106 | #define DEF_DIF 0 |
| 97 | #define DEF_DIX 0 | 107 | #define DEF_DIX 0 |
| @@ -99,11 +109,13 @@ static const char * scsi_debug_version_date = "20100324"; | |||
| 99 | #define DEF_EVERY_NTH 0 | 109 | #define DEF_EVERY_NTH 0 |
| 100 | #define DEF_FAKE_RW 0 | 110 | #define DEF_FAKE_RW 0 |
| 101 | #define DEF_GUARD 0 | 111 | #define DEF_GUARD 0 |
| 112 | #define DEF_HOST_LOCK 0 | ||
| 102 | #define DEF_LBPU 0 | 113 | #define DEF_LBPU 0 |
| 103 | #define DEF_LBPWS 0 | 114 | #define DEF_LBPWS 0 |
| 104 | #define DEF_LBPWS10 0 | 115 | #define DEF_LBPWS10 0 |
| 105 | #define DEF_LBPRZ 1 | 116 | #define DEF_LBPRZ 1 |
| 106 | #define DEF_LOWEST_ALIGNED 0 | 117 | #define DEF_LOWEST_ALIGNED 0 |
| 118 | #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */ | ||
| 107 | #define DEF_NO_LUN_0 0 | 119 | #define DEF_NO_LUN_0 0 |
| 108 | #define DEF_NUM_PARTS 0 | 120 | #define DEF_NUM_PARTS 0 |
| 109 | #define DEF_OPTS 0 | 121 | #define DEF_OPTS 0 |
| @@ -113,6 +125,7 @@ static const char * scsi_debug_version_date = "20100324"; | |||
| 113 | #define DEF_REMOVABLE false | 125 | #define DEF_REMOVABLE false |
| 114 | #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */ | 126 | #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */ |
| 115 | #define DEF_SECTOR_SIZE 512 | 127 | #define DEF_SECTOR_SIZE 512 |
| 128 | #define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */ | ||
| 116 | #define DEF_UNMAP_ALIGNMENT 0 | 129 | #define DEF_UNMAP_ALIGNMENT 0 |
| 117 | #define DEF_UNMAP_GRANULARITY 1 | 130 | #define DEF_UNMAP_GRANULARITY 1 |
| 118 | #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF | 131 | #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF |
| @@ -120,6 +133,7 @@ static const char * scsi_debug_version_date = "20100324"; | |||
| 120 | #define DEF_VIRTUAL_GB 0 | 133 | #define DEF_VIRTUAL_GB 0 |
| 121 | #define DEF_VPD_USE_HOSTNO 1 | 134 | #define DEF_VPD_USE_HOSTNO 1 |
| 122 | #define DEF_WRITESAME_LENGTH 0xFFFF | 135 | #define DEF_WRITESAME_LENGTH 0xFFFF |
| 136 | #define DELAY_OVERRIDDEN -9999 | ||
| 123 | 137 | ||
| 124 | /* bit mask values for scsi_debug_opts */ | 138 | /* bit mask values for scsi_debug_opts */ |
| 125 | #define SCSI_DEBUG_OPT_NOISE 1 | 139 | #define SCSI_DEBUG_OPT_NOISE 1 |
| @@ -130,7 +144,14 @@ static const char * scsi_debug_version_date = "20100324"; | |||
| 130 | #define SCSI_DEBUG_OPT_DIF_ERR 32 | 144 | #define SCSI_DEBUG_OPT_DIF_ERR 32 |
| 131 | #define SCSI_DEBUG_OPT_DIX_ERR 64 | 145 | #define SCSI_DEBUG_OPT_DIX_ERR 64 |
| 132 | #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128 | 146 | #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128 |
| 133 | #define SCSI_DEBUG_OPT_SHORT_TRANSFER 256 | 147 | #define SCSI_DEBUG_OPT_SHORT_TRANSFER 0x100 |
| 148 | #define SCSI_DEBUG_OPT_Q_NOISE 0x200 | ||
| 149 | #define SCSI_DEBUG_OPT_ALL_TSF 0x400 | ||
| 150 | #define SCSI_DEBUG_OPT_RARE_TSF 0x800 | ||
| 151 | #define SCSI_DEBUG_OPT_N_WCE 0x1000 | ||
| 152 | #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000 | ||
| 153 | #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000 | ||
| 154 | #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000) | ||
| 134 | /* When "every_nth" > 0 then modulo "every_nth" commands: | 155 | /* When "every_nth" > 0 then modulo "every_nth" commands: |
| 135 | * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set | 156 | * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set |
| 136 | * - a RECOVERED_ERROR is simulated on successful read and write | 157 | * - a RECOVERED_ERROR is simulated on successful read and write |
| @@ -148,6 +169,19 @@ static const char * scsi_debug_version_date = "20100324"; | |||
| 148 | * writing a new value (other than -1 or 1) to every_nth via sysfs). | 169 | * writing a new value (other than -1 or 1) to every_nth via sysfs). |
| 149 | */ | 170 | */ |
| 150 | 171 | ||
| 172 | /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs)are returned in | ||
| 173 | * priority order. In the subset implemented here lower numbers have higher | ||
| 174 | * priority. The UA numbers should be a sequence starting from 0 with | ||
| 175 | * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */ | ||
| 176 | #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */ | ||
| 177 | #define SDEBUG_UA_BUS_RESET 1 | ||
| 178 | #define SDEBUG_UA_MODE_CHANGED 2 | ||
| 179 | #define SDEBUG_NUM_UAS 3 | ||
| 180 | |||
| 181 | /* for check_readiness() */ | ||
| 182 | #define UAS_ONLY 1 | ||
| 183 | #define UAS_TUR 0 | ||
| 184 | |||
| 151 | /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this | 185 | /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this |
| 152 | * sector on read commands: */ | 186 | * sector on read commands: */ |
| 153 | #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */ | 187 | #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */ |
| @@ -158,9 +192,19 @@ static const char * scsi_debug_version_date = "20100324"; | |||
| 158 | #define SAM2_LUN_ADDRESS_METHOD 0 | 192 | #define SAM2_LUN_ADDRESS_METHOD 0 |
| 159 | #define SAM2_WLUN_REPORT_LUNS 0xc101 | 193 | #define SAM2_WLUN_REPORT_LUNS 0xc101 |
| 160 | 194 | ||
| 161 | /* Can queue up to this number of commands. Typically commands that | 195 | /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued |
| 162 | * that have a non-zero delay are queued. */ | 196 | * (for response) at one time. Can be reduced by max_queue option. Command |
| 163 | #define SCSI_DEBUG_CANQUEUE 255 | 197 | * responses are not queued when delay=0 and ndelay=0. The per-device |
| 198 | * DEF_CMD_PER_LUN can be changed via sysfs: | ||
| 199 | * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed | ||
| 200 | * SCSI_DEBUG_CANQUEUE. */ | ||
| 201 | #define SCSI_DEBUG_CANQUEUE_WORDS 9 /* a WORD is bits in a long */ | ||
| 202 | #define SCSI_DEBUG_CANQUEUE (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG) | ||
| 203 | #define DEF_CMD_PER_LUN 255 | ||
| 204 | |||
| 205 | #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE | ||
| 206 | #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE" | ||
| 207 | #endif | ||
| 164 | 208 | ||
| 165 | static int scsi_debug_add_host = DEF_NUM_HOST; | 209 | static int scsi_debug_add_host = DEF_NUM_HOST; |
| 166 | static int scsi_debug_ato = DEF_ATO; | 210 | static int scsi_debug_ato = DEF_ATO; |
| @@ -175,6 +219,8 @@ static unsigned int scsi_debug_guard = DEF_GUARD; | |||
| 175 | static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; | 219 | static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; |
| 176 | static int scsi_debug_max_luns = DEF_MAX_LUNS; | 220 | static int scsi_debug_max_luns = DEF_MAX_LUNS; |
| 177 | static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE; | 221 | static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE; |
| 222 | static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */ | ||
| 223 | static int scsi_debug_ndelay = DEF_NDELAY; | ||
| 178 | static int scsi_debug_no_lun_0 = DEF_NO_LUN_0; | 224 | static int scsi_debug_no_lun_0 = DEF_NO_LUN_0; |
| 179 | static int scsi_debug_no_uld = 0; | 225 | static int scsi_debug_no_uld = 0; |
| 180 | static int scsi_debug_num_parts = DEF_NUM_PARTS; | 226 | static int scsi_debug_num_parts = DEF_NUM_PARTS; |
| @@ -198,8 +244,11 @@ static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; | |||
| 198 | static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH; | 244 | static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH; |
| 199 | static bool scsi_debug_removable = DEF_REMOVABLE; | 245 | static bool scsi_debug_removable = DEF_REMOVABLE; |
| 200 | static bool scsi_debug_clustering; | 246 | static bool scsi_debug_clustering; |
| 247 | static bool scsi_debug_host_lock = DEF_HOST_LOCK; | ||
| 201 | 248 | ||
| 202 | static int scsi_debug_cmnd_count = 0; | 249 | static atomic_t sdebug_cmnd_count; |
| 250 | static atomic_t sdebug_completions; | ||
| 251 | static atomic_t sdebug_a_tsf; /* counter of 'almost' TSFs */ | ||
| 203 | 252 | ||
| 204 | #define DEV_READONLY(TGT) (0) | 253 | #define DEV_READONLY(TGT) (0) |
| 205 | 254 | ||
| @@ -214,24 +263,23 @@ static int sdebug_sectors_per; /* sectors per cylinder */ | |||
| 214 | 263 | ||
| 215 | #define SDEBUG_MAX_PARTS 4 | 264 | #define SDEBUG_MAX_PARTS 4 |
| 216 | 265 | ||
| 217 | #define SDEBUG_SENSE_LEN 32 | ||
| 218 | |||
| 219 | #define SCSI_DEBUG_MAX_CMD_LEN 32 | 266 | #define SCSI_DEBUG_MAX_CMD_LEN 32 |
| 220 | 267 | ||
| 221 | static unsigned int scsi_debug_lbp(void) | 268 | static unsigned int scsi_debug_lbp(void) |
| 222 | { | 269 | { |
| 223 | return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10; | 270 | return ((0 == scsi_debug_fake_rw) && |
| 271 | (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10)); | ||
| 224 | } | 272 | } |
| 225 | 273 | ||
| 226 | struct sdebug_dev_info { | 274 | struct sdebug_dev_info { |
| 227 | struct list_head dev_list; | 275 | struct list_head dev_list; |
| 228 | unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */ | ||
| 229 | unsigned int channel; | 276 | unsigned int channel; |
| 230 | unsigned int target; | 277 | unsigned int target; |
| 231 | unsigned int lun; | 278 | u64 lun; |
| 232 | struct sdebug_host_info *sdbg_host; | 279 | struct sdebug_host_info *sdbg_host; |
| 233 | unsigned int wlun; | 280 | u64 wlun; |
| 234 | char reset; | 281 | unsigned long uas_bm[1]; |
| 282 | atomic_t num_in_q; | ||
| 235 | char stopped; | 283 | char stopped; |
| 236 | char used; | 284 | char used; |
| 237 | }; | 285 | }; |
| @@ -249,26 +297,33 @@ struct sdebug_host_info { | |||
| 249 | static LIST_HEAD(sdebug_host_list); | 297 | static LIST_HEAD(sdebug_host_list); |
| 250 | static DEFINE_SPINLOCK(sdebug_host_list_lock); | 298 | static DEFINE_SPINLOCK(sdebug_host_list_lock); |
| 251 | 299 | ||
| 252 | typedef void (* done_funct_t) (struct scsi_cmnd *); | 300 | |
| 301 | struct sdebug_hrtimer { /* ... is derived from hrtimer */ | ||
| 302 | struct hrtimer hrt; /* must be first element */ | ||
| 303 | int qa_indx; | ||
| 304 | }; | ||
| 253 | 305 | ||
| 254 | struct sdebug_queued_cmd { | 306 | struct sdebug_queued_cmd { |
| 255 | int in_use; | 307 | /* in_use flagged by a bit in queued_in_use_bm[] */ |
| 256 | struct timer_list cmnd_timer; | 308 | struct timer_list *cmnd_timerp; |
| 257 | done_funct_t done_funct; | 309 | struct tasklet_struct *tletp; |
| 310 | struct sdebug_hrtimer *sd_hrtp; | ||
| 258 | struct scsi_cmnd * a_cmnd; | 311 | struct scsi_cmnd * a_cmnd; |
| 259 | int scsi_result; | ||
| 260 | }; | 312 | }; |
| 261 | static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE]; | 313 | static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE]; |
| 314 | static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS]; | ||
| 315 | |||
| 262 | 316 | ||
| 263 | static unsigned char * fake_storep; /* ramdisk storage */ | 317 | static unsigned char * fake_storep; /* ramdisk storage */ |
| 264 | static struct sd_dif_tuple *dif_storep; /* protection info */ | 318 | static struct sd_dif_tuple *dif_storep; /* protection info */ |
| 265 | static void *map_storep; /* provisioning map */ | 319 | static void *map_storep; /* provisioning map */ |
| 266 | 320 | ||
| 267 | static unsigned long map_size; | 321 | static unsigned long map_size; |
| 268 | static int num_aborts = 0; | 322 | static int num_aborts; |
| 269 | static int num_dev_resets = 0; | 323 | static int num_dev_resets; |
| 270 | static int num_bus_resets = 0; | 324 | static int num_target_resets; |
| 271 | static int num_host_resets = 0; | 325 | static int num_bus_resets; |
| 326 | static int num_host_resets; | ||
| 272 | static int dix_writes; | 327 | static int dix_writes; |
| 273 | static int dix_reads; | 328 | static int dix_reads; |
| 274 | static int dif_errors; | 329 | static int dif_errors; |
| @@ -276,7 +331,8 @@ static int dif_errors; | |||
| 276 | static DEFINE_SPINLOCK(queued_arr_lock); | 331 | static DEFINE_SPINLOCK(queued_arr_lock); |
| 277 | static DEFINE_RWLOCK(atomic_rw); | 332 | static DEFINE_RWLOCK(atomic_rw); |
| 278 | 333 | ||
| 279 | static char sdebug_proc_name[] = "scsi_debug"; | 334 | static char sdebug_proc_name[] = MY_NAME; |
| 335 | static const char *my_name = MY_NAME; | ||
| 280 | 336 | ||
| 281 | static struct bus_type pseudo_lld_bus; | 337 | static struct bus_type pseudo_lld_bus; |
| 282 | 338 | ||
| @@ -291,6 +347,12 @@ static const int check_condition_result = | |||
| 291 | static const int illegal_condition_result = | 347 | static const int illegal_condition_result = |
| 292 | (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; | 348 | (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; |
| 293 | 349 | ||
| 350 | static const int device_qfull_result = | ||
| 351 | (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL; | ||
| 352 | |||
| 353 | static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, | ||
| 354 | 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, | ||
| 355 | 0, 0, 0, 0}; | ||
| 294 | static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, | 356 | static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, |
| 295 | 0, 0, 0x2, 0x4b}; | 357 | 0, 0, 0x2, 0x4b}; |
| 296 | static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, | 358 | static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, |
| @@ -332,19 +394,24 @@ static void sdebug_max_tgts_luns(void) | |||
| 332 | spin_unlock(&sdebug_host_list_lock); | 394 | spin_unlock(&sdebug_host_list_lock); |
| 333 | } | 395 | } |
| 334 | 396 | ||
| 335 | static void mk_sense_buffer(struct sdebug_dev_info *devip, int key, | 397 | static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq) |
| 336 | int asc, int asq) | ||
| 337 | { | 398 | { |
| 338 | unsigned char *sbuff; | 399 | unsigned char *sbuff; |
| 339 | 400 | ||
| 340 | sbuff = devip->sense_buff; | 401 | sbuff = scp->sense_buffer; |
| 341 | memset(sbuff, 0, SDEBUG_SENSE_LEN); | 402 | if (!sbuff) { |
| 403 | sdev_printk(KERN_ERR, scp->device, | ||
| 404 | "%s: sense_buffer is NULL\n", __func__); | ||
| 405 | return; | ||
| 406 | } | ||
| 407 | memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE); | ||
| 342 | 408 | ||
| 343 | scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq); | 409 | scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq); |
| 344 | 410 | ||
| 345 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 411 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) |
| 346 | printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: " | 412 | sdev_printk(KERN_INFO, scp->device, |
| 347 | "[0x%x,0x%x,0x%x]\n", key, asc, asq); | 413 | "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n", |
| 414 | my_name, key, asc, asq); | ||
| 348 | } | 415 | } |
| 349 | 416 | ||
| 350 | static void get_data_transfer_info(unsigned char *cmd, | 417 | static void get_data_transfer_info(unsigned char *cmd, |
| @@ -409,29 +476,71 @@ static void get_data_transfer_info(unsigned char *cmd, | |||
| 409 | static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) | 476 | static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) |
| 410 | { | 477 | { |
| 411 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) { | 478 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) { |
| 412 | printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd); | 479 | if (0x1261 == cmd) |
| 480 | sdev_printk(KERN_INFO, dev, | ||
| 481 | "%s: BLKFLSBUF [0x1261]\n", __func__); | ||
| 482 | else if (0x5331 == cmd) | ||
| 483 | sdev_printk(KERN_INFO, dev, | ||
| 484 | "%s: CDROM_GET_CAPABILITY [0x5331]\n", | ||
| 485 | __func__); | ||
| 486 | else | ||
| 487 | sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n", | ||
| 488 | __func__, cmd); | ||
| 413 | } | 489 | } |
| 414 | return -EINVAL; | 490 | return -EINVAL; |
| 415 | /* return -ENOTTY; // correct return but upsets fdisk */ | 491 | /* return -ENOTTY; // correct return but upsets fdisk */ |
| 416 | } | 492 | } |
| 417 | 493 | ||
| 418 | static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only, | 494 | static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only, |
| 419 | struct sdebug_dev_info * devip) | 495 | struct sdebug_dev_info * devip) |
| 420 | { | 496 | { |
| 421 | if (devip->reset) { | 497 | int k; |
| 422 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 498 | bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts); |
| 423 | printk(KERN_INFO "scsi_debug: Reporting Unit " | 499 | |
| 424 | "attention: power on reset\n"); | 500 | k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS); |
| 425 | devip->reset = 0; | 501 | if (k != SDEBUG_NUM_UAS) { |
| 426 | mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0); | 502 | const char *cp = NULL; |
| 503 | |||
| 504 | switch (k) { | ||
| 505 | case SDEBUG_UA_POR: | ||
| 506 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | ||
| 507 | UA_RESET_ASC, POWER_ON_RESET_ASCQ); | ||
| 508 | if (debug) | ||
| 509 | cp = "power on reset"; | ||
| 510 | break; | ||
| 511 | case SDEBUG_UA_BUS_RESET: | ||
| 512 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | ||
| 513 | UA_RESET_ASC, BUS_RESET_ASCQ); | ||
| 514 | if (debug) | ||
| 515 | cp = "bus reset"; | ||
| 516 | break; | ||
| 517 | case SDEBUG_UA_MODE_CHANGED: | ||
| 518 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | ||
| 519 | UA_CHANGED_ASC, MODE_CHANGED_ASCQ); | ||
| 520 | if (debug) | ||
| 521 | cp = "mode parameters changed"; | ||
| 522 | break; | ||
| 523 | default: | ||
| 524 | pr_warn("%s: unexpected unit attention code=%d\n", | ||
| 525 | __func__, k); | ||
| 526 | if (debug) | ||
| 527 | cp = "unknown"; | ||
| 528 | break; | ||
| 529 | } | ||
| 530 | clear_bit(k, devip->uas_bm); | ||
| 531 | if (debug) | ||
| 532 | sdev_printk(KERN_INFO, SCpnt->device, | ||
| 533 | "%s reports: Unit attention: %s\n", | ||
| 534 | my_name, cp); | ||
| 427 | return check_condition_result; | 535 | return check_condition_result; |
| 428 | } | 536 | } |
| 429 | if ((0 == reset_only) && devip->stopped) { | 537 | if ((UAS_TUR == uas_only) && devip->stopped) { |
| 430 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 538 | mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY, |
| 431 | printk(KERN_INFO "scsi_debug: Reporting Not " | ||
| 432 | "ready: initializing command required\n"); | ||
| 433 | mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY, | ||
| 434 | 0x2); | 539 | 0x2); |
| 540 | if (debug) | ||
| 541 | sdev_printk(KERN_INFO, SCpnt->device, | ||
| 542 | "%s reports: Not ready: %s\n", my_name, | ||
| 543 | "initializing command required"); | ||
| 435 | return check_condition_result; | 544 | return check_condition_result; |
| 436 | } | 545 | } |
| 437 | return 0; | 546 | return 0; |
| @@ -471,8 +580,9 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, | |||
| 471 | 580 | ||
| 472 | static const char * inq_vendor_id = "Linux "; | 581 | static const char * inq_vendor_id = "Linux "; |
| 473 | static const char * inq_product_id = "scsi_debug "; | 582 | static const char * inq_product_id = "scsi_debug "; |
| 474 | static const char * inq_product_rev = "0004"; | 583 | static const char *inq_product_rev = "0184"; /* version less '.' */ |
| 475 | 584 | ||
| 585 | /* Device identification VPD page. Returns number of bytes placed in arr */ | ||
| 476 | static int inquiry_evpd_83(unsigned char * arr, int port_group_id, | 586 | static int inquiry_evpd_83(unsigned char * arr, int port_group_id, |
| 477 | int target_dev_id, int dev_id_num, | 587 | int target_dev_id, int dev_id_num, |
| 478 | const char * dev_id_str, | 588 | const char * dev_id_str, |
| @@ -573,12 +683,14 @@ static unsigned char vpd84_data[] = { | |||
| 573 | 0x22,0x22,0x22,0x0,0xbb,0x2, | 683 | 0x22,0x22,0x22,0x0,0xbb,0x2, |
| 574 | }; | 684 | }; |
| 575 | 685 | ||
| 686 | /* Software interface identification VPD page */ | ||
| 576 | static int inquiry_evpd_84(unsigned char * arr) | 687 | static int inquiry_evpd_84(unsigned char * arr) |
| 577 | { | 688 | { |
| 578 | memcpy(arr, vpd84_data, sizeof(vpd84_data)); | 689 | memcpy(arr, vpd84_data, sizeof(vpd84_data)); |
| 579 | return sizeof(vpd84_data); | 690 | return sizeof(vpd84_data); |
| 580 | } | 691 | } |
| 581 | 692 | ||
| 693 | /* Management network addresses VPD page */ | ||
| 582 | static int inquiry_evpd_85(unsigned char * arr) | 694 | static int inquiry_evpd_85(unsigned char * arr) |
| 583 | { | 695 | { |
| 584 | int num = 0; | 696 | int num = 0; |
| @@ -713,6 +825,7 @@ static unsigned char vpd89_data[] = { | |||
| 713 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51, | 825 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51, |
| 714 | }; | 826 | }; |
| 715 | 827 | ||
| 828 | /* ATA Information VPD page */ | ||
| 716 | static int inquiry_evpd_89(unsigned char * arr) | 829 | static int inquiry_evpd_89(unsigned char * arr) |
| 717 | { | 830 | { |
| 718 | memcpy(arr, vpd89_data, sizeof(vpd89_data)); | 831 | memcpy(arr, vpd89_data, sizeof(vpd89_data)); |
| @@ -720,7 +833,6 @@ static int inquiry_evpd_89(unsigned char * arr) | |||
| 720 | } | 833 | } |
| 721 | 834 | ||
| 722 | 835 | ||
| 723 | /* Block limits VPD page (SBC-3) */ | ||
| 724 | static unsigned char vpdb0_data[] = { | 836 | static unsigned char vpdb0_data[] = { |
| 725 | /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64, | 837 | /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64, |
| 726 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, | 838 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, |
| @@ -728,6 +840,7 @@ static unsigned char vpdb0_data[] = { | |||
| 728 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, | 840 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, |
| 729 | }; | 841 | }; |
| 730 | 842 | ||
| 843 | /* Block limits VPD page (SBC-3) */ | ||
| 731 | static int inquiry_evpd_b0(unsigned char * arr) | 844 | static int inquiry_evpd_b0(unsigned char * arr) |
| 732 | { | 845 | { |
| 733 | unsigned int gran; | 846 | unsigned int gran; |
| @@ -811,7 +924,7 @@ static int inquiry_evpd_b2(unsigned char *arr) | |||
| 811 | #define SDEBUG_LONG_INQ_SZ 96 | 924 | #define SDEBUG_LONG_INQ_SZ 96 |
| 812 | #define SDEBUG_MAX_INQ_ARR_SZ 584 | 925 | #define SDEBUG_MAX_INQ_ARR_SZ 584 |
| 813 | 926 | ||
| 814 | static int resp_inquiry(struct scsi_cmnd * scp, int target, | 927 | static int resp_inquiry(struct scsi_cmnd *scp, int target, |
| 815 | struct sdebug_dev_info * devip) | 928 | struct sdebug_dev_info * devip) |
| 816 | { | 929 | { |
| 817 | unsigned char pq_pdt; | 930 | unsigned char pq_pdt; |
| @@ -831,7 +944,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target, | |||
| 831 | pq_pdt = (scsi_debug_ptype & 0x1f); | 944 | pq_pdt = (scsi_debug_ptype & 0x1f); |
| 832 | arr[0] = pq_pdt; | 945 | arr[0] = pq_pdt; |
| 833 | if (0x2 & cmd[1]) { /* CMDDT bit set */ | 946 | if (0x2 & cmd[1]) { /* CMDDT bit set */ |
| 834 | mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, | 947 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, |
| 835 | 0); | 948 | 0); |
| 836 | kfree(arr); | 949 | kfree(arr); |
| 837 | return check_condition_result; | 950 | return check_condition_result; |
| @@ -917,7 +1030,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target, | |||
| 917 | arr[3] = inquiry_evpd_b2(&arr[4]); | 1030 | arr[3] = inquiry_evpd_b2(&arr[4]); |
| 918 | } else { | 1031 | } else { |
| 919 | /* Illegal request, invalid field in cdb */ | 1032 | /* Illegal request, invalid field in cdb */ |
| 920 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 1033 | mk_sense_buffer(scp, ILLEGAL_REQUEST, |
| 921 | INVALID_FIELD_IN_CDB, 0); | 1034 | INVALID_FIELD_IN_CDB, 0); |
| 922 | kfree(arr); | 1035 | kfree(arr); |
| 923 | return check_condition_result; | 1036 | return check_condition_result; |
| @@ -963,15 +1076,13 @@ static int resp_requests(struct scsi_cmnd * scp, | |||
| 963 | { | 1076 | { |
| 964 | unsigned char * sbuff; | 1077 | unsigned char * sbuff; |
| 965 | unsigned char *cmd = (unsigned char *)scp->cmnd; | 1078 | unsigned char *cmd = (unsigned char *)scp->cmnd; |
| 966 | unsigned char arr[SDEBUG_SENSE_LEN]; | 1079 | unsigned char arr[SCSI_SENSE_BUFFERSIZE]; |
| 967 | int want_dsense; | 1080 | int want_dsense; |
| 968 | int len = 18; | 1081 | int len = 18; |
| 969 | 1082 | ||
| 970 | memset(arr, 0, sizeof(arr)); | 1083 | memset(arr, 0, sizeof(arr)); |
| 971 | if (devip->reset == 1) | ||
| 972 | mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0); | ||
| 973 | want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense; | 1084 | want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense; |
| 974 | sbuff = devip->sense_buff; | 1085 | sbuff = scp->sense_buffer; |
| 975 | if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) { | 1086 | if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) { |
| 976 | if (want_dsense) { | 1087 | if (want_dsense) { |
| 977 | arr[0] = 0x72; | 1088 | arr[0] = 0x72; |
| @@ -986,7 +1097,7 @@ static int resp_requests(struct scsi_cmnd * scp, | |||
| 986 | arr[13] = 0xff; /* TEST set and MRIE==6 */ | 1097 | arr[13] = 0xff; /* TEST set and MRIE==6 */ |
| 987 | } | 1098 | } |
| 988 | } else { | 1099 | } else { |
| 989 | memcpy(arr, sbuff, SDEBUG_SENSE_LEN); | 1100 | memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE); |
| 990 | if ((cmd[1] & 1) && (! scsi_debug_dsense)) { | 1101 | if ((cmd[1] & 1) && (! scsi_debug_dsense)) { |
| 991 | /* DESC bit set and sense_buff in fixed format */ | 1102 | /* DESC bit set and sense_buff in fixed format */ |
| 992 | memset(arr, 0, sizeof(arr)); | 1103 | memset(arr, 0, sizeof(arr)); |
| @@ -997,7 +1108,7 @@ static int resp_requests(struct scsi_cmnd * scp, | |||
| 997 | len = 8; | 1108 | len = 8; |
| 998 | } | 1109 | } |
| 999 | } | 1110 | } |
| 1000 | mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0); | 1111 | mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0); |
| 1001 | return fill_from_dev_buffer(scp, arr, len); | 1112 | return fill_from_dev_buffer(scp, arr, len); |
| 1002 | } | 1113 | } |
| 1003 | 1114 | ||
| @@ -1007,11 +1118,12 @@ static int resp_start_stop(struct scsi_cmnd * scp, | |||
| 1007 | unsigned char *cmd = (unsigned char *)scp->cmnd; | 1118 | unsigned char *cmd = (unsigned char *)scp->cmnd; |
| 1008 | int power_cond, errsts, start; | 1119 | int power_cond, errsts, start; |
| 1009 | 1120 | ||
| 1010 | if ((errsts = check_readiness(scp, 1, devip))) | 1121 | errsts = check_readiness(scp, UAS_ONLY, devip); |
| 1122 | if (errsts) | ||
| 1011 | return errsts; | 1123 | return errsts; |
| 1012 | power_cond = (cmd[4] & 0xf0) >> 4; | 1124 | power_cond = (cmd[4] & 0xf0) >> 4; |
| 1013 | if (power_cond) { | 1125 | if (power_cond) { |
| 1014 | mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, | 1126 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, |
| 1015 | 0); | 1127 | 0); |
| 1016 | return check_condition_result; | 1128 | return check_condition_result; |
| 1017 | } | 1129 | } |
| @@ -1038,7 +1150,8 @@ static int resp_readcap(struct scsi_cmnd * scp, | |||
| 1038 | unsigned int capac; | 1150 | unsigned int capac; |
| 1039 | int errsts; | 1151 | int errsts; |
| 1040 | 1152 | ||
| 1041 | if ((errsts = check_readiness(scp, 1, devip))) | 1153 | errsts = check_readiness(scp, UAS_ONLY, devip); |
| 1154 | if (errsts) | ||
| 1042 | return errsts; | 1155 | return errsts; |
| 1043 | /* following just in case virtual_gb changed */ | 1156 | /* following just in case virtual_gb changed */ |
| 1044 | sdebug_capacity = get_sdebug_capacity(); | 1157 | sdebug_capacity = get_sdebug_capacity(); |
| @@ -1069,7 +1182,8 @@ static int resp_readcap16(struct scsi_cmnd * scp, | |||
| 1069 | unsigned long long capac; | 1182 | unsigned long long capac; |
| 1070 | int errsts, k, alloc_len; | 1183 | int errsts, k, alloc_len; |
| 1071 | 1184 | ||
| 1072 | if ((errsts = check_readiness(scp, 1, devip))) | 1185 | errsts = check_readiness(scp, UAS_ONLY, devip); |
| 1186 | if (errsts) | ||
| 1073 | return errsts; | 1187 | return errsts; |
| 1074 | alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8) | 1188 | alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8) |
| 1075 | + cmd[13]); | 1189 | + cmd[13]); |
| @@ -1230,12 +1344,18 @@ static int resp_format_pg(unsigned char * p, int pcontrol, int target) | |||
| 1230 | 1344 | ||
| 1231 | static int resp_caching_pg(unsigned char * p, int pcontrol, int target) | 1345 | static int resp_caching_pg(unsigned char * p, int pcontrol, int target) |
| 1232 | { /* Caching page for mode_sense */ | 1346 | { /* Caching page for mode_sense */ |
| 1233 | unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, | 1347 | unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0, |
| 1348 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; | ||
| 1349 | unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, | ||
| 1234 | 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0}; | 1350 | 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0}; |
| 1235 | 1351 | ||
| 1352 | if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts) | ||
| 1353 | caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */ | ||
| 1236 | memcpy(p, caching_pg, sizeof(caching_pg)); | 1354 | memcpy(p, caching_pg, sizeof(caching_pg)); |
| 1237 | if (1 == pcontrol) | 1355 | if (1 == pcontrol) |
| 1238 | memset(p + 2, 0, sizeof(caching_pg) - 2); | 1356 | memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg)); |
| 1357 | else if (2 == pcontrol) | ||
| 1358 | memcpy(p, d_caching_pg, sizeof(d_caching_pg)); | ||
| 1239 | return sizeof(caching_pg); | 1359 | return sizeof(caching_pg); |
| 1240 | } | 1360 | } |
| 1241 | 1361 | ||
| @@ -1350,7 +1470,8 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target, | |||
| 1350 | unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; | 1470 | unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; |
| 1351 | unsigned char *cmd = (unsigned char *)scp->cmnd; | 1471 | unsigned char *cmd = (unsigned char *)scp->cmnd; |
| 1352 | 1472 | ||
| 1353 | if ((errsts = check_readiness(scp, 1, devip))) | 1473 | errsts = check_readiness(scp, UAS_ONLY, devip); |
| 1474 | if (errsts) | ||
| 1354 | return errsts; | 1475 | return errsts; |
| 1355 | dbd = !!(cmd[1] & 0x8); | 1476 | dbd = !!(cmd[1] & 0x8); |
| 1356 | pcontrol = (cmd[2] & 0xc0) >> 6; | 1477 | pcontrol = (cmd[2] & 0xc0) >> 6; |
| @@ -1365,8 +1486,7 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target, | |||
| 1365 | alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]); | 1486 | alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]); |
| 1366 | memset(arr, 0, SDEBUG_MAX_MSENSE_SZ); | 1487 | memset(arr, 0, SDEBUG_MAX_MSENSE_SZ); |
| 1367 | if (0x3 == pcontrol) { /* Saving values not supported */ | 1488 | if (0x3 == pcontrol) { /* Saving values not supported */ |
| 1368 | mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, | 1489 | mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0); |
| 1369 | 0); | ||
| 1370 | return check_condition_result; | 1490 | return check_condition_result; |
| 1371 | } | 1491 | } |
| 1372 | target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) + | 1492 | target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) + |
| @@ -1422,7 +1542,7 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target, | |||
| 1422 | 1542 | ||
| 1423 | if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) { | 1543 | if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) { |
| 1424 | /* TODO: Control Extension page */ | 1544 | /* TODO: Control Extension page */ |
| 1425 | mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, | 1545 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, |
| 1426 | 0); | 1546 | 0); |
| 1427 | return check_condition_result; | 1547 | return check_condition_result; |
| 1428 | } | 1548 | } |
| @@ -1449,7 +1569,7 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target, | |||
| 1449 | break; | 1569 | break; |
| 1450 | case 0x19: /* if spc==1 then sas phy, control+discover */ | 1570 | case 0x19: /* if spc==1 then sas phy, control+discover */ |
| 1451 | if ((subpcode > 0x2) && (subpcode < 0xff)) { | 1571 | if ((subpcode > 0x2) && (subpcode < 0xff)) { |
| 1452 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 1572 | mk_sense_buffer(scp, ILLEGAL_REQUEST, |
| 1453 | INVALID_FIELD_IN_CDB, 0); | 1573 | INVALID_FIELD_IN_CDB, 0); |
| 1454 | return check_condition_result; | 1574 | return check_condition_result; |
| 1455 | } | 1575 | } |
| @@ -1482,14 +1602,14 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target, | |||
| 1482 | } | 1602 | } |
| 1483 | len += resp_iec_m_pg(ap + len, pcontrol, target); | 1603 | len += resp_iec_m_pg(ap + len, pcontrol, target); |
| 1484 | } else { | 1604 | } else { |
| 1485 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 1605 | mk_sense_buffer(scp, ILLEGAL_REQUEST, |
| 1486 | INVALID_FIELD_IN_CDB, 0); | 1606 | INVALID_FIELD_IN_CDB, 0); |
| 1487 | return check_condition_result; | 1607 | return check_condition_result; |
| 1488 | } | 1608 | } |
| 1489 | offset += len; | 1609 | offset += len; |
| 1490 | break; | 1610 | break; |
| 1491 | default: | 1611 | default: |
| 1492 | mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, | 1612 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, |
| 1493 | 0); | 1613 | 0); |
| 1494 | return check_condition_result; | 1614 | return check_condition_result; |
| 1495 | } | 1615 | } |
| @@ -1512,14 +1632,15 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6, | |||
| 1512 | unsigned char arr[SDEBUG_MAX_MSELECT_SZ]; | 1632 | unsigned char arr[SDEBUG_MAX_MSELECT_SZ]; |
| 1513 | unsigned char *cmd = (unsigned char *)scp->cmnd; | 1633 | unsigned char *cmd = (unsigned char *)scp->cmnd; |
| 1514 | 1634 | ||
| 1515 | if ((errsts = check_readiness(scp, 1, devip))) | 1635 | errsts = check_readiness(scp, UAS_ONLY, devip); |
| 1636 | if (errsts) | ||
| 1516 | return errsts; | 1637 | return errsts; |
| 1517 | memset(arr, 0, sizeof(arr)); | 1638 | memset(arr, 0, sizeof(arr)); |
| 1518 | pf = cmd[1] & 0x10; | 1639 | pf = cmd[1] & 0x10; |
| 1519 | sp = cmd[1] & 0x1; | 1640 | sp = cmd[1] & 0x1; |
| 1520 | param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]); | 1641 | param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]); |
| 1521 | if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) { | 1642 | if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) { |
| 1522 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 1643 | mk_sense_buffer(scp, ILLEGAL_REQUEST, |
| 1523 | INVALID_FIELD_IN_CDB, 0); | 1644 | INVALID_FIELD_IN_CDB, 0); |
| 1524 | return check_condition_result; | 1645 | return check_condition_result; |
| 1525 | } | 1646 | } |
| @@ -1528,12 +1649,13 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6, | |||
| 1528 | return (DID_ERROR << 16); | 1649 | return (DID_ERROR << 16); |
| 1529 | else if ((res < param_len) && | 1650 | else if ((res < param_len) && |
| 1530 | (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) | 1651 | (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) |
| 1531 | printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, " | 1652 | sdev_printk(KERN_INFO, scp->device, |
| 1532 | " IO sent=%d bytes\n", param_len, res); | 1653 | "%s: cdb indicated=%d, IO sent=%d bytes\n", |
| 1654 | __func__, param_len, res); | ||
| 1533 | md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2); | 1655 | md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2); |
| 1534 | bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]); | 1656 | bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]); |
| 1535 | if (md_len > 2) { | 1657 | if (md_len > 2) { |
| 1536 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 1658 | mk_sense_buffer(scp, ILLEGAL_REQUEST, |
| 1537 | INVALID_FIELD_IN_PARAM_LIST, 0); | 1659 | INVALID_FIELD_IN_PARAM_LIST, 0); |
| 1538 | return check_condition_result; | 1660 | return check_condition_result; |
| 1539 | } | 1661 | } |
| @@ -1541,7 +1663,7 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6, | |||
| 1541 | mpage = arr[off] & 0x3f; | 1663 | mpage = arr[off] & 0x3f; |
| 1542 | ps = !!(arr[off] & 0x80); | 1664 | ps = !!(arr[off] & 0x80); |
| 1543 | if (ps) { | 1665 | if (ps) { |
| 1544 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 1666 | mk_sense_buffer(scp, ILLEGAL_REQUEST, |
| 1545 | INVALID_FIELD_IN_PARAM_LIST, 0); | 1667 | INVALID_FIELD_IN_PARAM_LIST, 0); |
| 1546 | return check_condition_result; | 1668 | return check_condition_result; |
| 1547 | } | 1669 | } |
| @@ -1549,32 +1671,42 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6, | |||
| 1549 | pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) : | 1671 | pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) : |
| 1550 | (arr[off + 1] + 2); | 1672 | (arr[off + 1] + 2); |
| 1551 | if ((pg_len + off) > param_len) { | 1673 | if ((pg_len + off) > param_len) { |
| 1552 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 1674 | mk_sense_buffer(scp, ILLEGAL_REQUEST, |
| 1553 | PARAMETER_LIST_LENGTH_ERR, 0); | 1675 | PARAMETER_LIST_LENGTH_ERR, 0); |
| 1554 | return check_condition_result; | 1676 | return check_condition_result; |
| 1555 | } | 1677 | } |
| 1556 | switch (mpage) { | 1678 | switch (mpage) { |
| 1679 | case 0x8: /* Caching Mode page */ | ||
| 1680 | if (caching_pg[1] == arr[off + 1]) { | ||
| 1681 | memcpy(caching_pg + 2, arr + off + 2, | ||
| 1682 | sizeof(caching_pg) - 2); | ||
| 1683 | goto set_mode_changed_ua; | ||
| 1684 | } | ||
| 1685 | break; | ||
| 1557 | case 0xa: /* Control Mode page */ | 1686 | case 0xa: /* Control Mode page */ |
| 1558 | if (ctrl_m_pg[1] == arr[off + 1]) { | 1687 | if (ctrl_m_pg[1] == arr[off + 1]) { |
| 1559 | memcpy(ctrl_m_pg + 2, arr + off + 2, | 1688 | memcpy(ctrl_m_pg + 2, arr + off + 2, |
| 1560 | sizeof(ctrl_m_pg) - 2); | 1689 | sizeof(ctrl_m_pg) - 2); |
| 1561 | scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4); | 1690 | scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4); |
| 1562 | return 0; | 1691 | goto set_mode_changed_ua; |
| 1563 | } | 1692 | } |
| 1564 | break; | 1693 | break; |
| 1565 | case 0x1c: /* Informational Exceptions Mode page */ | 1694 | case 0x1c: /* Informational Exceptions Mode page */ |
| 1566 | if (iec_m_pg[1] == arr[off + 1]) { | 1695 | if (iec_m_pg[1] == arr[off + 1]) { |
| 1567 | memcpy(iec_m_pg + 2, arr + off + 2, | 1696 | memcpy(iec_m_pg + 2, arr + off + 2, |
| 1568 | sizeof(iec_m_pg) - 2); | 1697 | sizeof(iec_m_pg) - 2); |
| 1569 | return 0; | 1698 | goto set_mode_changed_ua; |
| 1570 | } | 1699 | } |
| 1571 | break; | 1700 | break; |
| 1572 | default: | 1701 | default: |
| 1573 | break; | 1702 | break; |
| 1574 | } | 1703 | } |
| 1575 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 1704 | mk_sense_buffer(scp, ILLEGAL_REQUEST, |
| 1576 | INVALID_FIELD_IN_PARAM_LIST, 0); | 1705 | INVALID_FIELD_IN_PARAM_LIST, 0); |
| 1577 | return check_condition_result; | 1706 | return check_condition_result; |
| 1707 | set_mode_changed_ua: | ||
| 1708 | set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm); | ||
| 1709 | return 0; | ||
| 1578 | } | 1710 | } |
| 1579 | 1711 | ||
| 1580 | static int resp_temp_l_pg(unsigned char * arr) | 1712 | static int resp_temp_l_pg(unsigned char * arr) |
| @@ -1609,13 +1741,14 @@ static int resp_log_sense(struct scsi_cmnd * scp, | |||
| 1609 | unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; | 1741 | unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; |
| 1610 | unsigned char *cmd = (unsigned char *)scp->cmnd; | 1742 | unsigned char *cmd = (unsigned char *)scp->cmnd; |
| 1611 | 1743 | ||
| 1612 | if ((errsts = check_readiness(scp, 1, devip))) | 1744 | errsts = check_readiness(scp, UAS_ONLY, devip); |
| 1745 | if (errsts) | ||
| 1613 | return errsts; | 1746 | return errsts; |
| 1614 | memset(arr, 0, sizeof(arr)); | 1747 | memset(arr, 0, sizeof(arr)); |
| 1615 | ppc = cmd[1] & 0x2; | 1748 | ppc = cmd[1] & 0x2; |
| 1616 | sp = cmd[1] & 0x1; | 1749 | sp = cmd[1] & 0x1; |
| 1617 | if (ppc || sp) { | 1750 | if (ppc || sp) { |
| 1618 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 1751 | mk_sense_buffer(scp, ILLEGAL_REQUEST, |
| 1619 | INVALID_FIELD_IN_CDB, 0); | 1752 | INVALID_FIELD_IN_CDB, 0); |
| 1620 | return check_condition_result; | 1753 | return check_condition_result; |
| 1621 | } | 1754 | } |
| @@ -1640,7 +1773,7 @@ static int resp_log_sense(struct scsi_cmnd * scp, | |||
| 1640 | arr[3] = resp_ie_l_pg(arr + 4); | 1773 | arr[3] = resp_ie_l_pg(arr + 4); |
| 1641 | break; | 1774 | break; |
| 1642 | default: | 1775 | default: |
| 1643 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 1776 | mk_sense_buffer(scp, ILLEGAL_REQUEST, |
| 1644 | INVALID_FIELD_IN_CDB, 0); | 1777 | INVALID_FIELD_IN_CDB, 0); |
| 1645 | return check_condition_result; | 1778 | return check_condition_result; |
| 1646 | } | 1779 | } |
| @@ -1673,12 +1806,12 @@ static int resp_log_sense(struct scsi_cmnd * scp, | |||
| 1673 | arr[3] = n - 4; | 1806 | arr[3] = n - 4; |
| 1674 | break; | 1807 | break; |
| 1675 | default: | 1808 | default: |
| 1676 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 1809 | mk_sense_buffer(scp, ILLEGAL_REQUEST, |
| 1677 | INVALID_FIELD_IN_CDB, 0); | 1810 | INVALID_FIELD_IN_CDB, 0); |
| 1678 | return check_condition_result; | 1811 | return check_condition_result; |
| 1679 | } | 1812 | } |
| 1680 | } else { | 1813 | } else { |
| 1681 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 1814 | mk_sense_buffer(scp, ILLEGAL_REQUEST, |
| 1682 | INVALID_FIELD_IN_CDB, 0); | 1815 | INVALID_FIELD_IN_CDB, 0); |
| 1683 | return check_condition_result; | 1816 | return check_condition_result; |
| 1684 | } | 1817 | } |
| @@ -1687,16 +1820,16 @@ static int resp_log_sense(struct scsi_cmnd * scp, | |||
| 1687 | min(len, SDEBUG_MAX_INQ_ARR_SZ)); | 1820 | min(len, SDEBUG_MAX_INQ_ARR_SZ)); |
| 1688 | } | 1821 | } |
| 1689 | 1822 | ||
| 1690 | static int check_device_access_params(struct sdebug_dev_info *devi, | 1823 | static int check_device_access_params(struct scsi_cmnd *scp, |
| 1691 | unsigned long long lba, unsigned int num) | 1824 | unsigned long long lba, unsigned int num) |
| 1692 | { | 1825 | { |
| 1693 | if (lba + num > sdebug_capacity) { | 1826 | if (lba + num > sdebug_capacity) { |
| 1694 | mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0); | 1827 | mk_sense_buffer(scp, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0); |
| 1695 | return check_condition_result; | 1828 | return check_condition_result; |
| 1696 | } | 1829 | } |
| 1697 | /* transfer length excessive (tie in to block limits VPD page) */ | 1830 | /* transfer length excessive (tie in to block limits VPD page) */ |
| 1698 | if (num > sdebug_store_sectors) { | 1831 | if (num > sdebug_store_sectors) { |
| 1699 | mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); | 1832 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); |
| 1700 | return check_condition_result; | 1833 | return check_condition_result; |
| 1701 | } | 1834 | } |
| 1702 | return 0; | 1835 | return 0; |
| @@ -1704,7 +1837,6 @@ static int check_device_access_params(struct sdebug_dev_info *devi, | |||
| 1704 | 1837 | ||
| 1705 | /* Returns number of bytes copied or -1 if error. */ | 1838 | /* Returns number of bytes copied or -1 if error. */ |
| 1706 | static int do_device_access(struct scsi_cmnd *scmd, | 1839 | static int do_device_access(struct scsi_cmnd *scmd, |
| 1707 | struct sdebug_dev_info *devi, | ||
| 1708 | unsigned long long lba, unsigned int num, int write) | 1840 | unsigned long long lba, unsigned int num, int write) |
| 1709 | { | 1841 | { |
| 1710 | int ret; | 1842 | int ret; |
| @@ -1861,13 +1993,12 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, | |||
| 1861 | } | 1993 | } |
| 1862 | 1994 | ||
| 1863 | static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, | 1995 | static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, |
| 1864 | unsigned int num, struct sdebug_dev_info *devip, | 1996 | unsigned int num, u32 ei_lba) |
| 1865 | u32 ei_lba) | ||
| 1866 | { | 1997 | { |
| 1867 | unsigned long iflags; | 1998 | unsigned long iflags; |
| 1868 | int ret; | 1999 | int ret; |
| 1869 | 2000 | ||
| 1870 | ret = check_device_access_params(devip, lba, num); | 2001 | ret = check_device_access_params(SCpnt, lba, num); |
| 1871 | if (ret) | 2002 | if (ret) |
| 1872 | return ret; | 2003 | return ret; |
| 1873 | 2004 | ||
| @@ -1875,16 +2006,16 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, | |||
| 1875 | (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) && | 2006 | (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) && |
| 1876 | ((lba + num) > OPT_MEDIUM_ERR_ADDR)) { | 2007 | ((lba + num) > OPT_MEDIUM_ERR_ADDR)) { |
| 1877 | /* claim unrecoverable read error */ | 2008 | /* claim unrecoverable read error */ |
| 1878 | mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0); | 2009 | mk_sense_buffer(SCpnt, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0); |
| 1879 | /* set info field and valid bit for fixed descriptor */ | 2010 | /* set info field and valid bit for fixed descriptor */ |
| 1880 | if (0x70 == (devip->sense_buff[0] & 0x7f)) { | 2011 | if (0x70 == (SCpnt->sense_buffer[0] & 0x7f)) { |
| 1881 | devip->sense_buff[0] |= 0x80; /* Valid bit */ | 2012 | SCpnt->sense_buffer[0] |= 0x80; /* Valid bit */ |
| 1882 | ret = (lba < OPT_MEDIUM_ERR_ADDR) | 2013 | ret = (lba < OPT_MEDIUM_ERR_ADDR) |
| 1883 | ? OPT_MEDIUM_ERR_ADDR : (int)lba; | 2014 | ? OPT_MEDIUM_ERR_ADDR : (int)lba; |
| 1884 | devip->sense_buff[3] = (ret >> 24) & 0xff; | 2015 | SCpnt->sense_buffer[3] = (ret >> 24) & 0xff; |
| 1885 | devip->sense_buff[4] = (ret >> 16) & 0xff; | 2016 | SCpnt->sense_buffer[4] = (ret >> 16) & 0xff; |
| 1886 | devip->sense_buff[5] = (ret >> 8) & 0xff; | 2017 | SCpnt->sense_buffer[5] = (ret >> 8) & 0xff; |
| 1887 | devip->sense_buff[6] = ret & 0xff; | 2018 | SCpnt->sense_buffer[6] = ret & 0xff; |
| 1888 | } | 2019 | } |
| 1889 | scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); | 2020 | scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); |
| 1890 | return check_condition_result; | 2021 | return check_condition_result; |
| @@ -1898,12 +2029,12 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, | |||
| 1898 | 2029 | ||
| 1899 | if (prot_ret) { | 2030 | if (prot_ret) { |
| 1900 | read_unlock_irqrestore(&atomic_rw, iflags); | 2031 | read_unlock_irqrestore(&atomic_rw, iflags); |
| 1901 | mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret); | 2032 | mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, prot_ret); |
| 1902 | return illegal_condition_result; | 2033 | return illegal_condition_result; |
| 1903 | } | 2034 | } |
| 1904 | } | 2035 | } |
| 1905 | 2036 | ||
| 1906 | ret = do_device_access(SCpnt, devip, lba, num, 0); | 2037 | ret = do_device_access(SCpnt, lba, num, 0); |
| 1907 | read_unlock_irqrestore(&atomic_rw, iflags); | 2038 | read_unlock_irqrestore(&atomic_rw, iflags); |
| 1908 | if (ret == -1) | 2039 | if (ret == -1) |
| 1909 | return DID_ERROR << 16; | 2040 | return DID_ERROR << 16; |
| @@ -1915,22 +2046,23 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, | |||
| 1915 | 2046 | ||
| 1916 | void dump_sector(unsigned char *buf, int len) | 2047 | void dump_sector(unsigned char *buf, int len) |
| 1917 | { | 2048 | { |
| 1918 | int i, j; | 2049 | int i, j, n; |
| 1919 | |||
| 1920 | printk(KERN_ERR ">>> Sector Dump <<<\n"); | ||
| 1921 | 2050 | ||
| 2051 | pr_err(">>> Sector Dump <<<\n"); | ||
| 1922 | for (i = 0 ; i < len ; i += 16) { | 2052 | for (i = 0 ; i < len ; i += 16) { |
| 1923 | printk(KERN_ERR "%04d: ", i); | 2053 | char b[128]; |
| 1924 | 2054 | ||
| 1925 | for (j = 0 ; j < 16 ; j++) { | 2055 | for (j = 0, n = 0; j < 16; j++) { |
| 1926 | unsigned char c = buf[i+j]; | 2056 | unsigned char c = buf[i+j]; |
| 2057 | |||
| 1927 | if (c >= 0x20 && c < 0x7e) | 2058 | if (c >= 0x20 && c < 0x7e) |
| 1928 | printk(" %c ", buf[i+j]); | 2059 | n += scnprintf(b + n, sizeof(b) - n, |
| 2060 | " %c ", buf[i+j]); | ||
| 1929 | else | 2061 | else |
| 1930 | printk("%02x ", buf[i+j]); | 2062 | n += scnprintf(b + n, sizeof(b) - n, |
| 2063 | "%02x ", buf[i+j]); | ||
| 1931 | } | 2064 | } |
| 1932 | 2065 | pr_err("%04d: %s\n", i, b); | |
| 1933 | printk("\n"); | ||
| 1934 | } | 2066 | } |
| 1935 | } | 2067 | } |
| 1936 | 2068 | ||
| @@ -2092,13 +2224,12 @@ static void unmap_region(sector_t lba, unsigned int len) | |||
| 2092 | } | 2224 | } |
| 2093 | 2225 | ||
| 2094 | static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, | 2226 | static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, |
| 2095 | unsigned int num, struct sdebug_dev_info *devip, | 2227 | unsigned int num, u32 ei_lba) |
| 2096 | u32 ei_lba) | ||
| 2097 | { | 2228 | { |
| 2098 | unsigned long iflags; | 2229 | unsigned long iflags; |
| 2099 | int ret; | 2230 | int ret; |
| 2100 | 2231 | ||
| 2101 | ret = check_device_access_params(devip, lba, num); | 2232 | ret = check_device_access_params(SCpnt, lba, num); |
| 2102 | if (ret) | 2233 | if (ret) |
| 2103 | return ret; | 2234 | return ret; |
| 2104 | 2235 | ||
| @@ -2110,12 +2241,13 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, | |||
| 2110 | 2241 | ||
| 2111 | if (prot_ret) { | 2242 | if (prot_ret) { |
| 2112 | write_unlock_irqrestore(&atomic_rw, iflags); | 2243 | write_unlock_irqrestore(&atomic_rw, iflags); |
| 2113 | mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret); | 2244 | mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, |
| 2245 | prot_ret); | ||
| 2114 | return illegal_condition_result; | 2246 | return illegal_condition_result; |
| 2115 | } | 2247 | } |
| 2116 | } | 2248 | } |
| 2117 | 2249 | ||
| 2118 | ret = do_device_access(SCpnt, devip, lba, num, 1); | 2250 | ret = do_device_access(SCpnt, lba, num, 1); |
| 2119 | if (scsi_debug_lbp()) | 2251 | if (scsi_debug_lbp()) |
| 2120 | map_region(lba, num); | 2252 | map_region(lba, num); |
| 2121 | write_unlock_irqrestore(&atomic_rw, iflags); | 2253 | write_unlock_irqrestore(&atomic_rw, iflags); |
| @@ -2123,26 +2255,26 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, | |||
| 2123 | return (DID_ERROR << 16); | 2255 | return (DID_ERROR << 16); |
| 2124 | else if ((ret < (num * scsi_debug_sector_size)) && | 2256 | else if ((ret < (num * scsi_debug_sector_size)) && |
| 2125 | (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) | 2257 | (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) |
| 2126 | printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, " | 2258 | sdev_printk(KERN_INFO, SCpnt->device, |
| 2127 | " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret); | 2259 | "%s: write: cdb indicated=%u, IO sent=%d bytes\n", |
| 2260 | my_name, num * scsi_debug_sector_size, ret); | ||
| 2128 | 2261 | ||
| 2129 | return 0; | 2262 | return 0; |
| 2130 | } | 2263 | } |
| 2131 | 2264 | ||
| 2132 | static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, | 2265 | static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, |
| 2133 | unsigned int num, struct sdebug_dev_info *devip, | 2266 | unsigned int num, u32 ei_lba, unsigned int unmap) |
| 2134 | u32 ei_lba, unsigned int unmap) | ||
| 2135 | { | 2267 | { |
| 2136 | unsigned long iflags; | 2268 | unsigned long iflags; |
| 2137 | unsigned long long i; | 2269 | unsigned long long i; |
| 2138 | int ret; | 2270 | int ret; |
| 2139 | 2271 | ||
| 2140 | ret = check_device_access_params(devip, lba, num); | 2272 | ret = check_device_access_params(scmd, lba, num); |
| 2141 | if (ret) | 2273 | if (ret) |
| 2142 | return ret; | 2274 | return ret; |
| 2143 | 2275 | ||
| 2144 | if (num > scsi_debug_write_same_length) { | 2276 | if (num > scsi_debug_write_same_length) { |
| 2145 | mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, | 2277 | mk_sense_buffer(scmd, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, |
| 2146 | 0); | 2278 | 0); |
| 2147 | return check_condition_result; | 2279 | return check_condition_result; |
| 2148 | } | 2280 | } |
| @@ -2164,8 +2296,10 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, | |||
| 2164 | return (DID_ERROR << 16); | 2296 | return (DID_ERROR << 16); |
| 2165 | } else if ((ret < (num * scsi_debug_sector_size)) && | 2297 | } else if ((ret < (num * scsi_debug_sector_size)) && |
| 2166 | (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) | 2298 | (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) |
| 2167 | printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, " | 2299 | sdev_printk(KERN_INFO, scmd->device, |
| 2168 | " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret); | 2300 | "%s: %s: cdb indicated=%u, IO sent=%d bytes\n", |
| 2301 | my_name, "write same", | ||
| 2302 | num * scsi_debug_sector_size, ret); | ||
| 2169 | 2303 | ||
| 2170 | /* Copy first sector to remaining blocks */ | 2304 | /* Copy first sector to remaining blocks */ |
| 2171 | for (i = 1 ; i < num ; i++) | 2305 | for (i = 1 ; i < num ; i++) |
| @@ -2195,7 +2329,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip) | |||
| 2195 | int ret; | 2329 | int ret; |
| 2196 | unsigned long iflags; | 2330 | unsigned long iflags; |
| 2197 | 2331 | ||
| 2198 | ret = check_readiness(scmd, 1, devip); | 2332 | ret = check_readiness(scmd, UAS_ONLY, devip); |
| 2199 | if (ret) | 2333 | if (ret) |
| 2200 | return ret; | 2334 | return ret; |
| 2201 | 2335 | ||
| @@ -2221,7 +2355,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip) | |||
| 2221 | unsigned long long lba = get_unaligned_be64(&desc[i].lba); | 2355 | unsigned long long lba = get_unaligned_be64(&desc[i].lba); |
| 2222 | unsigned int num = get_unaligned_be32(&desc[i].blocks); | 2356 | unsigned int num = get_unaligned_be32(&desc[i].blocks); |
| 2223 | 2357 | ||
| 2224 | ret = check_device_access_params(devip, lba, num); | 2358 | ret = check_device_access_params(scmd, lba, num); |
| 2225 | if (ret) | 2359 | if (ret) |
| 2226 | goto out; | 2360 | goto out; |
| 2227 | 2361 | ||
| @@ -2247,7 +2381,7 @@ static int resp_get_lba_status(struct scsi_cmnd * scmd, | |||
| 2247 | unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN]; | 2381 | unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN]; |
| 2248 | int ret; | 2382 | int ret; |
| 2249 | 2383 | ||
| 2250 | ret = check_readiness(scmd, 1, devip); | 2384 | ret = check_readiness(scmd, UAS_ONLY, devip); |
| 2251 | if (ret) | 2385 | if (ret) |
| 2252 | return ret; | 2386 | return ret; |
| 2253 | 2387 | ||
| @@ -2257,7 +2391,7 @@ static int resp_get_lba_status(struct scsi_cmnd * scmd, | |||
| 2257 | if (alloc_len < 24) | 2391 | if (alloc_len < 24) |
| 2258 | return 0; | 2392 | return 0; |
| 2259 | 2393 | ||
| 2260 | ret = check_device_access_params(devip, lba, 1); | 2394 | ret = check_device_access_params(scmd, lba, 1); |
| 2261 | if (ret) | 2395 | if (ret) |
| 2262 | return ret; | 2396 | return ret; |
| 2263 | 2397 | ||
| @@ -2278,7 +2412,8 @@ static int resp_report_luns(struct scsi_cmnd * scp, | |||
| 2278 | struct sdebug_dev_info * devip) | 2412 | struct sdebug_dev_info * devip) |
| 2279 | { | 2413 | { |
| 2280 | unsigned int alloc_len; | 2414 | unsigned int alloc_len; |
| 2281 | int lun_cnt, i, upper, num, n, wlun, lun; | 2415 | int lun_cnt, i, upper, num, n; |
| 2416 | u64 wlun, lun; | ||
| 2282 | unsigned char *cmd = (unsigned char *)scp->cmnd; | 2417 | unsigned char *cmd = (unsigned char *)scp->cmnd; |
| 2283 | int select_report = (int)cmd[2]; | 2418 | int select_report = (int)cmd[2]; |
| 2284 | struct scsi_lun *one_lun; | 2419 | struct scsi_lun *one_lun; |
| @@ -2287,7 +2422,7 @@ static int resp_report_luns(struct scsi_cmnd * scp, | |||
| 2287 | 2422 | ||
| 2288 | alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24); | 2423 | alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24); |
| 2289 | if ((alloc_len < 4) || (select_report > 2)) { | 2424 | if ((alloc_len < 4) || (select_report > 2)) { |
| 2290 | mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, | 2425 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, |
| 2291 | 0); | 2426 | 0); |
| 2292 | return check_condition_result; | 2427 | return check_condition_result; |
| 2293 | } | 2428 | } |
| @@ -2341,7 +2476,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, | |||
| 2341 | /* better not to use temporary buffer. */ | 2476 | /* better not to use temporary buffer. */ |
| 2342 | buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC); | 2477 | buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC); |
| 2343 | if (!buf) { | 2478 | if (!buf) { |
| 2344 | mk_sense_buffer(devip, NOT_READY, | 2479 | mk_sense_buffer(scp, NOT_READY, |
| 2345 | LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | 2480 | LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); |
| 2346 | return check_condition_result; | 2481 | return check_condition_result; |
| 2347 | } | 2482 | } |
| @@ -2365,34 +2500,125 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, | |||
| 2365 | return 0; | 2500 | return 0; |
| 2366 | } | 2501 | } |
| 2367 | 2502 | ||
| 2368 | /* When timer goes off this function is called. */ | 2503 | /* When timer or tasklet goes off this function is called. */ |
| 2369 | static void timer_intr_handler(unsigned long indx) | 2504 | static void sdebug_q_cmd_complete(unsigned long indx) |
| 2370 | { | 2505 | { |
| 2371 | struct sdebug_queued_cmd * sqcp; | 2506 | int qa_indx; |
| 2507 | int retiring = 0; | ||
| 2372 | unsigned long iflags; | 2508 | unsigned long iflags; |
| 2509 | struct sdebug_queued_cmd *sqcp; | ||
| 2510 | struct scsi_cmnd *scp; | ||
| 2511 | struct sdebug_dev_info *devip; | ||
| 2373 | 2512 | ||
| 2374 | if (indx >= scsi_debug_max_queue) { | 2513 | atomic_inc(&sdebug_completions); |
| 2375 | printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too " | 2514 | qa_indx = indx; |
| 2376 | "large\n"); | 2515 | if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) { |
| 2516 | pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx); | ||
| 2377 | return; | 2517 | return; |
| 2378 | } | 2518 | } |
| 2379 | spin_lock_irqsave(&queued_arr_lock, iflags); | 2519 | spin_lock_irqsave(&queued_arr_lock, iflags); |
| 2380 | sqcp = &queued_arr[(int)indx]; | 2520 | sqcp = &queued_arr[qa_indx]; |
| 2381 | if (! sqcp->in_use) { | 2521 | scp = sqcp->a_cmnd; |
| 2382 | printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected " | 2522 | if (NULL == scp) { |
| 2383 | "interrupt\n"); | 2523 | spin_unlock_irqrestore(&queued_arr_lock, iflags); |
| 2524 | pr_err("%s: scp is NULL\n", __func__); | ||
| 2525 | return; | ||
| 2526 | } | ||
| 2527 | devip = (struct sdebug_dev_info *)scp->device->hostdata; | ||
| 2528 | if (devip) | ||
| 2529 | atomic_dec(&devip->num_in_q); | ||
| 2530 | else | ||
| 2531 | pr_err("%s: devip=NULL\n", __func__); | ||
| 2532 | if (atomic_read(&retired_max_queue) > 0) | ||
| 2533 | retiring = 1; | ||
| 2534 | |||
| 2535 | sqcp->a_cmnd = NULL; | ||
| 2536 | if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) { | ||
| 2384 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 2537 | spin_unlock_irqrestore(&queued_arr_lock, iflags); |
| 2538 | pr_err("%s: Unexpected completion\n", __func__); | ||
| 2385 | return; | 2539 | return; |
| 2386 | } | 2540 | } |
| 2387 | sqcp->in_use = 0; | 2541 | |
| 2388 | if (sqcp->done_funct) { | 2542 | if (unlikely(retiring)) { /* user has reduced max_queue */ |
| 2389 | sqcp->a_cmnd->result = sqcp->scsi_result; | 2543 | int k, retval; |
| 2390 | sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */ | 2544 | |
| 2545 | retval = atomic_read(&retired_max_queue); | ||
| 2546 | if (qa_indx >= retval) { | ||
| 2547 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
| 2548 | pr_err("%s: index %d too large\n", __func__, retval); | ||
| 2549 | return; | ||
| 2550 | } | ||
| 2551 | k = find_last_bit(queued_in_use_bm, retval); | ||
| 2552 | if ((k < scsi_debug_max_queue) || (k == retval)) | ||
| 2553 | atomic_set(&retired_max_queue, 0); | ||
| 2554 | else | ||
| 2555 | atomic_set(&retired_max_queue, k + 1); | ||
| 2391 | } | 2556 | } |
| 2392 | sqcp->done_funct = NULL; | ||
| 2393 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 2557 | spin_unlock_irqrestore(&queued_arr_lock, iflags); |
| 2558 | scp->scsi_done(scp); /* callback to mid level */ | ||
| 2394 | } | 2559 | } |
| 2395 | 2560 | ||
| 2561 | /* When high resolution timer goes off this function is called. */ | ||
| 2562 | static enum hrtimer_restart | ||
| 2563 | sdebug_q_cmd_hrt_complete(struct hrtimer *timer) | ||
| 2564 | { | ||
| 2565 | int qa_indx; | ||
| 2566 | int retiring = 0; | ||
| 2567 | unsigned long iflags; | ||
| 2568 | struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer; | ||
| 2569 | struct sdebug_queued_cmd *sqcp; | ||
| 2570 | struct scsi_cmnd *scp; | ||
| 2571 | struct sdebug_dev_info *devip; | ||
| 2572 | |||
| 2573 | atomic_inc(&sdebug_completions); | ||
| 2574 | qa_indx = sd_hrtp->qa_indx; | ||
| 2575 | if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) { | ||
| 2576 | pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx); | ||
| 2577 | goto the_end; | ||
| 2578 | } | ||
| 2579 | spin_lock_irqsave(&queued_arr_lock, iflags); | ||
| 2580 | sqcp = &queued_arr[qa_indx]; | ||
| 2581 | scp = sqcp->a_cmnd; | ||
| 2582 | if (NULL == scp) { | ||
| 2583 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
| 2584 | pr_err("%s: scp is NULL\n", __func__); | ||
| 2585 | goto the_end; | ||
| 2586 | } | ||
| 2587 | devip = (struct sdebug_dev_info *)scp->device->hostdata; | ||
| 2588 | if (devip) | ||
| 2589 | atomic_dec(&devip->num_in_q); | ||
| 2590 | else | ||
| 2591 | pr_err("%s: devip=NULL\n", __func__); | ||
| 2592 | if (atomic_read(&retired_max_queue) > 0) | ||
| 2593 | retiring = 1; | ||
| 2594 | |||
| 2595 | sqcp->a_cmnd = NULL; | ||
| 2596 | if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) { | ||
| 2597 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
| 2598 | pr_err("%s: Unexpected completion\n", __func__); | ||
| 2599 | goto the_end; | ||
| 2600 | } | ||
| 2601 | |||
| 2602 | if (unlikely(retiring)) { /* user has reduced max_queue */ | ||
| 2603 | int k, retval; | ||
| 2604 | |||
| 2605 | retval = atomic_read(&retired_max_queue); | ||
| 2606 | if (qa_indx >= retval) { | ||
| 2607 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
| 2608 | pr_err("%s: index %d too large\n", __func__, retval); | ||
| 2609 | goto the_end; | ||
| 2610 | } | ||
| 2611 | k = find_last_bit(queued_in_use_bm, retval); | ||
| 2612 | if ((k < scsi_debug_max_queue) || (k == retval)) | ||
| 2613 | atomic_set(&retired_max_queue, 0); | ||
| 2614 | else | ||
| 2615 | atomic_set(&retired_max_queue, k + 1); | ||
| 2616 | } | ||
| 2617 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
| 2618 | scp->scsi_done(scp); /* callback to mid level */ | ||
| 2619 | the_end: | ||
| 2620 | return HRTIMER_NORESTART; | ||
| 2621 | } | ||
| 2396 | 2622 | ||
| 2397 | static struct sdebug_dev_info * | 2623 | static struct sdebug_dev_info * |
| 2398 | sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags) | 2624 | sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags) |
| @@ -2418,7 +2644,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev) | |||
| 2418 | return devip; | 2644 | return devip; |
| 2419 | sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host); | 2645 | sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host); |
| 2420 | if (!sdbg_host) { | 2646 | if (!sdbg_host) { |
| 2421 | printk(KERN_ERR "Host info NULL\n"); | 2647 | pr_err("%s: Host info NULL\n", __func__); |
| 2422 | return NULL; | 2648 | return NULL; |
| 2423 | } | 2649 | } |
| 2424 | list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { | 2650 | list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { |
| @@ -2444,15 +2670,9 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev) | |||
| 2444 | open_devip->target = sdev->id; | 2670 | open_devip->target = sdev->id; |
| 2445 | open_devip->lun = sdev->lun; | 2671 | open_devip->lun = sdev->lun; |
| 2446 | open_devip->sdbg_host = sdbg_host; | 2672 | open_devip->sdbg_host = sdbg_host; |
| 2447 | open_devip->reset = 1; | 2673 | atomic_set(&open_devip->num_in_q, 0); |
| 2674 | set_bit(SDEBUG_UA_POR, open_devip->uas_bm); | ||
| 2448 | open_devip->used = 1; | 2675 | open_devip->used = 1; |
| 2449 | memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN); | ||
| 2450 | if (scsi_debug_dsense) | ||
| 2451 | open_devip->sense_buff[0] = 0x72; | ||
| 2452 | else { | ||
| 2453 | open_devip->sense_buff[0] = 0x70; | ||
| 2454 | open_devip->sense_buff[7] = 0xa; | ||
| 2455 | } | ||
| 2456 | if (sdev->lun == SAM2_WLUN_REPORT_LUNS) | 2676 | if (sdev->lun == SAM2_WLUN_REPORT_LUNS) |
| 2457 | open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff; | 2677 | open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff; |
| 2458 | 2678 | ||
| @@ -2462,7 +2682,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev) | |||
| 2462 | static int scsi_debug_slave_alloc(struct scsi_device *sdp) | 2682 | static int scsi_debug_slave_alloc(struct scsi_device *sdp) |
| 2463 | { | 2683 | { |
| 2464 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 2684 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) |
| 2465 | printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n", | 2685 | printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n", |
| 2466 | sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); | 2686 | sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); |
| 2467 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue); | 2687 | queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue); |
| 2468 | return 0; | 2688 | return 0; |
| @@ -2473,7 +2693,7 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp) | |||
| 2473 | struct sdebug_dev_info *devip; | 2693 | struct sdebug_dev_info *devip; |
| 2474 | 2694 | ||
| 2475 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 2695 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) |
| 2476 | printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n", | 2696 | printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n", |
| 2477 | sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); | 2697 | sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); |
| 2478 | if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN) | 2698 | if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN) |
| 2479 | sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN; | 2699 | sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN; |
| @@ -2481,10 +2701,11 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp) | |||
| 2481 | if (NULL == devip) | 2701 | if (NULL == devip) |
| 2482 | return 1; /* no resources, will be marked offline */ | 2702 | return 1; /* no resources, will be marked offline */ |
| 2483 | sdp->hostdata = devip; | 2703 | sdp->hostdata = devip; |
| 2704 | sdp->tagged_supported = 1; | ||
| 2484 | if (sdp->host->cmd_per_lun) | 2705 | if (sdp->host->cmd_per_lun) |
| 2485 | scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING, | 2706 | scsi_adjust_queue_depth(sdp, DEF_TAGGED_QUEUING, |
| 2486 | sdp->host->cmd_per_lun); | 2707 | DEF_CMD_PER_LUN); |
| 2487 | blk_queue_max_segment_size(sdp->request_queue, 256 * 1024); | 2708 | blk_queue_max_segment_size(sdp->request_queue, -1U); |
| 2488 | if (scsi_debug_no_uld) | 2709 | if (scsi_debug_no_uld) |
| 2489 | sdp->no_uld_attach = 1; | 2710 | sdp->no_uld_attach = 1; |
| 2490 | return 0; | 2711 | return 0; |
| @@ -2496,7 +2717,7 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp) | |||
| 2496 | (struct sdebug_dev_info *)sdp->hostdata; | 2717 | (struct sdebug_dev_info *)sdp->hostdata; |
| 2497 | 2718 | ||
| 2498 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 2719 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) |
| 2499 | printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n", | 2720 | printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n", |
| 2500 | sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); | 2721 | sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); |
| 2501 | if (devip) { | 2722 | if (devip) { |
| 2502 | /* make this slot available for re-use */ | 2723 | /* make this slot available for re-use */ |
| @@ -2505,150 +2726,230 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp) | |||
| 2505 | } | 2726 | } |
| 2506 | } | 2727 | } |
| 2507 | 2728 | ||
| 2508 | /* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */ | 2729 | /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */ |
| 2509 | static int stop_queued_cmnd(struct scsi_cmnd *cmnd) | 2730 | static int stop_queued_cmnd(struct scsi_cmnd *cmnd) |
| 2510 | { | 2731 | { |
| 2511 | unsigned long iflags; | 2732 | unsigned long iflags; |
| 2512 | int k; | 2733 | int k, qmax, r_qmax; |
| 2513 | struct sdebug_queued_cmd *sqcp; | 2734 | struct sdebug_queued_cmd *sqcp; |
| 2735 | struct sdebug_dev_info *devip; | ||
| 2514 | 2736 | ||
| 2515 | spin_lock_irqsave(&queued_arr_lock, iflags); | 2737 | spin_lock_irqsave(&queued_arr_lock, iflags); |
| 2516 | for (k = 0; k < scsi_debug_max_queue; ++k) { | 2738 | qmax = scsi_debug_max_queue; |
| 2517 | sqcp = &queued_arr[k]; | 2739 | r_qmax = atomic_read(&retired_max_queue); |
| 2518 | if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) { | 2740 | if (r_qmax > qmax) |
| 2519 | del_timer_sync(&sqcp->cmnd_timer); | 2741 | qmax = r_qmax; |
| 2520 | sqcp->in_use = 0; | 2742 | for (k = 0; k < qmax; ++k) { |
| 2521 | sqcp->a_cmnd = NULL; | 2743 | if (test_bit(k, queued_in_use_bm)) { |
| 2522 | break; | 2744 | sqcp = &queued_arr[k]; |
| 2745 | if (cmnd == sqcp->a_cmnd) { | ||
| 2746 | if (scsi_debug_ndelay > 0) { | ||
| 2747 | if (sqcp->sd_hrtp) | ||
| 2748 | hrtimer_cancel( | ||
| 2749 | &sqcp->sd_hrtp->hrt); | ||
| 2750 | } else if (scsi_debug_delay > 0) { | ||
| 2751 | if (sqcp->cmnd_timerp) | ||
| 2752 | del_timer_sync( | ||
| 2753 | sqcp->cmnd_timerp); | ||
| 2754 | } else if (scsi_debug_delay < 0) { | ||
| 2755 | if (sqcp->tletp) | ||
| 2756 | tasklet_kill(sqcp->tletp); | ||
| 2757 | } | ||
| 2758 | __clear_bit(k, queued_in_use_bm); | ||
| 2759 | devip = (struct sdebug_dev_info *) | ||
| 2760 | cmnd->device->hostdata; | ||
| 2761 | if (devip) | ||
| 2762 | atomic_dec(&devip->num_in_q); | ||
| 2763 | sqcp->a_cmnd = NULL; | ||
| 2764 | break; | ||
| 2765 | } | ||
| 2523 | } | 2766 | } |
| 2524 | } | 2767 | } |
| 2525 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 2768 | spin_unlock_irqrestore(&queued_arr_lock, iflags); |
| 2526 | return (k < scsi_debug_max_queue) ? 1 : 0; | 2769 | return (k < qmax) ? 1 : 0; |
| 2527 | } | 2770 | } |
| 2528 | 2771 | ||
| 2529 | /* Deletes (stops) timers of all queued commands */ | 2772 | /* Deletes (stops) timers or tasklets of all queued commands */ |
| 2530 | static void stop_all_queued(void) | 2773 | static void stop_all_queued(void) |
| 2531 | { | 2774 | { |
| 2532 | unsigned long iflags; | 2775 | unsigned long iflags; |
| 2533 | int k; | 2776 | int k; |
| 2534 | struct sdebug_queued_cmd *sqcp; | 2777 | struct sdebug_queued_cmd *sqcp; |
| 2778 | struct sdebug_dev_info *devip; | ||
| 2535 | 2779 | ||
| 2536 | spin_lock_irqsave(&queued_arr_lock, iflags); | 2780 | spin_lock_irqsave(&queued_arr_lock, iflags); |
| 2537 | for (k = 0; k < scsi_debug_max_queue; ++k) { | 2781 | for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { |
| 2538 | sqcp = &queued_arr[k]; | 2782 | if (test_bit(k, queued_in_use_bm)) { |
| 2539 | if (sqcp->in_use && sqcp->a_cmnd) { | 2783 | sqcp = &queued_arr[k]; |
| 2540 | del_timer_sync(&sqcp->cmnd_timer); | 2784 | if (sqcp->a_cmnd) { |
| 2541 | sqcp->in_use = 0; | 2785 | if (scsi_debug_ndelay > 0) { |
| 2542 | sqcp->a_cmnd = NULL; | 2786 | if (sqcp->sd_hrtp) |
| 2787 | hrtimer_cancel( | ||
| 2788 | &sqcp->sd_hrtp->hrt); | ||
| 2789 | } else if (scsi_debug_delay > 0) { | ||
| 2790 | if (sqcp->cmnd_timerp) | ||
| 2791 | del_timer_sync( | ||
| 2792 | sqcp->cmnd_timerp); | ||
| 2793 | } else if (scsi_debug_delay < 0) { | ||
| 2794 | if (sqcp->tletp) | ||
| 2795 | tasklet_kill(sqcp->tletp); | ||
| 2796 | } | ||
| 2797 | __clear_bit(k, queued_in_use_bm); | ||
| 2798 | devip = (struct sdebug_dev_info *) | ||
| 2799 | sqcp->a_cmnd->device->hostdata; | ||
| 2800 | if (devip) | ||
| 2801 | atomic_dec(&devip->num_in_q); | ||
| 2802 | sqcp->a_cmnd = NULL; | ||
| 2803 | } | ||
| 2543 | } | 2804 | } |
| 2544 | } | 2805 | } |
| 2545 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 2806 | spin_unlock_irqrestore(&queued_arr_lock, iflags); |
| 2546 | } | 2807 | } |
| 2547 | 2808 | ||
| 2548 | static int scsi_debug_abort(struct scsi_cmnd * SCpnt) | 2809 | /* Free queued command memory on heap */ |
| 2810 | static void free_all_queued(void) | ||
| 2549 | { | 2811 | { |
| 2550 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 2812 | unsigned long iflags; |
| 2551 | printk(KERN_INFO "scsi_debug: abort\n"); | 2813 | int k; |
| 2552 | ++num_aborts; | 2814 | struct sdebug_queued_cmd *sqcp; |
| 2553 | stop_queued_cmnd(SCpnt); | 2815 | |
| 2554 | return SUCCESS; | 2816 | spin_lock_irqsave(&queued_arr_lock, iflags); |
| 2817 | for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { | ||
| 2818 | sqcp = &queued_arr[k]; | ||
| 2819 | kfree(sqcp->cmnd_timerp); | ||
| 2820 | sqcp->cmnd_timerp = NULL; | ||
| 2821 | kfree(sqcp->tletp); | ||
| 2822 | sqcp->tletp = NULL; | ||
| 2823 | kfree(sqcp->sd_hrtp); | ||
| 2824 | sqcp->sd_hrtp = NULL; | ||
| 2825 | } | ||
| 2826 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
| 2555 | } | 2827 | } |
| 2556 | 2828 | ||
| 2557 | static int scsi_debug_biosparam(struct scsi_device *sdev, | 2829 | static int scsi_debug_abort(struct scsi_cmnd *SCpnt) |
| 2558 | struct block_device * bdev, sector_t capacity, int *info) | ||
| 2559 | { | 2830 | { |
| 2560 | int res; | 2831 | ++num_aborts; |
| 2561 | unsigned char *buf; | 2832 | if (SCpnt) { |
| 2562 | 2833 | if (SCpnt->device && | |
| 2563 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 2834 | (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)) |
| 2564 | printk(KERN_INFO "scsi_debug: biosparam\n"); | 2835 | sdev_printk(KERN_INFO, SCpnt->device, "%s\n", |
| 2565 | buf = scsi_bios_ptable(bdev); | 2836 | __func__); |
| 2566 | if (buf) { | 2837 | stop_queued_cmnd(SCpnt); |
| 2567 | res = scsi_partsize(buf, capacity, | 2838 | } |
| 2568 | &info[2], &info[0], &info[1]); | 2839 | return SUCCESS; |
| 2569 | kfree(buf); | ||
| 2570 | if (! res) | ||
| 2571 | return res; | ||
| 2572 | } | ||
| 2573 | info[0] = sdebug_heads; | ||
| 2574 | info[1] = sdebug_sectors_per; | ||
| 2575 | info[2] = sdebug_cylinders_per; | ||
| 2576 | return 0; | ||
| 2577 | } | 2840 | } |
| 2578 | 2841 | ||
| 2579 | static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt) | 2842 | static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt) |
| 2580 | { | 2843 | { |
| 2581 | struct sdebug_dev_info * devip; | 2844 | struct sdebug_dev_info * devip; |
| 2582 | 2845 | ||
| 2583 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | ||
| 2584 | printk(KERN_INFO "scsi_debug: device_reset\n"); | ||
| 2585 | ++num_dev_resets; | 2846 | ++num_dev_resets; |
| 2586 | if (SCpnt) { | 2847 | if (SCpnt && SCpnt->device) { |
| 2587 | devip = devInfoReg(SCpnt->device); | 2848 | struct scsi_device *sdp = SCpnt->device; |
| 2849 | |||
| 2850 | if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts) | ||
| 2851 | sdev_printk(KERN_INFO, sdp, "%s\n", __func__); | ||
| 2852 | devip = devInfoReg(sdp); | ||
| 2588 | if (devip) | 2853 | if (devip) |
| 2589 | devip->reset = 1; | 2854 | set_bit(SDEBUG_UA_POR, devip->uas_bm); |
| 2855 | } | ||
| 2856 | return SUCCESS; | ||
| 2857 | } | ||
| 2858 | |||
| 2859 | static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt) | ||
| 2860 | { | ||
| 2861 | struct sdebug_host_info *sdbg_host; | ||
| 2862 | struct sdebug_dev_info *devip; | ||
| 2863 | struct scsi_device *sdp; | ||
| 2864 | struct Scsi_Host *hp; | ||
| 2865 | int k = 0; | ||
| 2866 | |||
| 2867 | ++num_target_resets; | ||
| 2868 | if (!SCpnt) | ||
| 2869 | goto lie; | ||
| 2870 | sdp = SCpnt->device; | ||
| 2871 | if (!sdp) | ||
| 2872 | goto lie; | ||
| 2873 | if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts) | ||
| 2874 | sdev_printk(KERN_INFO, sdp, "%s\n", __func__); | ||
| 2875 | hp = sdp->host; | ||
| 2876 | if (!hp) | ||
| 2877 | goto lie; | ||
| 2878 | sdbg_host = *(struct sdebug_host_info **)shost_priv(hp); | ||
| 2879 | if (sdbg_host) { | ||
| 2880 | list_for_each_entry(devip, | ||
| 2881 | &sdbg_host->dev_info_list, | ||
| 2882 | dev_list) | ||
| 2883 | if (devip->target == sdp->id) { | ||
| 2884 | set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); | ||
| 2885 | ++k; | ||
| 2886 | } | ||
| 2590 | } | 2887 | } |
| 2888 | if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts) | ||
| 2889 | sdev_printk(KERN_INFO, sdp, | ||
| 2890 | "%s: %d device(s) found in target\n", __func__, k); | ||
| 2891 | lie: | ||
| 2591 | return SUCCESS; | 2892 | return SUCCESS; |
| 2592 | } | 2893 | } |
| 2593 | 2894 | ||
| 2594 | static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt) | 2895 | static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt) |
| 2595 | { | 2896 | { |
| 2596 | struct sdebug_host_info *sdbg_host; | 2897 | struct sdebug_host_info *sdbg_host; |
| 2597 | struct sdebug_dev_info * dev_info; | 2898 | struct sdebug_dev_info *devip; |
| 2598 | struct scsi_device * sdp; | 2899 | struct scsi_device * sdp; |
| 2599 | struct Scsi_Host * hp; | 2900 | struct Scsi_Host * hp; |
| 2901 | int k = 0; | ||
| 2600 | 2902 | ||
| 2601 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | ||
| 2602 | printk(KERN_INFO "scsi_debug: bus_reset\n"); | ||
| 2603 | ++num_bus_resets; | 2903 | ++num_bus_resets; |
| 2604 | if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) { | 2904 | if (!(SCpnt && SCpnt->device)) |
| 2905 | goto lie; | ||
| 2906 | sdp = SCpnt->device; | ||
| 2907 | if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts) | ||
| 2908 | sdev_printk(KERN_INFO, sdp, "%s\n", __func__); | ||
| 2909 | hp = sdp->host; | ||
| 2910 | if (hp) { | ||
| 2605 | sdbg_host = *(struct sdebug_host_info **)shost_priv(hp); | 2911 | sdbg_host = *(struct sdebug_host_info **)shost_priv(hp); |
| 2606 | if (sdbg_host) { | 2912 | if (sdbg_host) { |
| 2607 | list_for_each_entry(dev_info, | 2913 | list_for_each_entry(devip, |
| 2608 | &sdbg_host->dev_info_list, | 2914 | &sdbg_host->dev_info_list, |
| 2609 | dev_list) | 2915 | dev_list) { |
| 2610 | dev_info->reset = 1; | 2916 | set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); |
| 2917 | ++k; | ||
| 2918 | } | ||
| 2611 | } | 2919 | } |
| 2612 | } | 2920 | } |
| 2921 | if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts) | ||
| 2922 | sdev_printk(KERN_INFO, sdp, | ||
| 2923 | "%s: %d device(s) found in host\n", __func__, k); | ||
| 2924 | lie: | ||
| 2613 | return SUCCESS; | 2925 | return SUCCESS; |
| 2614 | } | 2926 | } |
| 2615 | 2927 | ||
| 2616 | static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt) | 2928 | static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt) |
| 2617 | { | 2929 | { |
| 2618 | struct sdebug_host_info * sdbg_host; | 2930 | struct sdebug_host_info * sdbg_host; |
| 2619 | struct sdebug_dev_info * dev_info; | 2931 | struct sdebug_dev_info *devip; |
| 2932 | int k = 0; | ||
| 2620 | 2933 | ||
| 2621 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | ||
| 2622 | printk(KERN_INFO "scsi_debug: host_reset\n"); | ||
| 2623 | ++num_host_resets; | 2934 | ++num_host_resets; |
| 2935 | if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)) | ||
| 2936 | sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__); | ||
| 2624 | spin_lock(&sdebug_host_list_lock); | 2937 | spin_lock(&sdebug_host_list_lock); |
| 2625 | list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { | 2938 | list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { |
| 2626 | list_for_each_entry(dev_info, &sdbg_host->dev_info_list, | 2939 | list_for_each_entry(devip, &sdbg_host->dev_info_list, |
| 2627 | dev_list) | 2940 | dev_list) { |
| 2628 | dev_info->reset = 1; | 2941 | set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); |
| 2942 | ++k; | ||
| 2943 | } | ||
| 2629 | } | 2944 | } |
| 2630 | spin_unlock(&sdebug_host_list_lock); | 2945 | spin_unlock(&sdebug_host_list_lock); |
| 2631 | stop_all_queued(); | 2946 | stop_all_queued(); |
| 2947 | if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts) | ||
| 2948 | sdev_printk(KERN_INFO, SCpnt->device, | ||
| 2949 | "%s: %d device(s) found\n", __func__, k); | ||
| 2632 | return SUCCESS; | 2950 | return SUCCESS; |
| 2633 | } | 2951 | } |
| 2634 | 2952 | ||
| 2635 | /* Initializes timers in queued array */ | ||
| 2636 | static void __init init_all_queued(void) | ||
| 2637 | { | ||
| 2638 | unsigned long iflags; | ||
| 2639 | int k; | ||
| 2640 | struct sdebug_queued_cmd * sqcp; | ||
| 2641 | |||
| 2642 | spin_lock_irqsave(&queued_arr_lock, iflags); | ||
| 2643 | for (k = 0; k < scsi_debug_max_queue; ++k) { | ||
| 2644 | sqcp = &queued_arr[k]; | ||
| 2645 | init_timer(&sqcp->cmnd_timer); | ||
| 2646 | sqcp->in_use = 0; | ||
| 2647 | sqcp->a_cmnd = NULL; | ||
| 2648 | } | ||
| 2649 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
| 2650 | } | ||
| 2651 | |||
| 2652 | static void __init sdebug_build_parts(unsigned char *ramp, | 2953 | static void __init sdebug_build_parts(unsigned char *ramp, |
| 2653 | unsigned long store_size) | 2954 | unsigned long store_size) |
| 2654 | { | 2955 | { |
| @@ -2662,8 +2963,8 @@ static void __init sdebug_build_parts(unsigned char *ramp, | |||
| 2662 | return; | 2963 | return; |
| 2663 | if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) { | 2964 | if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) { |
| 2664 | scsi_debug_num_parts = SDEBUG_MAX_PARTS; | 2965 | scsi_debug_num_parts = SDEBUG_MAX_PARTS; |
| 2665 | printk(KERN_WARNING "scsi_debug:build_parts: reducing " | 2966 | pr_warn("%s: reducing partitions to %d\n", __func__, |
| 2666 | "partitions to %d\n", SDEBUG_MAX_PARTS); | 2967 | SDEBUG_MAX_PARTS); |
| 2667 | } | 2968 | } |
| 2668 | num_sectors = (int)sdebug_store_sectors; | 2969 | num_sectors = (int)sdebug_store_sectors; |
| 2669 | sectors_per_part = (num_sectors - sdebug_sectors_per) | 2970 | sectors_per_part = (num_sectors - sdebug_sectors_per) |
| @@ -2700,62 +3001,130 @@ static void __init sdebug_build_parts(unsigned char *ramp, | |||
| 2700 | } | 3001 | } |
| 2701 | } | 3002 | } |
| 2702 | 3003 | ||
| 2703 | static int schedule_resp(struct scsi_cmnd * cmnd, | 3004 | static int |
| 2704 | struct sdebug_dev_info * devip, | 3005 | schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, |
| 2705 | done_funct_t done, int scsi_result, int delta_jiff) | 3006 | int scsi_result, int delta_jiff) |
| 2706 | { | 3007 | { |
| 2707 | if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) { | 3008 | unsigned long iflags; |
| 2708 | if (scsi_result) { | 3009 | int k, num_in_q, tsf, qdepth, inject; |
| 2709 | struct scsi_device * sdp = cmnd->device; | 3010 | struct sdebug_queued_cmd *sqcp = NULL; |
| 3011 | struct scsi_device *sdp = cmnd->device; | ||
| 3012 | |||
| 3013 | if (NULL == cmnd || NULL == devip) { | ||
| 3014 | pr_warn("%s: called with NULL cmnd or devip pointer\n", | ||
| 3015 | __func__); | ||
| 3016 | /* no particularly good error to report back */ | ||
| 3017 | return SCSI_MLQUEUE_HOST_BUSY; | ||
| 3018 | } | ||
| 3019 | if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) | ||
| 3020 | sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", | ||
| 3021 | __func__, scsi_result); | ||
| 3022 | if (delta_jiff == 0) { | ||
| 3023 | /* using same thread to call back mid-layer */ | ||
| 3024 | cmnd->result = scsi_result; | ||
| 3025 | cmnd->scsi_done(cmnd); | ||
| 3026 | return 0; | ||
| 3027 | } | ||
| 2710 | 3028 | ||
| 2711 | printk(KERN_INFO "scsi_debug: <%u %u %u %u> " | 3029 | /* deferred response cases */ |
| 2712 | "non-zero result=0x%x\n", sdp->host->host_no, | 3030 | spin_lock_irqsave(&queued_arr_lock, iflags); |
| 2713 | sdp->channel, sdp->id, sdp->lun, scsi_result); | 3031 | num_in_q = atomic_read(&devip->num_in_q); |
| 3032 | qdepth = cmnd->device->queue_depth; | ||
| 3033 | k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue); | ||
| 3034 | tsf = 0; | ||
| 3035 | inject = 0; | ||
| 3036 | if ((qdepth > 0) && (num_in_q >= qdepth)) | ||
| 3037 | tsf = 1; | ||
| 3038 | else if ((scsi_debug_every_nth != 0) && | ||
| 3039 | (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts)) { | ||
| 3040 | if ((num_in_q == (qdepth - 1)) && | ||
| 3041 | (atomic_inc_return(&sdebug_a_tsf) >= | ||
| 3042 | abs(scsi_debug_every_nth))) { | ||
| 3043 | atomic_set(&sdebug_a_tsf, 0); | ||
| 3044 | inject = 1; | ||
| 3045 | tsf = 1; | ||
| 2714 | } | 3046 | } |
| 2715 | } | 3047 | } |
| 2716 | if (cmnd && devip) { | ||
| 2717 | /* simulate autosense by this driver */ | ||
| 2718 | if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff)) | ||
| 2719 | memcpy(cmnd->sense_buffer, devip->sense_buff, | ||
| 2720 | (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ? | ||
| 2721 | SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE); | ||
| 2722 | } | ||
| 2723 | if (delta_jiff <= 0) { | ||
| 2724 | if (cmnd) | ||
| 2725 | cmnd->result = scsi_result; | ||
| 2726 | if (done) | ||
| 2727 | done(cmnd); | ||
| 2728 | return 0; | ||
| 2729 | } else { | ||
| 2730 | unsigned long iflags; | ||
| 2731 | int k; | ||
| 2732 | struct sdebug_queued_cmd * sqcp = NULL; | ||
| 2733 | 3048 | ||
| 2734 | spin_lock_irqsave(&queued_arr_lock, iflags); | 3049 | /* if (tsf) simulate device reporting SCSI status of TASK SET FULL. |
| 2735 | for (k = 0; k < scsi_debug_max_queue; ++k) { | 3050 | * Might override existing CHECK CONDITION. */ |
| 2736 | sqcp = &queued_arr[k]; | 3051 | if (tsf) |
| 2737 | if (! sqcp->in_use) | 3052 | scsi_result = device_qfull_result; |
| 2738 | break; | 3053 | if (k >= scsi_debug_max_queue) { |
| 3054 | if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts) | ||
| 3055 | tsf = 1; | ||
| 3056 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
| 3057 | if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) | ||
| 3058 | sdev_printk(KERN_INFO, sdp, | ||
| 3059 | "%s: num_in_q=%d, bypass q, %s%s\n", | ||
| 3060 | __func__, num_in_q, | ||
| 3061 | (inject ? "<inject> " : ""), | ||
| 3062 | (tsf ? "status: TASK SET FULL" : | ||
| 3063 | "report: host busy")); | ||
| 3064 | if (tsf) { | ||
| 3065 | /* queued_arr full so respond in same thread */ | ||
| 3066 | cmnd->result = scsi_result; | ||
| 3067 | cmnd->scsi_done(cmnd); | ||
| 3068 | /* As scsi_done() is called "inline" must return 0 */ | ||
| 3069 | return 0; | ||
| 3070 | } else | ||
| 3071 | return SCSI_MLQUEUE_HOST_BUSY; | ||
| 3072 | } | ||
| 3073 | __set_bit(k, queued_in_use_bm); | ||
| 3074 | atomic_inc(&devip->num_in_q); | ||
| 3075 | sqcp = &queued_arr[k]; | ||
| 3076 | sqcp->a_cmnd = cmnd; | ||
| 3077 | cmnd->result = scsi_result; | ||
| 3078 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
| 3079 | if (delta_jiff > 0) { | ||
| 3080 | if (NULL == sqcp->cmnd_timerp) { | ||
| 3081 | sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list), | ||
| 3082 | GFP_ATOMIC); | ||
| 3083 | if (NULL == sqcp->cmnd_timerp) | ||
| 3084 | return SCSI_MLQUEUE_HOST_BUSY; | ||
| 3085 | init_timer(sqcp->cmnd_timerp); | ||
| 2739 | } | 3086 | } |
| 2740 | if (k >= scsi_debug_max_queue) { | 3087 | sqcp->cmnd_timerp->function = sdebug_q_cmd_complete; |
| 2741 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 3088 | sqcp->cmnd_timerp->data = k; |
| 2742 | printk(KERN_WARNING "scsi_debug: can_queue exceeded\n"); | 3089 | sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff; |
| 2743 | return 1; /* report busy to mid level */ | 3090 | add_timer(sqcp->cmnd_timerp); |
| 3091 | } else if (scsi_debug_ndelay > 0) { | ||
| 3092 | ktime_t kt = ktime_set(0, scsi_debug_ndelay); | ||
| 3093 | struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp; | ||
| 3094 | |||
| 3095 | if (NULL == sd_hp) { | ||
| 3096 | sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC); | ||
| 3097 | if (NULL == sd_hp) | ||
| 3098 | return SCSI_MLQUEUE_HOST_BUSY; | ||
| 3099 | sqcp->sd_hrtp = sd_hp; | ||
| 3100 | hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC, | ||
| 3101 | HRTIMER_MODE_REL); | ||
| 3102 | sd_hp->hrt.function = sdebug_q_cmd_hrt_complete; | ||
| 3103 | sd_hp->qa_indx = k; | ||
| 2744 | } | 3104 | } |
| 2745 | sqcp->in_use = 1; | 3105 | hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL); |
| 2746 | sqcp->a_cmnd = cmnd; | 3106 | } else { /* delay < 0 */ |
| 2747 | sqcp->scsi_result = scsi_result; | 3107 | if (NULL == sqcp->tletp) { |
| 2748 | sqcp->done_funct = done; | 3108 | sqcp->tletp = kmalloc(sizeof(*sqcp->tletp), |
| 2749 | sqcp->cmnd_timer.function = timer_intr_handler; | 3109 | GFP_ATOMIC); |
| 2750 | sqcp->cmnd_timer.data = k; | 3110 | if (NULL == sqcp->tletp) |
| 2751 | sqcp->cmnd_timer.expires = jiffies + delta_jiff; | 3111 | return SCSI_MLQUEUE_HOST_BUSY; |
| 2752 | add_timer(&sqcp->cmnd_timer); | 3112 | tasklet_init(sqcp->tletp, |
| 2753 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | 3113 | sdebug_q_cmd_complete, k); |
| 2754 | if (cmnd) | 3114 | } |
| 2755 | cmnd->result = 0; | 3115 | if (-1 == delta_jiff) |
| 2756 | return 0; | 3116 | tasklet_hi_schedule(sqcp->tletp); |
| 3117 | else | ||
| 3118 | tasklet_schedule(sqcp->tletp); | ||
| 2757 | } | 3119 | } |
| 3120 | if (tsf && (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)) | ||
| 3121 | sdev_printk(KERN_INFO, sdp, | ||
| 3122 | "%s: num_in_q=%d +1, %s%s\n", __func__, | ||
| 3123 | num_in_q, (inject ? "<inject> " : ""), | ||
| 3124 | "status: TASK SET FULL"); | ||
| 3125 | return 0; | ||
| 2758 | } | 3126 | } |
| 3127 | |||
| 2759 | /* Note: The following macros create attribute files in the | 3128 | /* Note: The following macros create attribute files in the |
| 2760 | /sys/module/scsi_debug/parameters directory. Unfortunately this | 3129 | /sys/module/scsi_debug/parameters directory. Unfortunately this |
| 2761 | driver is unaware of a change and cannot trigger auxiliary actions | 3130 | driver is unaware of a change and cannot trigger auxiliary actions |
| @@ -2773,6 +3142,7 @@ module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR); | |||
| 2773 | module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR); | 3142 | module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR); |
| 2774 | module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR); | 3143 | module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR); |
| 2775 | module_param_named(guard, scsi_debug_guard, uint, S_IRUGO); | 3144 | module_param_named(guard, scsi_debug_guard, uint, S_IRUGO); |
| 3145 | module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR); | ||
| 2776 | module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); | 3146 | module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); |
| 2777 | module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); | 3147 | module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); |
| 2778 | module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); | 3148 | module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); |
| @@ -2780,6 +3150,7 @@ module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO); | |||
| 2780 | module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); | 3150 | module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); |
| 2781 | module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR); | 3151 | module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR); |
| 2782 | module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR); | 3152 | module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR); |
| 3153 | module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR); | ||
| 2783 | module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR); | 3154 | module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR); |
| 2784 | module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO); | 3155 | module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO); |
| 2785 | module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO); | 3156 | module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO); |
| @@ -2809,7 +3180,7 @@ MODULE_VERSION(SCSI_DEBUG_VERSION); | |||
| 2809 | MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)"); | 3180 | MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)"); |
| 2810 | MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); | 3181 | MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); |
| 2811 | MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)"); | 3182 | MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)"); |
| 2812 | MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)"); | 3183 | MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny"); |
| 2813 | MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)"); | 3184 | MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)"); |
| 2814 | MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); | 3185 | MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); |
| 2815 | MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); | 3186 | MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); |
| @@ -2817,13 +3188,15 @@ MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)"); | |||
| 2817 | MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)"); | 3188 | MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)"); |
| 2818 | MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)"); | 3189 | MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)"); |
| 2819 | MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); | 3190 | MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); |
| 3191 | MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)"); | ||
| 2820 | MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); | 3192 | MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); |
| 2821 | MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); | 3193 | MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); |
| 2822 | MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); | 3194 | MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); |
| 2823 | MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)"); | 3195 | MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)"); |
| 2824 | MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); | 3196 | MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); |
| 2825 | MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); | 3197 | MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); |
| 2826 | MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))"); | 3198 | MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))"); |
| 3199 | MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)"); | ||
| 2827 | MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)"); | 3200 | MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)"); |
| 2828 | MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))"); | 3201 | MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))"); |
| 2829 | MODULE_PARM_DESC(num_parts, "number of partitions(def=0)"); | 3202 | MODULE_PARM_DESC(num_parts, "number of partitions(def=0)"); |
| @@ -2854,9 +3227,7 @@ static const char * scsi_debug_info(struct Scsi_Host * shp) | |||
| 2854 | return sdebug_info; | 3227 | return sdebug_info; |
| 2855 | } | 3228 | } |
| 2856 | 3229 | ||
| 2857 | /* scsi_debug_proc_info | 3230 | /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */ |
| 2858 | * Used if the driver currently has no own support for /proc/scsi | ||
| 2859 | */ | ||
| 2860 | static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length) | 3231 | static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length) |
| 2861 | { | 3232 | { |
| 2862 | char arr[16]; | 3233 | char arr[16]; |
| @@ -2871,27 +3242,49 @@ static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int lengt | |||
| 2871 | return -EINVAL; | 3242 | return -EINVAL; |
| 2872 | scsi_debug_opts = opts; | 3243 | scsi_debug_opts = opts; |
| 2873 | if (scsi_debug_every_nth != 0) | 3244 | if (scsi_debug_every_nth != 0) |
| 2874 | scsi_debug_cmnd_count = 0; | 3245 | atomic_set(&sdebug_cmnd_count, 0); |
| 2875 | return length; | 3246 | return length; |
| 2876 | } | 3247 | } |
| 2877 | 3248 | ||
| 3249 | /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the | ||
| 3250 | * same for each scsi_debug host (if more than one). Some of the counters | ||
| 3251 | * output are not atomics so might be inaccurate in a busy system. */ | ||
| 2878 | static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host) | 3252 | static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host) |
| 2879 | { | 3253 | { |
| 2880 | seq_printf(m, "scsi_debug adapter driver, version " | 3254 | int f, l; |
| 2881 | "%s [%s]\n" | 3255 | char b[32]; |
| 2882 | "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, " | 3256 | |
| 2883 | "every_nth=%d(curr:%d)\n" | 3257 | if (scsi_debug_every_nth > 0) |
| 2884 | "delay=%d, max_luns=%d, scsi_level=%d\n" | 3258 | snprintf(b, sizeof(b), " (curr:%d)", |
| 2885 | "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n" | 3259 | ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ? |
| 2886 | "number of aborts=%d, device_reset=%d, bus_resets=%d, " | 3260 | atomic_read(&sdebug_a_tsf) : |
| 2887 | "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n", | 3261 | atomic_read(&sdebug_cmnd_count))); |
| 2888 | SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts, | 3262 | else |
| 2889 | scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth, | 3263 | b[0] = '\0'; |
| 2890 | scsi_debug_cmnd_count, scsi_debug_delay, | 3264 | |
| 2891 | scsi_debug_max_luns, scsi_debug_scsi_level, | 3265 | seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n" |
| 2892 | scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads, | 3266 | "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, " |
| 2893 | sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets, | 3267 | "every_nth=%d%s\n" |
| 2894 | num_host_resets, dix_reads, dix_writes, dif_errors); | 3268 | "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n" |
| 3269 | "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n" | ||
| 3270 | "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, " | ||
| 3271 | "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d " | ||
| 3272 | "usec_in_jiffy=%lu\n", | ||
| 3273 | SCSI_DEBUG_VERSION, scsi_debug_version_date, | ||
| 3274 | scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts, | ||
| 3275 | scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay, | ||
| 3276 | scsi_debug_max_luns, atomic_read(&sdebug_completions), | ||
| 3277 | scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads, | ||
| 3278 | sdebug_sectors_per, num_aborts, num_dev_resets, | ||
| 3279 | num_target_resets, num_bus_resets, num_host_resets, | ||
| 3280 | dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000); | ||
| 3281 | |||
| 3282 | f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue); | ||
| 3283 | if (f != scsi_debug_max_queue) { | ||
| 3284 | l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue); | ||
| 3285 | seq_printf(m, " %s BUSY: first,last bits set: %d,%d\n", | ||
| 3286 | "queued_in_use_bm", f, l); | ||
| 3287 | } | ||
| 2895 | return 0; | 3288 | return 0; |
| 2896 | } | 3289 | } |
| 2897 | 3290 | ||
| @@ -2899,23 +3292,69 @@ static ssize_t delay_show(struct device_driver *ddp, char *buf) | |||
| 2899 | { | 3292 | { |
| 2900 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay); | 3293 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay); |
| 2901 | } | 3294 | } |
| 2902 | 3295 | /* Returns -EBUSY if delay is being changed and commands are queued */ | |
| 2903 | static ssize_t delay_store(struct device_driver *ddp, const char *buf, | 3296 | static ssize_t delay_store(struct device_driver *ddp, const char *buf, |
| 2904 | size_t count) | 3297 | size_t count) |
| 2905 | { | 3298 | { |
| 2906 | int delay; | 3299 | int delay, res; |
| 2907 | char work[20]; | 3300 | |
| 2908 | 3301 | if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) { | |
| 2909 | if (1 == sscanf(buf, "%10s", work)) { | 3302 | res = count; |
| 2910 | if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) { | 3303 | if (scsi_debug_delay != delay) { |
| 2911 | scsi_debug_delay = delay; | 3304 | unsigned long iflags; |
| 2912 | return count; | 3305 | int k; |
| 3306 | |||
| 3307 | spin_lock_irqsave(&queued_arr_lock, iflags); | ||
| 3308 | k = find_first_bit(queued_in_use_bm, | ||
| 3309 | scsi_debug_max_queue); | ||
| 3310 | if (k != scsi_debug_max_queue) | ||
| 3311 | res = -EBUSY; /* have queued commands */ | ||
| 3312 | else { | ||
| 3313 | scsi_debug_delay = delay; | ||
| 3314 | scsi_debug_ndelay = 0; | ||
| 3315 | } | ||
| 3316 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
| 2913 | } | 3317 | } |
| 3318 | return res; | ||
| 2914 | } | 3319 | } |
| 2915 | return -EINVAL; | 3320 | return -EINVAL; |
| 2916 | } | 3321 | } |
| 2917 | static DRIVER_ATTR_RW(delay); | 3322 | static DRIVER_ATTR_RW(delay); |
| 2918 | 3323 | ||
| 3324 | static ssize_t ndelay_show(struct device_driver *ddp, char *buf) | ||
| 3325 | { | ||
| 3326 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay); | ||
| 3327 | } | ||
| 3328 | /* Returns -EBUSY if ndelay is being changed and commands are queued */ | ||
| 3329 | /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */ | ||
| 3330 | static ssize_t ndelay_store(struct device_driver *ddp, const char *buf, | ||
| 3331 | size_t count) | ||
| 3332 | { | ||
| 3333 | unsigned long iflags; | ||
| 3334 | int ndelay, res, k; | ||
| 3335 | |||
| 3336 | if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) && | ||
| 3337 | (ndelay >= 0) && (ndelay < 1000000000)) { | ||
| 3338 | res = count; | ||
| 3339 | if (scsi_debug_ndelay != ndelay) { | ||
| 3340 | spin_lock_irqsave(&queued_arr_lock, iflags); | ||
| 3341 | k = find_first_bit(queued_in_use_bm, | ||
| 3342 | scsi_debug_max_queue); | ||
| 3343 | if (k != scsi_debug_max_queue) | ||
| 3344 | res = -EBUSY; /* have queued commands */ | ||
| 3345 | else { | ||
| 3346 | scsi_debug_ndelay = ndelay; | ||
| 3347 | scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN | ||
| 3348 | : DEF_DELAY; | ||
| 3349 | } | ||
| 3350 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
| 3351 | } | ||
| 3352 | return res; | ||
| 3353 | } | ||
| 3354 | return -EINVAL; | ||
| 3355 | } | ||
| 3356 | static DRIVER_ATTR_RW(ndelay); | ||
| 3357 | |||
| 2919 | static ssize_t opts_show(struct device_driver *ddp, char *buf) | 3358 | static ssize_t opts_show(struct device_driver *ddp, char *buf) |
| 2920 | { | 3359 | { |
| 2921 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts); | 3360 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts); |
| @@ -2939,7 +3378,8 @@ static ssize_t opts_store(struct device_driver *ddp, const char *buf, | |||
| 2939 | return -EINVAL; | 3378 | return -EINVAL; |
| 2940 | opts_done: | 3379 | opts_done: |
| 2941 | scsi_debug_opts = opts; | 3380 | scsi_debug_opts = opts; |
| 2942 | scsi_debug_cmnd_count = 0; | 3381 | atomic_set(&sdebug_cmnd_count, 0); |
| 3382 | atomic_set(&sdebug_a_tsf, 0); | ||
| 2943 | return count; | 3383 | return count; |
| 2944 | } | 3384 | } |
| 2945 | static DRIVER_ATTR_RW(opts); | 3385 | static DRIVER_ATTR_RW(opts); |
| @@ -2988,7 +3428,24 @@ static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf, | |||
| 2988 | int n; | 3428 | int n; |
| 2989 | 3429 | ||
| 2990 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | 3430 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { |
| 2991 | scsi_debug_fake_rw = n; | 3431 | n = (n > 0); |
| 3432 | scsi_debug_fake_rw = (scsi_debug_fake_rw > 0); | ||
| 3433 | if (scsi_debug_fake_rw != n) { | ||
| 3434 | if ((0 == n) && (NULL == fake_storep)) { | ||
| 3435 | unsigned long sz = | ||
| 3436 | (unsigned long)scsi_debug_dev_size_mb * | ||
| 3437 | 1048576; | ||
| 3438 | |||
| 3439 | fake_storep = vmalloc(sz); | ||
| 3440 | if (NULL == fake_storep) { | ||
| 3441 | pr_err("%s: out of memory, 9\n", | ||
| 3442 | __func__); | ||
| 3443 | return -ENOMEM; | ||
| 3444 | } | ||
| 3445 | memset(fake_storep, 0, sz); | ||
| 3446 | } | ||
| 3447 | scsi_debug_fake_rw = n; | ||
| 3448 | } | ||
| 2992 | return count; | 3449 | return count; |
| 2993 | } | 3450 | } |
| 2994 | return -EINVAL; | 3451 | return -EINVAL; |
| @@ -3053,7 +3510,7 @@ static ssize_t every_nth_store(struct device_driver *ddp, const char *buf, | |||
| 3053 | 3510 | ||
| 3054 | if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) { | 3511 | if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) { |
| 3055 | scsi_debug_every_nth = nth; | 3512 | scsi_debug_every_nth = nth; |
| 3056 | scsi_debug_cmnd_count = 0; | 3513 | atomic_set(&sdebug_cmnd_count, 0); |
| 3057 | return count; | 3514 | return count; |
| 3058 | } | 3515 | } |
| 3059 | return -EINVAL; | 3516 | return -EINVAL; |
| @@ -3082,14 +3539,26 @@ static ssize_t max_queue_show(struct device_driver *ddp, char *buf) | |||
| 3082 | { | 3539 | { |
| 3083 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue); | 3540 | return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue); |
| 3084 | } | 3541 | } |
| 3542 | /* N.B. max_queue can be changed while there are queued commands. In flight | ||
| 3543 | * commands beyond the new max_queue will be completed. */ | ||
| 3085 | static ssize_t max_queue_store(struct device_driver *ddp, const char *buf, | 3544 | static ssize_t max_queue_store(struct device_driver *ddp, const char *buf, |
| 3086 | size_t count) | 3545 | size_t count) |
| 3087 | { | 3546 | { |
| 3088 | int n; | 3547 | unsigned long iflags; |
| 3548 | int n, k; | ||
| 3089 | 3549 | ||
| 3090 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) && | 3550 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) && |
| 3091 | (n <= SCSI_DEBUG_CANQUEUE)) { | 3551 | (n <= SCSI_DEBUG_CANQUEUE)) { |
| 3552 | spin_lock_irqsave(&queued_arr_lock, iflags); | ||
| 3553 | k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE); | ||
| 3092 | scsi_debug_max_queue = n; | 3554 | scsi_debug_max_queue = n; |
| 3555 | if (SCSI_DEBUG_CANQUEUE == k) | ||
| 3556 | atomic_set(&retired_max_queue, 0); | ||
| 3557 | else if (k >= n) | ||
| 3558 | atomic_set(&retired_max_queue, k + 1); | ||
| 3559 | else | ||
| 3560 | atomic_set(&retired_max_queue, 0); | ||
| 3561 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
| 3093 | return count; | 3562 | return count; |
| 3094 | } | 3563 | } |
| 3095 | return -EINVAL; | 3564 | return -EINVAL; |
| @@ -3234,6 +3703,40 @@ static ssize_t removable_store(struct device_driver *ddp, const char *buf, | |||
| 3234 | } | 3703 | } |
| 3235 | static DRIVER_ATTR_RW(removable); | 3704 | static DRIVER_ATTR_RW(removable); |
| 3236 | 3705 | ||
| 3706 | static ssize_t host_lock_show(struct device_driver *ddp, char *buf) | ||
| 3707 | { | ||
| 3708 | return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock); | ||
| 3709 | } | ||
| 3710 | /* Returns -EBUSY if host_lock is being changed and commands are queued */ | ||
| 3711 | static ssize_t host_lock_store(struct device_driver *ddp, const char *buf, | ||
| 3712 | size_t count) | ||
| 3713 | { | ||
| 3714 | int n, res; | ||
| 3715 | |||
| 3716 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | ||
| 3717 | bool new_host_lock = (n > 0); | ||
| 3718 | |||
| 3719 | res = count; | ||
| 3720 | if (new_host_lock != scsi_debug_host_lock) { | ||
| 3721 | unsigned long iflags; | ||
| 3722 | int k; | ||
| 3723 | |||
| 3724 | spin_lock_irqsave(&queued_arr_lock, iflags); | ||
| 3725 | k = find_first_bit(queued_in_use_bm, | ||
| 3726 | scsi_debug_max_queue); | ||
| 3727 | if (k != scsi_debug_max_queue) | ||
| 3728 | res = -EBUSY; /* have queued commands */ | ||
| 3729 | else | ||
| 3730 | scsi_debug_host_lock = new_host_lock; | ||
| 3731 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
| 3732 | } | ||
| 3733 | return res; | ||
| 3734 | } | ||
| 3735 | return -EINVAL; | ||
| 3736 | } | ||
| 3737 | static DRIVER_ATTR_RW(host_lock); | ||
| 3738 | |||
| 3739 | |||
| 3237 | /* Note: The following array creates attribute files in the | 3740 | /* Note: The following array creates attribute files in the |
| 3238 | /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these | 3741 | /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these |
| 3239 | files (over those found in the /sys/module/scsi_debug/parameters | 3742 | files (over those found in the /sys/module/scsi_debug/parameters |
| @@ -3266,6 +3769,8 @@ static struct attribute *sdebug_drv_attrs[] = { | |||
| 3266 | &driver_attr_ato.attr, | 3769 | &driver_attr_ato.attr, |
| 3267 | &driver_attr_map.attr, | 3770 | &driver_attr_map.attr, |
| 3268 | &driver_attr_removable.attr, | 3771 | &driver_attr_removable.attr, |
| 3772 | &driver_attr_host_lock.attr, | ||
| 3773 | &driver_attr_ndelay.attr, | ||
| 3269 | NULL, | 3774 | NULL, |
| 3270 | }; | 3775 | }; |
| 3271 | ATTRIBUTE_GROUPS(sdebug_drv); | 3776 | ATTRIBUTE_GROUPS(sdebug_drv); |
| @@ -3279,6 +3784,17 @@ static int __init scsi_debug_init(void) | |||
| 3279 | int k; | 3784 | int k; |
| 3280 | int ret; | 3785 | int ret; |
| 3281 | 3786 | ||
| 3787 | atomic_set(&sdebug_cmnd_count, 0); | ||
| 3788 | atomic_set(&sdebug_completions, 0); | ||
| 3789 | atomic_set(&retired_max_queue, 0); | ||
| 3790 | |||
| 3791 | if (scsi_debug_ndelay >= 1000000000) { | ||
| 3792 | pr_warn("%s: ndelay must be less than 1 second, ignored\n", | ||
| 3793 | __func__); | ||
| 3794 | scsi_debug_ndelay = 0; | ||
| 3795 | } else if (scsi_debug_ndelay > 0) | ||
| 3796 | scsi_debug_delay = DELAY_OVERRIDDEN; | ||
| 3797 | |||
| 3282 | switch (scsi_debug_sector_size) { | 3798 | switch (scsi_debug_sector_size) { |
| 3283 | case 512: | 3799 | case 512: |
| 3284 | case 1024: | 3800 | case 1024: |
| @@ -3286,7 +3802,7 @@ static int __init scsi_debug_init(void) | |||
| 3286 | case 4096: | 3802 | case 4096: |
| 3287 | break; | 3803 | break; |
| 3288 | default: | 3804 | default: |
| 3289 | printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n", | 3805 | pr_err("%s: invalid sector_size %d\n", __func__, |
| 3290 | scsi_debug_sector_size); | 3806 | scsi_debug_sector_size); |
| 3291 | return -EINVAL; | 3807 | return -EINVAL; |
| 3292 | } | 3808 | } |
| @@ -3300,28 +3816,28 @@ static int __init scsi_debug_init(void) | |||
| 3300 | break; | 3816 | break; |
| 3301 | 3817 | ||
| 3302 | default: | 3818 | default: |
| 3303 | printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n"); | 3819 | pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__); |
| 3304 | return -EINVAL; | 3820 | return -EINVAL; |
| 3305 | } | 3821 | } |
| 3306 | 3822 | ||
| 3307 | if (scsi_debug_guard > 1) { | 3823 | if (scsi_debug_guard > 1) { |
| 3308 | printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n"); | 3824 | pr_err("%s: guard must be 0 or 1\n", __func__); |
| 3309 | return -EINVAL; | 3825 | return -EINVAL; |
| 3310 | } | 3826 | } |
| 3311 | 3827 | ||
| 3312 | if (scsi_debug_ato > 1) { | 3828 | if (scsi_debug_ato > 1) { |
| 3313 | printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n"); | 3829 | pr_err("%s: ato must be 0 or 1\n", __func__); |
| 3314 | return -EINVAL; | 3830 | return -EINVAL; |
| 3315 | } | 3831 | } |
| 3316 | 3832 | ||
| 3317 | if (scsi_debug_physblk_exp > 15) { | 3833 | if (scsi_debug_physblk_exp > 15) { |
| 3318 | printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n", | 3834 | pr_err("%s: invalid physblk_exp %u\n", __func__, |
| 3319 | scsi_debug_physblk_exp); | 3835 | scsi_debug_physblk_exp); |
| 3320 | return -EINVAL; | 3836 | return -EINVAL; |
| 3321 | } | 3837 | } |
| 3322 | 3838 | ||
| 3323 | if (scsi_debug_lowest_aligned > 0x3fff) { | 3839 | if (scsi_debug_lowest_aligned > 0x3fff) { |
| 3324 | printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n", | 3840 | pr_err("%s: lowest_aligned too big: %u\n", __func__, |
| 3325 | scsi_debug_lowest_aligned); | 3841 | scsi_debug_lowest_aligned); |
| 3326 | return -EINVAL; | 3842 | return -EINVAL; |
| 3327 | } | 3843 | } |
| @@ -3349,14 +3865,16 @@ static int __init scsi_debug_init(void) | |||
| 3349 | (sdebug_sectors_per * sdebug_heads); | 3865 | (sdebug_sectors_per * sdebug_heads); |
| 3350 | } | 3866 | } |
| 3351 | 3867 | ||
| 3352 | fake_storep = vmalloc(sz); | 3868 | if (0 == scsi_debug_fake_rw) { |
| 3353 | if (NULL == fake_storep) { | 3869 | fake_storep = vmalloc(sz); |
| 3354 | printk(KERN_ERR "scsi_debug_init: out of memory, 1\n"); | 3870 | if (NULL == fake_storep) { |
| 3355 | return -ENOMEM; | 3871 | pr_err("%s: out of memory, 1\n", __func__); |
| 3872 | return -ENOMEM; | ||
| 3873 | } | ||
| 3874 | memset(fake_storep, 0, sz); | ||
| 3875 | if (scsi_debug_num_parts > 0) | ||
| 3876 | sdebug_build_parts(fake_storep, sz); | ||
| 3356 | } | 3877 | } |
| 3357 | memset(fake_storep, 0, sz); | ||
| 3358 | if (scsi_debug_num_parts > 0) | ||
| 3359 | sdebug_build_parts(fake_storep, sz); | ||
| 3360 | 3878 | ||
| 3361 | if (scsi_debug_dix) { | 3879 | if (scsi_debug_dix) { |
| 3362 | int dif_size; | 3880 | int dif_size; |
| @@ -3364,11 +3882,11 @@ static int __init scsi_debug_init(void) | |||
| 3364 | dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple); | 3882 | dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple); |
| 3365 | dif_storep = vmalloc(dif_size); | 3883 | dif_storep = vmalloc(dif_size); |
| 3366 | 3884 | ||
| 3367 | printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n", | 3885 | pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size, |
| 3368 | dif_size, dif_storep); | 3886 | dif_storep); |
| 3369 | 3887 | ||
| 3370 | if (dif_storep == NULL) { | 3888 | if (dif_storep == NULL) { |
| 3371 | printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n"); | 3889 | pr_err("%s: out of mem. (DIX)\n", __func__); |
| 3372 | ret = -ENOMEM; | 3890 | ret = -ENOMEM; |
| 3373 | goto free_vm; | 3891 | goto free_vm; |
| 3374 | } | 3892 | } |
| @@ -3390,8 +3908,7 @@ static int __init scsi_debug_init(void) | |||
| 3390 | if (scsi_debug_unmap_alignment && | 3908 | if (scsi_debug_unmap_alignment && |
| 3391 | scsi_debug_unmap_granularity <= | 3909 | scsi_debug_unmap_granularity <= |
| 3392 | scsi_debug_unmap_alignment) { | 3910 | scsi_debug_unmap_alignment) { |
| 3393 | printk(KERN_ERR | 3911 | pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n", |
| 3394 | "%s: ERR: unmap_granularity <= unmap_alignment\n", | ||
| 3395 | __func__); | 3912 | __func__); |
| 3396 | return -EINVAL; | 3913 | return -EINVAL; |
| 3397 | } | 3914 | } |
| @@ -3399,11 +3916,10 @@ static int __init scsi_debug_init(void) | |||
| 3399 | map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; | 3916 | map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; |
| 3400 | map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long)); | 3917 | map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long)); |
| 3401 | 3918 | ||
| 3402 | printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n", | 3919 | pr_info("%s: %lu provisioning blocks\n", __func__, map_size); |
| 3403 | map_size); | ||
| 3404 | 3920 | ||
| 3405 | if (map_storep == NULL) { | 3921 | if (map_storep == NULL) { |
| 3406 | printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n"); | 3922 | pr_err("%s: out of mem. (MAP)\n", __func__); |
| 3407 | ret = -ENOMEM; | 3923 | ret = -ENOMEM; |
| 3408 | goto free_vm; | 3924 | goto free_vm; |
| 3409 | } | 3925 | } |
| @@ -3417,39 +3933,35 @@ static int __init scsi_debug_init(void) | |||
| 3417 | 3933 | ||
| 3418 | pseudo_primary = root_device_register("pseudo_0"); | 3934 | pseudo_primary = root_device_register("pseudo_0"); |
| 3419 | if (IS_ERR(pseudo_primary)) { | 3935 | if (IS_ERR(pseudo_primary)) { |
| 3420 | printk(KERN_WARNING "scsi_debug: root_device_register() error\n"); | 3936 | pr_warn("%s: root_device_register() error\n", __func__); |
| 3421 | ret = PTR_ERR(pseudo_primary); | 3937 | ret = PTR_ERR(pseudo_primary); |
| 3422 | goto free_vm; | 3938 | goto free_vm; |
| 3423 | } | 3939 | } |
| 3424 | ret = bus_register(&pseudo_lld_bus); | 3940 | ret = bus_register(&pseudo_lld_bus); |
| 3425 | if (ret < 0) { | 3941 | if (ret < 0) { |
| 3426 | printk(KERN_WARNING "scsi_debug: bus_register error: %d\n", | 3942 | pr_warn("%s: bus_register error: %d\n", __func__, ret); |
| 3427 | ret); | ||
| 3428 | goto dev_unreg; | 3943 | goto dev_unreg; |
| 3429 | } | 3944 | } |
| 3430 | ret = driver_register(&sdebug_driverfs_driver); | 3945 | ret = driver_register(&sdebug_driverfs_driver); |
| 3431 | if (ret < 0) { | 3946 | if (ret < 0) { |
| 3432 | printk(KERN_WARNING "scsi_debug: driver_register error: %d\n", | 3947 | pr_warn("%s: driver_register error: %d\n", __func__, ret); |
| 3433 | ret); | ||
| 3434 | goto bus_unreg; | 3948 | goto bus_unreg; |
| 3435 | } | 3949 | } |
| 3436 | 3950 | ||
| 3437 | init_all_queued(); | ||
| 3438 | |||
| 3439 | host_to_add = scsi_debug_add_host; | 3951 | host_to_add = scsi_debug_add_host; |
| 3440 | scsi_debug_add_host = 0; | 3952 | scsi_debug_add_host = 0; |
| 3441 | 3953 | ||
| 3442 | for (k = 0; k < host_to_add; k++) { | 3954 | for (k = 0; k < host_to_add; k++) { |
| 3443 | if (sdebug_add_adapter()) { | 3955 | if (sdebug_add_adapter()) { |
| 3444 | printk(KERN_ERR "scsi_debug_init: " | 3956 | pr_err("%s: sdebug_add_adapter failed k=%d\n", |
| 3445 | "sdebug_add_adapter failed k=%d\n", k); | 3957 | __func__, k); |
| 3446 | break; | 3958 | break; |
| 3447 | } | 3959 | } |
| 3448 | } | 3960 | } |
| 3449 | 3961 | ||
| 3450 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) { | 3962 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) { |
| 3451 | printk(KERN_INFO "scsi_debug_init: built %d host(s)\n", | 3963 | pr_info("%s: built %d host(s)\n", __func__, |
| 3452 | scsi_debug_add_host); | 3964 | scsi_debug_add_host); |
| 3453 | } | 3965 | } |
| 3454 | return 0; | 3966 | return 0; |
| 3455 | 3967 | ||
| @@ -3472,6 +3984,7 @@ static void __exit scsi_debug_exit(void) | |||
| 3472 | int k = scsi_debug_add_host; | 3984 | int k = scsi_debug_add_host; |
| 3473 | 3985 | ||
| 3474 | stop_all_queued(); | 3986 | stop_all_queued(); |
| 3987 | free_all_queued(); | ||
| 3475 | for (; k; k--) | 3988 | for (; k; k--) |
| 3476 | sdebug_remove_adapter(); | 3989 | sdebug_remove_adapter(); |
| 3477 | driver_unregister(&sdebug_driverfs_driver); | 3990 | driver_unregister(&sdebug_driverfs_driver); |
| @@ -3569,8 +4082,8 @@ static void sdebug_remove_adapter(void) | |||
| 3569 | --scsi_debug_add_host; | 4082 | --scsi_debug_add_host; |
| 3570 | } | 4083 | } |
| 3571 | 4084 | ||
| 3572 | static | 4085 | static int |
| 3573 | int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) | 4086 | scsi_debug_queuecommand(struct scsi_cmnd *SCpnt) |
| 3574 | { | 4087 | { |
| 3575 | unsigned char *cmd = (unsigned char *) SCpnt->cmnd; | 4088 | unsigned char *cmd = (unsigned char *) SCpnt->cmnd; |
| 3576 | int len, k; | 4089 | int len, k; |
| @@ -3589,32 +4102,34 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) | |||
| 3589 | int unmap = 0; | 4102 | int unmap = 0; |
| 3590 | 4103 | ||
| 3591 | scsi_set_resid(SCpnt, 0); | 4104 | scsi_set_resid(SCpnt, 0); |
| 3592 | if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) { | 4105 | if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && |
| 3593 | printk(KERN_INFO "scsi_debug: cmd "); | 4106 | !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts) && cmd) { |
| 3594 | for (k = 0, len = SCpnt->cmd_len; k < len; ++k) | 4107 | char b[120]; |
| 3595 | printk("%02x ", (int)cmd[k]); | 4108 | int n; |
| 3596 | printk("\n"); | 4109 | |
| 3597 | } | 4110 | len = SCpnt->cmd_len; |
| 3598 | 4111 | if (len > 32) | |
| 3599 | if (target == SCpnt->device->host->hostt->this_id) { | 4112 | strcpy(b, "too long, over 32 bytes"); |
| 3600 | printk(KERN_INFO "scsi_debug: initiator's id used as " | 4113 | else { |
| 3601 | "target!\n"); | 4114 | for (k = 0, n = 0; k < len; ++k) |
| 3602 | return schedule_resp(SCpnt, NULL, done, | 4115 | n += scnprintf(b + n, sizeof(b) - n, "%02x ", |
| 3603 | DID_NO_CONNECT << 16, 0); | 4116 | (unsigned int)cmd[k]); |
| 4117 | } | ||
| 4118 | sdev_printk(KERN_INFO, SCpnt->device, "%s: cmd %s\n", my_name, | ||
| 4119 | b); | ||
| 3604 | } | 4120 | } |
| 3605 | 4121 | ||
| 3606 | if ((SCpnt->device->lun >= scsi_debug_max_luns) && | 4122 | if ((SCpnt->device->lun >= scsi_debug_max_luns) && |
| 3607 | (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS)) | 4123 | (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS)) |
| 3608 | return schedule_resp(SCpnt, NULL, done, | 4124 | return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0); |
| 3609 | DID_NO_CONNECT << 16, 0); | ||
| 3610 | devip = devInfoReg(SCpnt->device); | 4125 | devip = devInfoReg(SCpnt->device); |
| 3611 | if (NULL == devip) | 4126 | if (NULL == devip) |
| 3612 | return schedule_resp(SCpnt, NULL, done, | 4127 | return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0); |
| 3613 | DID_NO_CONNECT << 16, 0); | ||
| 3614 | 4128 | ||
| 3615 | if ((scsi_debug_every_nth != 0) && | 4129 | if ((scsi_debug_every_nth != 0) && |
| 3616 | (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) { | 4130 | (atomic_inc_return(&sdebug_cmnd_count) >= |
| 3617 | scsi_debug_cmnd_count = 0; | 4131 | abs(scsi_debug_every_nth))) { |
| 4132 | atomic_set(&sdebug_cmnd_count, 0); | ||
| 3618 | if (scsi_debug_every_nth < -1) | 4133 | if (scsi_debug_every_nth < -1) |
| 3619 | scsi_debug_every_nth = -1; | 4134 | scsi_debug_every_nth = -1; |
| 3620 | if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts) | 4135 | if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts) |
| @@ -3645,11 +4160,10 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) | |||
| 3645 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 4160 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) |
| 3646 | printk(KERN_INFO "scsi_debug: Opcode: 0x%x " | 4161 | printk(KERN_INFO "scsi_debug: Opcode: 0x%x " |
| 3647 | "not supported for wlun\n", *cmd); | 4162 | "not supported for wlun\n", *cmd); |
| 3648 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 4163 | mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, |
| 3649 | INVALID_OPCODE, 0); | 4164 | INVALID_OPCODE, 0); |
| 3650 | errsts = check_condition_result; | 4165 | errsts = check_condition_result; |
| 3651 | return schedule_resp(SCpnt, devip, done, errsts, | 4166 | return schedule_resp(SCpnt, devip, errsts, 0); |
| 3652 | 0); | ||
| 3653 | } | 4167 | } |
| 3654 | } | 4168 | } |
| 3655 | 4169 | ||
| @@ -3667,7 +4181,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) | |||
| 3667 | errsts = resp_start_stop(SCpnt, devip); | 4181 | errsts = resp_start_stop(SCpnt, devip); |
| 3668 | break; | 4182 | break; |
| 3669 | case ALLOW_MEDIUM_REMOVAL: | 4183 | case ALLOW_MEDIUM_REMOVAL: |
| 3670 | errsts = check_readiness(SCpnt, 1, devip); | 4184 | errsts = check_readiness(SCpnt, UAS_ONLY, devip); |
| 3671 | if (errsts) | 4185 | if (errsts) |
| 3672 | break; | 4186 | break; |
| 3673 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 4187 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) |
| @@ -3675,23 +4189,23 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) | |||
| 3675 | cmd[4] ? "inhibited" : "enabled"); | 4189 | cmd[4] ? "inhibited" : "enabled"); |
| 3676 | break; | 4190 | break; |
| 3677 | case SEND_DIAGNOSTIC: /* mandatory */ | 4191 | case SEND_DIAGNOSTIC: /* mandatory */ |
| 3678 | errsts = check_readiness(SCpnt, 1, devip); | 4192 | errsts = check_readiness(SCpnt, UAS_ONLY, devip); |
| 3679 | break; | 4193 | break; |
| 3680 | case TEST_UNIT_READY: /* mandatory */ | 4194 | case TEST_UNIT_READY: /* mandatory */ |
| 3681 | delay_override = 1; | 4195 | /* delay_override = 1; */ |
| 3682 | errsts = check_readiness(SCpnt, 0, devip); | 4196 | errsts = check_readiness(SCpnt, UAS_TUR, devip); |
| 3683 | break; | 4197 | break; |
| 3684 | case RESERVE: | 4198 | case RESERVE: |
| 3685 | errsts = check_readiness(SCpnt, 1, devip); | 4199 | errsts = check_readiness(SCpnt, UAS_ONLY, devip); |
| 3686 | break; | 4200 | break; |
| 3687 | case RESERVE_10: | 4201 | case RESERVE_10: |
| 3688 | errsts = check_readiness(SCpnt, 1, devip); | 4202 | errsts = check_readiness(SCpnt, UAS_ONLY, devip); |
| 3689 | break; | 4203 | break; |
| 3690 | case RELEASE: | 4204 | case RELEASE: |
| 3691 | errsts = check_readiness(SCpnt, 1, devip); | 4205 | errsts = check_readiness(SCpnt, UAS_ONLY, devip); |
| 3692 | break; | 4206 | break; |
| 3693 | case RELEASE_10: | 4207 | case RELEASE_10: |
| 3694 | errsts = check_readiness(SCpnt, 1, devip); | 4208 | errsts = check_readiness(SCpnt, UAS_ONLY, devip); |
| 3695 | break; | 4209 | break; |
| 3696 | case READ_CAPACITY: | 4210 | case READ_CAPACITY: |
| 3697 | errsts = resp_readcap(SCpnt, devip); | 4211 | errsts = resp_readcap(SCpnt, devip); |
| @@ -3702,20 +4216,20 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) | |||
| 3702 | else if (cmd[1] == SAI_GET_LBA_STATUS) { | 4216 | else if (cmd[1] == SAI_GET_LBA_STATUS) { |
| 3703 | 4217 | ||
| 3704 | if (scsi_debug_lbp() == 0) { | 4218 | if (scsi_debug_lbp() == 0) { |
| 3705 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 4219 | mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, |
| 3706 | INVALID_COMMAND_OPCODE, 0); | 4220 | INVALID_COMMAND_OPCODE, 0); |
| 3707 | errsts = check_condition_result; | 4221 | errsts = check_condition_result; |
| 3708 | } else | 4222 | } else |
| 3709 | errsts = resp_get_lba_status(SCpnt, devip); | 4223 | errsts = resp_get_lba_status(SCpnt, devip); |
| 3710 | } else { | 4224 | } else { |
| 3711 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 4225 | mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, |
| 3712 | INVALID_OPCODE, 0); | 4226 | INVALID_OPCODE, 0); |
| 3713 | errsts = check_condition_result; | 4227 | errsts = check_condition_result; |
| 3714 | } | 4228 | } |
| 3715 | break; | 4229 | break; |
| 3716 | case MAINTENANCE_IN: | 4230 | case MAINTENANCE_IN: |
| 3717 | if (MI_REPORT_TARGET_PGS != cmd[1]) { | 4231 | if (MI_REPORT_TARGET_PGS != cmd[1]) { |
| 3718 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 4232 | mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, |
| 3719 | INVALID_OPCODE, 0); | 4233 | INVALID_OPCODE, 0); |
| 3720 | errsts = check_condition_result; | 4234 | errsts = check_condition_result; |
| 3721 | break; | 4235 | break; |
| @@ -3728,7 +4242,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) | |||
| 3728 | /* READ{10,12,16} and DIF Type 2 are natural enemies */ | 4242 | /* READ{10,12,16} and DIF Type 2 are natural enemies */ |
| 3729 | if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && | 4243 | if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && |
| 3730 | cmd[1] & 0xe0) { | 4244 | cmd[1] & 0xe0) { |
| 3731 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 4245 | mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, |
| 3732 | INVALID_COMMAND_OPCODE, 0); | 4246 | INVALID_COMMAND_OPCODE, 0); |
| 3733 | errsts = check_condition_result; | 4247 | errsts = check_condition_result; |
| 3734 | break; | 4248 | break; |
| @@ -3742,7 +4256,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) | |||
| 3742 | /* fall through */ | 4256 | /* fall through */ |
| 3743 | case READ_6: | 4257 | case READ_6: |
| 3744 | read: | 4258 | read: |
| 3745 | errsts = check_readiness(SCpnt, 0, devip); | 4259 | errsts = check_readiness(SCpnt, UAS_TUR, devip); |
| 3746 | if (errsts) | 4260 | if (errsts) |
| 3747 | break; | 4261 | break; |
| 3748 | if (scsi_debug_fake_rw) | 4262 | if (scsi_debug_fake_rw) |
| @@ -3752,20 +4266,21 @@ read: | |||
| 3752 | if (inj_short) | 4266 | if (inj_short) |
| 3753 | num /= 2; | 4267 | num /= 2; |
| 3754 | 4268 | ||
| 3755 | errsts = resp_read(SCpnt, lba, num, devip, ei_lba); | 4269 | errsts = resp_read(SCpnt, lba, num, ei_lba); |
| 3756 | if (inj_recovered && (0 == errsts)) { | 4270 | if (inj_recovered && (0 == errsts)) { |
| 3757 | mk_sense_buffer(devip, RECOVERED_ERROR, | 4271 | mk_sense_buffer(SCpnt, RECOVERED_ERROR, |
| 3758 | THRESHOLD_EXCEEDED, 0); | 4272 | THRESHOLD_EXCEEDED, 0); |
| 3759 | errsts = check_condition_result; | 4273 | errsts = check_condition_result; |
| 3760 | } else if (inj_transport && (0 == errsts)) { | 4274 | } else if (inj_transport && (0 == errsts)) { |
| 3761 | mk_sense_buffer(devip, ABORTED_COMMAND, | 4275 | mk_sense_buffer(SCpnt, ABORTED_COMMAND, |
| 3762 | TRANSPORT_PROBLEM, ACK_NAK_TO); | 4276 | TRANSPORT_PROBLEM, ACK_NAK_TO); |
| 3763 | errsts = check_condition_result; | 4277 | errsts = check_condition_result; |
| 3764 | } else if (inj_dif && (0 == errsts)) { | 4278 | } else if (inj_dif && (0 == errsts)) { |
| 3765 | mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1); | 4279 | /* Logical block guard check failed */ |
| 4280 | mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1); | ||
| 3766 | errsts = illegal_condition_result; | 4281 | errsts = illegal_condition_result; |
| 3767 | } else if (inj_dix && (0 == errsts)) { | 4282 | } else if (inj_dix && (0 == errsts)) { |
| 3768 | mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1); | 4283 | mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1); |
| 3769 | errsts = illegal_condition_result; | 4284 | errsts = illegal_condition_result; |
| 3770 | } | 4285 | } |
| 3771 | break; | 4286 | break; |
| @@ -3774,7 +4289,7 @@ read: | |||
| 3774 | errsts = resp_report_luns(SCpnt, devip); | 4289 | errsts = resp_report_luns(SCpnt, devip); |
| 3775 | break; | 4290 | break; |
| 3776 | case VERIFY: /* 10 byte SBC-2 command */ | 4291 | case VERIFY: /* 10 byte SBC-2 command */ |
| 3777 | errsts = check_readiness(SCpnt, 0, devip); | 4292 | errsts = check_readiness(SCpnt, UAS_TUR, devip); |
| 3778 | break; | 4293 | break; |
| 3779 | case WRITE_16: | 4294 | case WRITE_16: |
| 3780 | case WRITE_12: | 4295 | case WRITE_12: |
| @@ -3782,7 +4297,7 @@ read: | |||
| 3782 | /* WRITE{10,12,16} and DIF Type 2 are natural enemies */ | 4297 | /* WRITE{10,12,16} and DIF Type 2 are natural enemies */ |
| 3783 | if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && | 4298 | if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && |
| 3784 | cmd[1] & 0xe0) { | 4299 | cmd[1] & 0xe0) { |
| 3785 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 4300 | mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, |
| 3786 | INVALID_COMMAND_OPCODE, 0); | 4301 | INVALID_COMMAND_OPCODE, 0); |
| 3787 | errsts = check_condition_result; | 4302 | errsts = check_condition_result; |
| 3788 | break; | 4303 | break; |
| @@ -3796,22 +4311,22 @@ read: | |||
| 3796 | /* fall through */ | 4311 | /* fall through */ |
| 3797 | case WRITE_6: | 4312 | case WRITE_6: |
| 3798 | write: | 4313 | write: |
| 3799 | errsts = check_readiness(SCpnt, 0, devip); | 4314 | errsts = check_readiness(SCpnt, UAS_TUR, devip); |
| 3800 | if (errsts) | 4315 | if (errsts) |
| 3801 | break; | 4316 | break; |
| 3802 | if (scsi_debug_fake_rw) | 4317 | if (scsi_debug_fake_rw) |
| 3803 | break; | 4318 | break; |
| 3804 | get_data_transfer_info(cmd, &lba, &num, &ei_lba); | 4319 | get_data_transfer_info(cmd, &lba, &num, &ei_lba); |
| 3805 | errsts = resp_write(SCpnt, lba, num, devip, ei_lba); | 4320 | errsts = resp_write(SCpnt, lba, num, ei_lba); |
| 3806 | if (inj_recovered && (0 == errsts)) { | 4321 | if (inj_recovered && (0 == errsts)) { |
| 3807 | mk_sense_buffer(devip, RECOVERED_ERROR, | 4322 | mk_sense_buffer(SCpnt, RECOVERED_ERROR, |
| 3808 | THRESHOLD_EXCEEDED, 0); | 4323 | THRESHOLD_EXCEEDED, 0); |
| 3809 | errsts = check_condition_result; | 4324 | errsts = check_condition_result; |
| 3810 | } else if (inj_dif && (0 == errsts)) { | 4325 | } else if (inj_dif && (0 == errsts)) { |
| 3811 | mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1); | 4326 | mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1); |
| 3812 | errsts = illegal_condition_result; | 4327 | errsts = illegal_condition_result; |
| 3813 | } else if (inj_dix && (0 == errsts)) { | 4328 | } else if (inj_dix && (0 == errsts)) { |
| 3814 | mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1); | 4329 | mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1); |
| 3815 | errsts = illegal_condition_result; | 4330 | errsts = illegal_condition_result; |
| 3816 | } | 4331 | } |
| 3817 | break; | 4332 | break; |
| @@ -3820,7 +4335,7 @@ write: | |||
| 3820 | if (cmd[1] & 0x8) { | 4335 | if (cmd[1] & 0x8) { |
| 3821 | if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) || | 4336 | if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) || |
| 3822 | (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) { | 4337 | (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) { |
| 3823 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 4338 | mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, |
| 3824 | INVALID_FIELD_IN_CDB, 0); | 4339 | INVALID_FIELD_IN_CDB, 0); |
| 3825 | errsts = check_condition_result; | 4340 | errsts = check_condition_result; |
| 3826 | } else | 4341 | } else |
| @@ -3828,19 +4343,23 @@ write: | |||
| 3828 | } | 4343 | } |
| 3829 | if (errsts) | 4344 | if (errsts) |
| 3830 | break; | 4345 | break; |
| 3831 | errsts = check_readiness(SCpnt, 0, devip); | 4346 | errsts = check_readiness(SCpnt, UAS_TUR, devip); |
| 3832 | if (errsts) | 4347 | if (errsts) |
| 3833 | break; | 4348 | break; |
| 4349 | if (scsi_debug_fake_rw) | ||
| 4350 | break; | ||
| 3834 | get_data_transfer_info(cmd, &lba, &num, &ei_lba); | 4351 | get_data_transfer_info(cmd, &lba, &num, &ei_lba); |
| 3835 | errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap); | 4352 | errsts = resp_write_same(SCpnt, lba, num, ei_lba, unmap); |
| 3836 | break; | 4353 | break; |
| 3837 | case UNMAP: | 4354 | case UNMAP: |
| 3838 | errsts = check_readiness(SCpnt, 0, devip); | 4355 | errsts = check_readiness(SCpnt, UAS_TUR, devip); |
| 3839 | if (errsts) | 4356 | if (errsts) |
| 3840 | break; | 4357 | break; |
| 4358 | if (scsi_debug_fake_rw) | ||
| 4359 | break; | ||
| 3841 | 4360 | ||
| 3842 | if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) { | 4361 | if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) { |
| 3843 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 4362 | mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, |
| 3844 | INVALID_COMMAND_OPCODE, 0); | 4363 | INVALID_COMMAND_OPCODE, 0); |
| 3845 | errsts = check_condition_result; | 4364 | errsts = check_condition_result; |
| 3846 | } else | 4365 | } else |
| @@ -3861,29 +4380,29 @@ write: | |||
| 3861 | break; | 4380 | break; |
| 3862 | case SYNCHRONIZE_CACHE: | 4381 | case SYNCHRONIZE_CACHE: |
| 3863 | delay_override = 1; | 4382 | delay_override = 1; |
| 3864 | errsts = check_readiness(SCpnt, 0, devip); | 4383 | errsts = check_readiness(SCpnt, UAS_TUR, devip); |
| 3865 | break; | 4384 | break; |
| 3866 | case WRITE_BUFFER: | 4385 | case WRITE_BUFFER: |
| 3867 | errsts = check_readiness(SCpnt, 1, devip); | 4386 | errsts = check_readiness(SCpnt, UAS_ONLY, devip); |
| 3868 | break; | 4387 | break; |
| 3869 | case XDWRITEREAD_10: | 4388 | case XDWRITEREAD_10: |
| 3870 | if (!scsi_bidi_cmnd(SCpnt)) { | 4389 | if (!scsi_bidi_cmnd(SCpnt)) { |
| 3871 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 4390 | mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, |
| 3872 | INVALID_FIELD_IN_CDB, 0); | 4391 | INVALID_FIELD_IN_CDB, 0); |
| 3873 | errsts = check_condition_result; | 4392 | errsts = check_condition_result; |
| 3874 | break; | 4393 | break; |
| 3875 | } | 4394 | } |
| 3876 | 4395 | ||
| 3877 | errsts = check_readiness(SCpnt, 0, devip); | 4396 | errsts = check_readiness(SCpnt, UAS_TUR, devip); |
| 3878 | if (errsts) | 4397 | if (errsts) |
| 3879 | break; | 4398 | break; |
| 3880 | if (scsi_debug_fake_rw) | 4399 | if (scsi_debug_fake_rw) |
| 3881 | break; | 4400 | break; |
| 3882 | get_data_transfer_info(cmd, &lba, &num, &ei_lba); | 4401 | get_data_transfer_info(cmd, &lba, &num, &ei_lba); |
| 3883 | errsts = resp_read(SCpnt, lba, num, devip, ei_lba); | 4402 | errsts = resp_read(SCpnt, lba, num, ei_lba); |
| 3884 | if (errsts) | 4403 | if (errsts) |
| 3885 | break; | 4404 | break; |
| 3886 | errsts = resp_write(SCpnt, lba, num, devip, ei_lba); | 4405 | errsts = resp_write(SCpnt, lba, num, ei_lba); |
| 3887 | if (errsts) | 4406 | if (errsts) |
| 3888 | break; | 4407 | break; |
| 3889 | errsts = resp_xdwriteread(SCpnt, lba, num, devip); | 4408 | errsts = resp_xdwriteread(SCpnt, lba, num, devip); |
| @@ -3906,27 +4425,138 @@ write: | |||
| 3906 | } | 4425 | } |
| 3907 | } | 4426 | } |
| 3908 | 4427 | ||
| 3909 | mk_sense_buffer(devip, ILLEGAL_REQUEST, | 4428 | mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, |
| 3910 | INVALID_FIELD_IN_CDB, 0); | 4429 | INVALID_FIELD_IN_CDB, 0); |
| 3911 | errsts = check_condition_result; | 4430 | errsts = check_condition_result; |
| 3912 | break; | 4431 | break; |
| 3913 | 4432 | case 0x85: | |
| 4433 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | ||
| 4434 | sdev_printk(KERN_INFO, SCpnt->device, | ||
| 4435 | "%s: ATA PASS-THROUGH(16) not supported\n", my_name); | ||
| 4436 | mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, | ||
| 4437 | INVALID_OPCODE, 0); | ||
| 4438 | errsts = check_condition_result; | ||
| 4439 | break; | ||
| 3914 | default: | 4440 | default: |
| 3915 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) | 4441 | if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) |
| 3916 | printk(KERN_INFO "scsi_debug: Opcode: 0x%x not " | 4442 | sdev_printk(KERN_INFO, SCpnt->device, |
| 3917 | "supported\n", *cmd); | 4443 | "%s: Opcode: 0x%x not supported\n", |
| 3918 | errsts = check_readiness(SCpnt, 1, devip); | 4444 | my_name, *cmd); |
| 4445 | errsts = check_readiness(SCpnt, UAS_ONLY, devip); | ||
| 3919 | if (errsts) | 4446 | if (errsts) |
| 3920 | break; /* Unit attention takes precedence */ | 4447 | break; /* Unit attention takes precedence */ |
| 3921 | mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0); | 4448 | mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, INVALID_OPCODE, 0); |
| 3922 | errsts = check_condition_result; | 4449 | errsts = check_condition_result; |
| 3923 | break; | 4450 | break; |
| 3924 | } | 4451 | } |
| 3925 | return schedule_resp(SCpnt, devip, done, errsts, | 4452 | return schedule_resp(SCpnt, devip, errsts, |
| 3926 | (delay_override ? 0 : scsi_debug_delay)); | 4453 | (delay_override ? 0 : scsi_debug_delay)); |
| 3927 | } | 4454 | } |
| 3928 | 4455 | ||
| 3929 | static DEF_SCSI_QCMD(scsi_debug_queuecommand) | 4456 | static int |
| 4457 | sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd) | ||
| 4458 | { | ||
| 4459 | if (scsi_debug_host_lock) { | ||
| 4460 | unsigned long iflags; | ||
| 4461 | int rc; | ||
| 4462 | |||
| 4463 | spin_lock_irqsave(shost->host_lock, iflags); | ||
| 4464 | rc = scsi_debug_queuecommand(cmd); | ||
| 4465 | spin_unlock_irqrestore(shost->host_lock, iflags); | ||
| 4466 | return rc; | ||
| 4467 | } else | ||
| 4468 | return scsi_debug_queuecommand(cmd); | ||
| 4469 | } | ||
| 4470 | |||
| 4471 | static int | ||
| 4472 | sdebug_change_qdepth(struct scsi_device *sdev, int qdepth, int reason) | ||
| 4473 | { | ||
| 4474 | int num_in_q = 0; | ||
| 4475 | int bad = 0; | ||
| 4476 | unsigned long iflags; | ||
| 4477 | struct sdebug_dev_info *devip; | ||
| 4478 | |||
| 4479 | spin_lock_irqsave(&queued_arr_lock, iflags); | ||
| 4480 | devip = (struct sdebug_dev_info *)sdev->hostdata; | ||
| 4481 | if (NULL == devip) { | ||
| 4482 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
| 4483 | return -ENODEV; | ||
| 4484 | } | ||
| 4485 | num_in_q = atomic_read(&devip->num_in_q); | ||
| 4486 | spin_unlock_irqrestore(&queued_arr_lock, iflags); | ||
| 4487 | if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) { | ||
| 4488 | if (qdepth < 1) | ||
| 4489 | qdepth = 1; | ||
| 4490 | /* allow to exceed max host queued_arr elements for testing */ | ||
| 4491 | if (qdepth > SCSI_DEBUG_CANQUEUE + 10) | ||
| 4492 | qdepth = SCSI_DEBUG_CANQUEUE + 10; | ||
| 4493 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); | ||
| 4494 | } else if (reason == SCSI_QDEPTH_QFULL) | ||
| 4495 | scsi_track_queue_full(sdev, qdepth); | ||
| 4496 | else | ||
| 4497 | bad = 1; | ||
| 4498 | if (bad) | ||
| 4499 | sdev_printk(KERN_WARNING, sdev, | ||
| 4500 | "%s: unknown reason=0x%x\n", __func__, reason); | ||
| 4501 | if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) { | ||
| 4502 | if (SCSI_QDEPTH_QFULL == reason) | ||
| 4503 | sdev_printk(KERN_INFO, sdev, | ||
| 4504 | "%s: -> %d, num_in_q=%d, reason: queue full\n", | ||
| 4505 | __func__, qdepth, num_in_q); | ||
| 4506 | else { | ||
| 4507 | const char *cp; | ||
| 4508 | |||
| 4509 | switch (reason) { | ||
| 4510 | case SCSI_QDEPTH_DEFAULT: | ||
| 4511 | cp = "default (sysfs ?)"; | ||
| 4512 | break; | ||
| 4513 | case SCSI_QDEPTH_RAMP_UP: | ||
| 4514 | cp = "ramp up"; | ||
| 4515 | break; | ||
| 4516 | default: | ||
| 4517 | cp = "unknown"; | ||
| 4518 | break; | ||
| 4519 | } | ||
| 4520 | sdev_printk(KERN_INFO, sdev, | ||
| 4521 | "%s: qdepth=%d, num_in_q=%d, reason: %s\n", | ||
| 4522 | __func__, qdepth, num_in_q, cp); | ||
| 4523 | } | ||
| 4524 | } | ||
| 4525 | return sdev->queue_depth; | ||
| 4526 | } | ||
| 4527 | |||
| 4528 | static int | ||
| 4529 | sdebug_change_qtype(struct scsi_device *sdev, int qtype) | ||
| 4530 | { | ||
| 4531 | if (sdev->tagged_supported) { | ||
| 4532 | scsi_set_tag_type(sdev, qtype); | ||
| 4533 | if (qtype) | ||
| 4534 | scsi_activate_tcq(sdev, sdev->queue_depth); | ||
| 4535 | else | ||
| 4536 | scsi_deactivate_tcq(sdev, sdev->queue_depth); | ||
| 4537 | } else | ||
| 4538 | qtype = 0; | ||
| 4539 | if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) { | ||
| 4540 | const char *cp; | ||
| 4541 | |||
| 4542 | switch (qtype) { | ||
| 4543 | case 0: | ||
| 4544 | cp = "untagged"; | ||
| 4545 | break; | ||
| 4546 | case MSG_SIMPLE_TAG: | ||
| 4547 | cp = "simple tags"; | ||
| 4548 | break; | ||
| 4549 | case MSG_ORDERED_TAG: | ||
| 4550 | cp = "ordered tags"; | ||
| 4551 | break; | ||
| 4552 | default: | ||
| 4553 | cp = "unknown"; | ||
| 4554 | break; | ||
| 4555 | } | ||
| 4556 | sdev_printk(KERN_INFO, sdev, "%s: to %s\n", __func__, cp); | ||
| 4557 | } | ||
| 4558 | return qtype; | ||
| 4559 | } | ||
| 3930 | 4560 | ||
| 3931 | static struct scsi_host_template sdebug_driver_template = { | 4561 | static struct scsi_host_template sdebug_driver_template = { |
| 3932 | .show_info = scsi_debug_show_info, | 4562 | .show_info = scsi_debug_show_info, |
| @@ -3938,17 +4568,19 @@ static struct scsi_host_template sdebug_driver_template = { | |||
| 3938 | .slave_configure = scsi_debug_slave_configure, | 4568 | .slave_configure = scsi_debug_slave_configure, |
| 3939 | .slave_destroy = scsi_debug_slave_destroy, | 4569 | .slave_destroy = scsi_debug_slave_destroy, |
| 3940 | .ioctl = scsi_debug_ioctl, | 4570 | .ioctl = scsi_debug_ioctl, |
| 3941 | .queuecommand = scsi_debug_queuecommand, | 4571 | .queuecommand = sdebug_queuecommand_lock_or_not, |
| 4572 | .change_queue_depth = sdebug_change_qdepth, | ||
| 4573 | .change_queue_type = sdebug_change_qtype, | ||
| 3942 | .eh_abort_handler = scsi_debug_abort, | 4574 | .eh_abort_handler = scsi_debug_abort, |
| 3943 | .eh_bus_reset_handler = scsi_debug_bus_reset, | ||
| 3944 | .eh_device_reset_handler = scsi_debug_device_reset, | 4575 | .eh_device_reset_handler = scsi_debug_device_reset, |
| 4576 | .eh_target_reset_handler = scsi_debug_target_reset, | ||
| 4577 | .eh_bus_reset_handler = scsi_debug_bus_reset, | ||
| 3945 | .eh_host_reset_handler = scsi_debug_host_reset, | 4578 | .eh_host_reset_handler = scsi_debug_host_reset, |
| 3946 | .bios_param = scsi_debug_biosparam, | ||
| 3947 | .can_queue = SCSI_DEBUG_CANQUEUE, | 4579 | .can_queue = SCSI_DEBUG_CANQUEUE, |
| 3948 | .this_id = 7, | 4580 | .this_id = 7, |
| 3949 | .sg_tablesize = 256, | 4581 | .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, |
| 3950 | .cmd_per_lun = 16, | 4582 | .cmd_per_lun = DEF_CMD_PER_LUN, |
| 3951 | .max_sectors = 0xffff, | 4583 | .max_sectors = -1U, |
| 3952 | .use_clustering = DISABLE_CLUSTERING, | 4584 | .use_clustering = DISABLE_CLUSTERING, |
| 3953 | .module = THIS_MODULE, | 4585 | .module = THIS_MODULE, |
| 3954 | }; | 4586 | }; |
| @@ -4032,8 +4664,7 @@ static int sdebug_driver_probe(struct device * dev) | |||
| 4032 | } else | 4664 | } else |
| 4033 | scsi_scan_host(hpnt); | 4665 | scsi_scan_host(hpnt); |
| 4034 | 4666 | ||
| 4035 | 4667 | return error; | |
| 4036 | return error; | ||
| 4037 | } | 4668 | } |
| 4038 | 4669 | ||
| 4039 | static int sdebug_driver_remove(struct device * dev) | 4670 | static int sdebug_driver_remove(struct device * dev) |
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index f969aca0b54e..49014a143c6a 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
| @@ -222,6 +222,7 @@ static struct { | |||
| 222 | {"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, | 222 | {"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, |
| 223 | {"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, | 223 | {"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, |
| 224 | {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, | 224 | {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, |
| 225 | {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, | ||
| 225 | {"Promise", "", NULL, BLIST_SPARSELUN}, | 226 | {"Promise", "", NULL, BLIST_SPARSELUN}, |
| 226 | {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, | 227 | {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, |
| 227 | {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, | 228 | {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, |
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 7e957918f33f..5db8454474ee 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
| @@ -59,11 +59,11 @@ static int scsi_try_to_abort_cmd(struct scsi_host_template *, | |||
| 59 | /* called with shost->host_lock held */ | 59 | /* called with shost->host_lock held */ |
| 60 | void scsi_eh_wakeup(struct Scsi_Host *shost) | 60 | void scsi_eh_wakeup(struct Scsi_Host *shost) |
| 61 | { | 61 | { |
| 62 | if (shost->host_busy == shost->host_failed) { | 62 | if (atomic_read(&shost->host_busy) == shost->host_failed) { |
| 63 | trace_scsi_eh_wakeup(shost); | 63 | trace_scsi_eh_wakeup(shost); |
| 64 | wake_up_process(shost->ehandler); | 64 | wake_up_process(shost->ehandler); |
| 65 | SCSI_LOG_ERROR_RECOVERY(5, | 65 | SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost, |
| 66 | printk("Waking error handler thread\n")); | 66 | "Waking error handler thread\n")); |
| 67 | } | 67 | } |
| 68 | } | 68 | } |
| 69 | 69 | ||
| @@ -193,7 +193,7 @@ scsi_abort_command(struct scsi_cmnd *scmd) | |||
| 193 | SCSI_LOG_ERROR_RECOVERY(3, | 193 | SCSI_LOG_ERROR_RECOVERY(3, |
| 194 | scmd_printk(KERN_INFO, scmd, | 194 | scmd_printk(KERN_INFO, scmd, |
| 195 | "scmd %p previous abort failed\n", scmd)); | 195 | "scmd %p previous abort failed\n", scmd)); |
| 196 | cancel_delayed_work(&scmd->abort_work); | 196 | BUG_ON(delayed_work_pending(&scmd->abort_work)); |
| 197 | return FAILED; | 197 | return FAILED; |
| 198 | } | 198 | } |
| 199 | 199 | ||
| @@ -319,8 +319,8 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev) | |||
| 319 | 319 | ||
| 320 | online = scsi_device_online(sdev); | 320 | online = scsi_device_online(sdev); |
| 321 | 321 | ||
| 322 | SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __func__, | 322 | SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_INFO, sdev, |
| 323 | online)); | 323 | "%s: rtn: %d\n", __func__, online)); |
| 324 | 324 | ||
| 325 | return online; | 325 | return online; |
| 326 | } | 326 | } |
| @@ -365,8 +365,9 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost, | |||
| 365 | } | 365 | } |
| 366 | } | 366 | } |
| 367 | 367 | ||
| 368 | SCSI_LOG_ERROR_RECOVERY(2, printk("Total of %d commands on %d" | 368 | SCSI_LOG_ERROR_RECOVERY(2, shost_printk(KERN_INFO, shost, |
| 369 | " devices require eh work\n", | 369 | "Total of %d commands on %d" |
| 370 | " devices require eh work\n", | ||
| 370 | total_failures, devices_failed)); | 371 | total_failures, devices_failed)); |
| 371 | } | 372 | } |
| 372 | #endif | 373 | #endif |
| @@ -738,8 +739,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd) | |||
| 738 | { | 739 | { |
| 739 | struct completion *eh_action; | 740 | struct completion *eh_action; |
| 740 | 741 | ||
| 741 | SCSI_LOG_ERROR_RECOVERY(3, | 742 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, |
| 742 | printk("%s scmd: %p result: %x\n", | 743 | "%s scmd: %p result: %x\n", |
| 743 | __func__, scmd, scmd->result)); | 744 | __func__, scmd, scmd->result)); |
| 744 | 745 | ||
| 745 | eh_action = scmd->device->host->eh_action; | 746 | eh_action = scmd->device->host->eh_action; |
| @@ -758,8 +759,8 @@ static int scsi_try_host_reset(struct scsi_cmnd *scmd) | |||
| 758 | struct Scsi_Host *host = scmd->device->host; | 759 | struct Scsi_Host *host = scmd->device->host; |
| 759 | struct scsi_host_template *hostt = host->hostt; | 760 | struct scsi_host_template *hostt = host->hostt; |
| 760 | 761 | ||
| 761 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n", | 762 | SCSI_LOG_ERROR_RECOVERY(3, |
| 762 | __func__)); | 763 | shost_printk(KERN_INFO, host, "Snd Host RST\n")); |
| 763 | 764 | ||
| 764 | if (!hostt->eh_host_reset_handler) | 765 | if (!hostt->eh_host_reset_handler) |
| 765 | return FAILED; | 766 | return FAILED; |
| @@ -788,8 +789,8 @@ static int scsi_try_bus_reset(struct scsi_cmnd *scmd) | |||
| 788 | struct Scsi_Host *host = scmd->device->host; | 789 | struct Scsi_Host *host = scmd->device->host; |
| 789 | struct scsi_host_template *hostt = host->hostt; | 790 | struct scsi_host_template *hostt = host->hostt; |
| 790 | 791 | ||
| 791 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n", | 792 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, |
| 792 | __func__)); | 793 | "%s: Snd Bus RST\n", __func__)); |
| 793 | 794 | ||
| 794 | if (!hostt->eh_bus_reset_handler) | 795 | if (!hostt->eh_bus_reset_handler) |
| 795 | return FAILED; | 796 | return FAILED; |
| @@ -1036,8 +1037,8 @@ retry: | |||
| 1036 | 1037 | ||
| 1037 | scsi_log_completion(scmd, rtn); | 1038 | scsi_log_completion(scmd, rtn); |
| 1038 | 1039 | ||
| 1039 | SCSI_LOG_ERROR_RECOVERY(3, | 1040 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, |
| 1040 | printk("%s: scmd: %p, timeleft: %ld\n", | 1041 | "%s: scmd: %p, timeleft: %ld\n", |
| 1041 | __func__, scmd, timeleft)); | 1042 | __func__, scmd, timeleft)); |
| 1042 | 1043 | ||
| 1043 | /* | 1044 | /* |
| @@ -1051,9 +1052,8 @@ retry: | |||
| 1051 | */ | 1052 | */ |
| 1052 | if (timeleft) { | 1053 | if (timeleft) { |
| 1053 | rtn = scsi_eh_completed_normally(scmd); | 1054 | rtn = scsi_eh_completed_normally(scmd); |
| 1054 | SCSI_LOG_ERROR_RECOVERY(3, | 1055 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, |
| 1055 | printk("%s: scsi_eh_completed_normally %x\n", | 1056 | "%s: scsi_eh_completed_normally %x\n", __func__, rtn)); |
| 1056 | __func__, rtn)); | ||
| 1057 | 1057 | ||
| 1058 | switch (rtn) { | 1058 | switch (rtn) { |
| 1059 | case SUCCESS: | 1059 | case SUCCESS: |
| @@ -1177,9 +1177,9 @@ int scsi_eh_get_sense(struct list_head *work_q, | |||
| 1177 | if (rtn != SUCCESS) | 1177 | if (rtn != SUCCESS) |
| 1178 | continue; | 1178 | continue; |
| 1179 | 1179 | ||
| 1180 | SCSI_LOG_ERROR_RECOVERY(3, printk("sense requested for %p" | 1180 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, |
| 1181 | " result %x\n", scmd, | 1181 | "sense requested for %p result %x\n", |
| 1182 | scmd->result)); | 1182 | scmd, scmd->result)); |
| 1183 | SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense("bh", scmd)); | 1183 | SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense("bh", scmd)); |
| 1184 | 1184 | ||
| 1185 | rtn = scsi_decide_disposition(scmd); | 1185 | rtn = scsi_decide_disposition(scmd); |
| @@ -1220,8 +1220,8 @@ retry_tur: | |||
| 1220 | rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, | 1220 | rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, |
| 1221 | scmd->device->eh_timeout, 0); | 1221 | scmd->device->eh_timeout, 0); |
| 1222 | 1222 | ||
| 1223 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", | 1223 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, |
| 1224 | __func__, scmd, rtn)); | 1224 | "%s: scmd %p rtn %x\n", __func__, scmd, rtn)); |
| 1225 | 1225 | ||
| 1226 | switch (rtn) { | 1226 | switch (rtn) { |
| 1227 | case NEEDS_RETRY: | 1227 | case NEEDS_RETRY: |
| @@ -1323,16 +1323,16 @@ static int scsi_eh_abort_cmds(struct list_head *work_q, | |||
| 1323 | __func__)); | 1323 | __func__)); |
| 1324 | return list_empty(work_q); | 1324 | return list_empty(work_q); |
| 1325 | } | 1325 | } |
| 1326 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:" | 1326 | SCSI_LOG_ERROR_RECOVERY(3, |
| 1327 | "0x%p\n", current->comm, | 1327 | shost_printk(KERN_INFO, shost, |
| 1328 | scmd)); | 1328 | "%s: aborting cmd: 0x%p\n", |
| 1329 | current->comm, scmd)); | ||
| 1329 | rtn = scsi_try_to_abort_cmd(shost->hostt, scmd); | 1330 | rtn = scsi_try_to_abort_cmd(shost->hostt, scmd); |
| 1330 | if (rtn == FAILED) { | 1331 | if (rtn == FAILED) { |
| 1331 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting" | 1332 | SCSI_LOG_ERROR_RECOVERY(3, |
| 1332 | " cmd failed:" | 1333 | shost_printk(KERN_INFO, shost, |
| 1333 | "0x%p\n", | 1334 | "%s: aborting cmd failed: 0x%p\n", |
| 1334 | current->comm, | 1335 | current->comm, scmd)); |
| 1335 | scmd)); | ||
| 1336 | list_splice_init(&check_list, work_q); | 1336 | list_splice_init(&check_list, work_q); |
| 1337 | return list_empty(work_q); | 1337 | return list_empty(work_q); |
| 1338 | } | 1338 | } |
| @@ -1406,8 +1406,10 @@ static int scsi_eh_stu(struct Scsi_Host *shost, | |||
| 1406 | if (!stu_scmd) | 1406 | if (!stu_scmd) |
| 1407 | continue; | 1407 | continue; |
| 1408 | 1408 | ||
| 1409 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending START_UNIT to sdev:" | 1409 | SCSI_LOG_ERROR_RECOVERY(3, |
| 1410 | " 0x%p\n", current->comm, sdev)); | 1410 | shost_printk(KERN_INFO, shost, |
| 1411 | "%s: Sending START_UNIT to sdev: 0x%p\n", | ||
| 1412 | current->comm, sdev)); | ||
| 1411 | 1413 | ||
| 1412 | if (!scsi_eh_try_stu(stu_scmd)) { | 1414 | if (!scsi_eh_try_stu(stu_scmd)) { |
| 1413 | if (!scsi_device_online(sdev) || | 1415 | if (!scsi_device_online(sdev) || |
| @@ -1421,8 +1423,9 @@ static int scsi_eh_stu(struct Scsi_Host *shost, | |||
| 1421 | } | 1423 | } |
| 1422 | } else { | 1424 | } else { |
| 1423 | SCSI_LOG_ERROR_RECOVERY(3, | 1425 | SCSI_LOG_ERROR_RECOVERY(3, |
| 1424 | printk("%s: START_UNIT failed to sdev:" | 1426 | shost_printk(KERN_INFO, shost, |
| 1425 | " 0x%p\n", current->comm, sdev)); | 1427 | "%s: START_UNIT failed to sdev:" |
| 1428 | " 0x%p\n", current->comm, sdev)); | ||
| 1426 | } | 1429 | } |
| 1427 | } | 1430 | } |
| 1428 | 1431 | ||
| @@ -1468,9 +1471,10 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, | |||
| 1468 | if (!bdr_scmd) | 1471 | if (!bdr_scmd) |
| 1469 | continue; | 1472 | continue; |
| 1470 | 1473 | ||
| 1471 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BDR sdev:" | 1474 | SCSI_LOG_ERROR_RECOVERY(3, |
| 1472 | " 0x%p\n", current->comm, | 1475 | shost_printk(KERN_INFO, shost, |
| 1473 | sdev)); | 1476 | "%s: Sending BDR sdev: 0x%p\n", |
| 1477 | current->comm, sdev)); | ||
| 1474 | rtn = scsi_try_bus_device_reset(bdr_scmd); | 1478 | rtn = scsi_try_bus_device_reset(bdr_scmd); |
| 1475 | if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { | 1479 | if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { |
| 1476 | if (!scsi_device_online(sdev) || | 1480 | if (!scsi_device_online(sdev) || |
| @@ -1485,11 +1489,10 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, | |||
| 1485 | } | 1489 | } |
| 1486 | } | 1490 | } |
| 1487 | } else { | 1491 | } else { |
| 1488 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BDR" | 1492 | SCSI_LOG_ERROR_RECOVERY(3, |
| 1489 | " failed sdev:" | 1493 | shost_printk(KERN_INFO, shost, |
| 1490 | "0x%p\n", | 1494 | "%s: BDR failed sdev: 0x%p\n", |
| 1491 | current->comm, | 1495 | current->comm, sdev)); |
| 1492 | sdev)); | ||
| 1493 | } | 1496 | } |
| 1494 | } | 1497 | } |
| 1495 | 1498 | ||
| @@ -1533,15 +1536,17 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost, | |||
| 1533 | scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry); | 1536 | scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry); |
| 1534 | id = scmd_id(scmd); | 1537 | id = scmd_id(scmd); |
| 1535 | 1538 | ||
| 1536 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset " | 1539 | SCSI_LOG_ERROR_RECOVERY(3, |
| 1537 | "to target %d\n", | 1540 | shost_printk(KERN_INFO, shost, |
| 1538 | current->comm, id)); | 1541 | "%s: Sending target reset to target %d\n", |
| 1542 | current->comm, id)); | ||
| 1539 | rtn = scsi_try_target_reset(scmd); | 1543 | rtn = scsi_try_target_reset(scmd); |
| 1540 | if (rtn != SUCCESS && rtn != FAST_IO_FAIL) | 1544 | if (rtn != SUCCESS && rtn != FAST_IO_FAIL) |
| 1541 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Target reset" | 1545 | SCSI_LOG_ERROR_RECOVERY(3, |
| 1542 | " failed target: " | 1546 | shost_printk(KERN_INFO, shost, |
| 1543 | "%d\n", | 1547 | "%s: Target reset failed" |
| 1544 | current->comm, id)); | 1548 | " target: %d\n", |
| 1549 | current->comm, id)); | ||
| 1545 | list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) { | 1550 | list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) { |
| 1546 | if (scmd_id(scmd) != id) | 1551 | if (scmd_id(scmd) != id) |
| 1547 | continue; | 1552 | continue; |
| @@ -1605,9 +1610,10 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost, | |||
| 1605 | 1610 | ||
| 1606 | if (!chan_scmd) | 1611 | if (!chan_scmd) |
| 1607 | continue; | 1612 | continue; |
| 1608 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BRST chan:" | 1613 | SCSI_LOG_ERROR_RECOVERY(3, |
| 1609 | " %d\n", current->comm, | 1614 | shost_printk(KERN_INFO, shost, |
| 1610 | channel)); | 1615 | "%s: Sending BRST chan: %d\n", |
| 1616 | current->comm, channel)); | ||
| 1611 | rtn = scsi_try_bus_reset(chan_scmd); | 1617 | rtn = scsi_try_bus_reset(chan_scmd); |
| 1612 | if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { | 1618 | if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { |
| 1613 | list_for_each_entry_safe(scmd, next, work_q, eh_entry) { | 1619 | list_for_each_entry_safe(scmd, next, work_q, eh_entry) { |
| @@ -1621,10 +1627,10 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost, | |||
| 1621 | } | 1627 | } |
| 1622 | } | 1628 | } |
| 1623 | } else { | 1629 | } else { |
| 1624 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST" | 1630 | SCSI_LOG_ERROR_RECOVERY(3, |
| 1625 | " failed chan: %d\n", | 1631 | shost_printk(KERN_INFO, shost, |
| 1626 | current->comm, | 1632 | "%s: BRST failed chan: %d\n", |
| 1627 | channel)); | 1633 | current->comm, channel)); |
| 1628 | } | 1634 | } |
| 1629 | } | 1635 | } |
| 1630 | return scsi_eh_test_devices(&check_list, work_q, done_q, 0); | 1636 | return scsi_eh_test_devices(&check_list, work_q, done_q, 0); |
| @@ -1635,7 +1641,8 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost, | |||
| 1635 | * @work_q: list_head for processed commands. | 1641 | * @work_q: list_head for processed commands. |
| 1636 | * @done_q: list_head for processed commands. | 1642 | * @done_q: list_head for processed commands. |
| 1637 | */ | 1643 | */ |
| 1638 | static int scsi_eh_host_reset(struct list_head *work_q, | 1644 | static int scsi_eh_host_reset(struct Scsi_Host *shost, |
| 1645 | struct list_head *work_q, | ||
| 1639 | struct list_head *done_q) | 1646 | struct list_head *done_q) |
| 1640 | { | 1647 | { |
| 1641 | struct scsi_cmnd *scmd, *next; | 1648 | struct scsi_cmnd *scmd, *next; |
| @@ -1646,8 +1653,10 @@ static int scsi_eh_host_reset(struct list_head *work_q, | |||
| 1646 | scmd = list_entry(work_q->next, | 1653 | scmd = list_entry(work_q->next, |
| 1647 | struct scsi_cmnd, eh_entry); | 1654 | struct scsi_cmnd, eh_entry); |
| 1648 | 1655 | ||
| 1649 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending HRST\n" | 1656 | SCSI_LOG_ERROR_RECOVERY(3, |
| 1650 | , current->comm)); | 1657 | shost_printk(KERN_INFO, shost, |
| 1658 | "%s: Sending HRST\n", | ||
| 1659 | current->comm)); | ||
| 1651 | 1660 | ||
| 1652 | rtn = scsi_try_host_reset(scmd); | 1661 | rtn = scsi_try_host_reset(scmd); |
| 1653 | if (rtn == SUCCESS) { | 1662 | if (rtn == SUCCESS) { |
| @@ -1657,9 +1666,10 @@ static int scsi_eh_host_reset(struct list_head *work_q, | |||
| 1657 | scsi_eh_finish_cmd(scmd, done_q); | 1666 | scsi_eh_finish_cmd(scmd, done_q); |
| 1658 | } | 1667 | } |
| 1659 | } else { | 1668 | } else { |
| 1660 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: HRST" | 1669 | SCSI_LOG_ERROR_RECOVERY(3, |
| 1661 | " failed\n", | 1670 | shost_printk(KERN_INFO, shost, |
| 1662 | current->comm)); | 1671 | "%s: HRST failed\n", |
| 1672 | current->comm)); | ||
| 1663 | } | 1673 | } |
| 1664 | } | 1674 | } |
| 1665 | return scsi_eh_test_devices(&check_list, work_q, done_q, 1); | 1675 | return scsi_eh_test_devices(&check_list, work_q, done_q, 1); |
| @@ -1751,9 +1761,8 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) | |||
| 1751 | * up to the top level. | 1761 | * up to the top level. |
| 1752 | */ | 1762 | */ |
| 1753 | if (!scsi_device_online(scmd->device)) { | 1763 | if (!scsi_device_online(scmd->device)) { |
| 1754 | SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report" | 1764 | SCSI_LOG_ERROR_RECOVERY(5, scmd_printk(KERN_INFO, scmd, |
| 1755 | " as SUCCESS\n", | 1765 | "%s: device offline - report as SUCCESS\n", __func__)); |
| 1756 | __func__)); | ||
| 1757 | return SUCCESS; | 1766 | return SUCCESS; |
| 1758 | } | 1767 | } |
| 1759 | 1768 | ||
| @@ -1999,8 +2008,7 @@ static void scsi_restart_operations(struct Scsi_Host *shost) | |||
| 1999 | * ioctls to queued block devices. | 2008 | * ioctls to queued block devices. |
| 2000 | */ | 2009 | */ |
| 2001 | SCSI_LOG_ERROR_RECOVERY(3, | 2010 | SCSI_LOG_ERROR_RECOVERY(3, |
| 2002 | printk("scsi_eh_%d waking up host to restart\n", | 2011 | shost_printk(KERN_INFO, shost, "waking up host to restart\n")); |
| 2003 | shost->host_no)); | ||
| 2004 | 2012 | ||
| 2005 | spin_lock_irqsave(shost->host_lock, flags); | 2013 | spin_lock_irqsave(shost->host_lock, flags); |
| 2006 | if (scsi_host_set_state(shost, SHOST_RUNNING)) | 2014 | if (scsi_host_set_state(shost, SHOST_RUNNING)) |
| @@ -2047,7 +2055,7 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost, | |||
| 2047 | if (!scsi_eh_bus_device_reset(shost, work_q, done_q)) | 2055 | if (!scsi_eh_bus_device_reset(shost, work_q, done_q)) |
| 2048 | if (!scsi_eh_target_reset(shost, work_q, done_q)) | 2056 | if (!scsi_eh_target_reset(shost, work_q, done_q)) |
| 2049 | if (!scsi_eh_bus_reset(shost, work_q, done_q)) | 2057 | if (!scsi_eh_bus_reset(shost, work_q, done_q)) |
| 2050 | if (!scsi_eh_host_reset(work_q, done_q)) | 2058 | if (!scsi_eh_host_reset(shost, work_q, done_q)) |
| 2051 | scsi_eh_offline_sdevs(work_q, | 2059 | scsi_eh_offline_sdevs(work_q, |
| 2052 | done_q); | 2060 | done_q); |
| 2053 | } | 2061 | } |
| @@ -2066,10 +2074,10 @@ void scsi_eh_flush_done_q(struct list_head *done_q) | |||
| 2066 | if (scsi_device_online(scmd->device) && | 2074 | if (scsi_device_online(scmd->device) && |
| 2067 | !scsi_noretry_cmd(scmd) && | 2075 | !scsi_noretry_cmd(scmd) && |
| 2068 | (++scmd->retries <= scmd->allowed)) { | 2076 | (++scmd->retries <= scmd->allowed)) { |
| 2069 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush" | 2077 | SCSI_LOG_ERROR_RECOVERY(3, |
| 2070 | " retry cmd: %p\n", | 2078 | scmd_printk(KERN_INFO, scmd, |
| 2071 | current->comm, | 2079 | "%s: flush retry cmd: %p\n", |
| 2072 | scmd)); | 2080 | current->comm, scmd)); |
| 2073 | scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); | 2081 | scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); |
| 2074 | } else { | 2082 | } else { |
| 2075 | /* | 2083 | /* |
| @@ -2079,9 +2087,10 @@ void scsi_eh_flush_done_q(struct list_head *done_q) | |||
| 2079 | */ | 2087 | */ |
| 2080 | if (!scmd->result) | 2088 | if (!scmd->result) |
| 2081 | scmd->result |= (DRIVER_TIMEOUT << 24); | 2089 | scmd->result |= (DRIVER_TIMEOUT << 24); |
| 2082 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush finish" | 2090 | SCSI_LOG_ERROR_RECOVERY(3, |
| 2083 | " cmd: %p\n", | 2091 | scmd_printk(KERN_INFO, scmd, |
| 2084 | current->comm, scmd)); | 2092 | "%s: flush finish cmd: %p\n", |
| 2093 | current->comm, scmd)); | ||
| 2085 | scsi_finish_command(scmd); | 2094 | scsi_finish_command(scmd); |
| 2086 | } | 2095 | } |
| 2087 | } | 2096 | } |
| @@ -2155,19 +2164,22 @@ int scsi_error_handler(void *data) | |||
| 2155 | while (!kthread_should_stop()) { | 2164 | while (!kthread_should_stop()) { |
| 2156 | set_current_state(TASK_INTERRUPTIBLE); | 2165 | set_current_state(TASK_INTERRUPTIBLE); |
| 2157 | if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) || | 2166 | if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) || |
| 2158 | shost->host_failed != shost->host_busy) { | 2167 | shost->host_failed != atomic_read(&shost->host_busy)) { |
| 2159 | SCSI_LOG_ERROR_RECOVERY(1, | 2168 | SCSI_LOG_ERROR_RECOVERY(1, |
| 2160 | printk("scsi_eh_%d: sleeping\n", | 2169 | shost_printk(KERN_INFO, shost, |
| 2161 | shost->host_no)); | 2170 | "scsi_eh_%d: sleeping\n", |
| 2171 | shost->host_no)); | ||
| 2162 | schedule(); | 2172 | schedule(); |
| 2163 | continue; | 2173 | continue; |
| 2164 | } | 2174 | } |
| 2165 | 2175 | ||
| 2166 | __set_current_state(TASK_RUNNING); | 2176 | __set_current_state(TASK_RUNNING); |
| 2167 | SCSI_LOG_ERROR_RECOVERY(1, | 2177 | SCSI_LOG_ERROR_RECOVERY(1, |
| 2168 | printk("scsi_eh_%d: waking up %d/%d/%d\n", | 2178 | shost_printk(KERN_INFO, shost, |
| 2169 | shost->host_no, shost->host_eh_scheduled, | 2179 | "scsi_eh_%d: waking up %d/%d/%d\n", |
| 2170 | shost->host_failed, shost->host_busy)); | 2180 | shost->host_no, shost->host_eh_scheduled, |
| 2181 | shost->host_failed, | ||
| 2182 | atomic_read(&shost->host_busy))); | ||
| 2171 | 2183 | ||
| 2172 | /* | 2184 | /* |
| 2173 | * We have a host that is failing for some reason. Figure out | 2185 | * We have a host that is failing for some reason. Figure out |
| @@ -2201,7 +2213,9 @@ int scsi_error_handler(void *data) | |||
| 2201 | __set_current_state(TASK_RUNNING); | 2213 | __set_current_state(TASK_RUNNING); |
| 2202 | 2214 | ||
| 2203 | SCSI_LOG_ERROR_RECOVERY(1, | 2215 | SCSI_LOG_ERROR_RECOVERY(1, |
| 2204 | printk("Error handler scsi_eh_%d exiting\n", shost->host_no)); | 2216 | shost_printk(KERN_INFO, shost, |
| 2217 | "Error handler scsi_eh_%d exiting\n", | ||
| 2218 | shost->host_no)); | ||
| 2205 | shost->ehandler = NULL; | 2219 | shost->ehandler = NULL; |
| 2206 | return 0; | 2220 | return 0; |
| 2207 | } | 2221 | } |
| @@ -2362,8 +2376,8 @@ scsi_reset_provider(struct scsi_device *dev, int flag) | |||
| 2362 | * suspended while we performed the TMF. | 2376 | * suspended while we performed the TMF. |
| 2363 | */ | 2377 | */ |
| 2364 | SCSI_LOG_ERROR_RECOVERY(3, | 2378 | SCSI_LOG_ERROR_RECOVERY(3, |
| 2365 | printk("%s: waking up host to restart after TMF\n", | 2379 | shost_printk(KERN_INFO, shost, |
| 2366 | __func__)); | 2380 | "waking up host to restart after TMF\n")); |
| 2367 | 2381 | ||
| 2368 | wake_up(&shost->host_wait); | 2382 | wake_up(&shost->host_wait); |
| 2369 | 2383 | ||
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index d9564fb04f62..1aaaf43c6803 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c | |||
| @@ -91,12 +91,14 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, | |||
| 91 | int result; | 91 | int result; |
| 92 | struct scsi_sense_hdr sshdr; | 92 | struct scsi_sense_hdr sshdr; |
| 93 | 93 | ||
| 94 | SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", *cmd)); | 94 | SCSI_LOG_IOCTL(1, sdev_printk(KERN_INFO, sdev, |
| 95 | "Trying ioctl with scsi command %d\n", *cmd)); | ||
| 95 | 96 | ||
| 96 | result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, | 97 | result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, |
| 97 | &sshdr, timeout, retries, NULL); | 98 | &sshdr, timeout, retries, NULL); |
| 98 | 99 | ||
| 99 | SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", result)); | 100 | SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev, |
| 101 | "Ioctl returned 0x%x\n", result)); | ||
| 100 | 102 | ||
| 101 | if ((driver_byte(result) & DRIVER_SENSE) && | 103 | if ((driver_byte(result) & DRIVER_SENSE) && |
| 102 | (scsi_sense_valid(&sshdr))) { | 104 | (scsi_sense_valid(&sshdr))) { |
| @@ -105,9 +107,11 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, | |||
| 105 | if (cmd[0] == ALLOW_MEDIUM_REMOVAL) | 107 | if (cmd[0] == ALLOW_MEDIUM_REMOVAL) |
| 106 | sdev->lockable = 0; | 108 | sdev->lockable = 0; |
| 107 | else | 109 | else |
| 108 | printk(KERN_INFO "ioctl_internal_command: " | 110 | sdev_printk(KERN_INFO, sdev, |
| 109 | "ILLEGAL REQUEST asc=0x%x ascq=0x%x\n", | 111 | "ioctl_internal_command: " |
| 110 | sshdr.asc, sshdr.ascq); | 112 | "ILLEGAL REQUEST " |
| 113 | "asc=0x%x ascq=0x%x\n", | ||
| 114 | sshdr.asc, sshdr.ascq); | ||
| 111 | break; | 115 | break; |
| 112 | case NOT_READY: /* This happens if there is no disc in drive */ | 116 | case NOT_READY: /* This happens if there is no disc in drive */ |
| 113 | if (sdev->removable) | 117 | if (sdev->removable) |
| @@ -127,7 +131,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, | |||
| 127 | } | 131 | } |
| 128 | } | 132 | } |
| 129 | 133 | ||
| 130 | SCSI_LOG_IOCTL(2, printk("IOCTL Releasing command\n")); | 134 | SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev, |
| 135 | "IOCTL Releasing command\n")); | ||
| 131 | return result; | 136 | return result; |
| 132 | } | 137 | } |
| 133 | 138 | ||
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index f7e316368c99..d837dc180522 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -1,5 +1,6 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * scsi_lib.c Copyright (C) 1999 Eric Youngdale | 2 | * Copyright (C) 1999 Eric Youngdale |
| 3 | * Copyright (C) 2014 Christoph Hellwig | ||
| 3 | * | 4 | * |
| 4 | * SCSI queueing library. | 5 | * SCSI queueing library. |
| 5 | * Initial versions: Eric Youngdale (eric@andante.org). | 6 | * Initial versions: Eric Youngdale (eric@andante.org). |
| @@ -20,6 +21,7 @@ | |||
| 20 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
| 21 | #include <linux/hardirq.h> | 22 | #include <linux/hardirq.h> |
| 22 | #include <linux/scatterlist.h> | 23 | #include <linux/scatterlist.h> |
| 24 | #include <linux/blk-mq.h> | ||
| 23 | 25 | ||
| 24 | #include <scsi/scsi.h> | 26 | #include <scsi/scsi.h> |
| 25 | #include <scsi/scsi_cmnd.h> | 27 | #include <scsi/scsi_cmnd.h> |
| @@ -29,6 +31,8 @@ | |||
| 29 | #include <scsi/scsi_eh.h> | 31 | #include <scsi/scsi_eh.h> |
| 30 | #include <scsi/scsi_host.h> | 32 | #include <scsi/scsi_host.h> |
| 31 | 33 | ||
| 34 | #include <trace/events/scsi.h> | ||
| 35 | |||
| 32 | #include "scsi_priv.h" | 36 | #include "scsi_priv.h" |
| 33 | #include "scsi_logging.h" | 37 | #include "scsi_logging.h" |
| 34 | 38 | ||
| @@ -75,28 +79,12 @@ struct kmem_cache *scsi_sdb_cache; | |||
| 75 | */ | 79 | */ |
| 76 | #define SCSI_QUEUE_DELAY 3 | 80 | #define SCSI_QUEUE_DELAY 3 |
| 77 | 81 | ||
| 78 | /** | 82 | static void |
| 79 | * __scsi_queue_insert - private queue insertion | 83 | scsi_set_blocked(struct scsi_cmnd *cmd, int reason) |
| 80 | * @cmd: The SCSI command being requeued | ||
| 81 | * @reason: The reason for the requeue | ||
| 82 | * @unbusy: Whether the queue should be unbusied | ||
| 83 | * | ||
| 84 | * This is a private queue insertion. The public interface | ||
| 85 | * scsi_queue_insert() always assumes the queue should be unbusied | ||
| 86 | * because it's always called before the completion. This function is | ||
| 87 | * for a requeue after completion, which should only occur in this | ||
| 88 | * file. | ||
| 89 | */ | ||
| 90 | static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) | ||
| 91 | { | 84 | { |
| 92 | struct Scsi_Host *host = cmd->device->host; | 85 | struct Scsi_Host *host = cmd->device->host; |
| 93 | struct scsi_device *device = cmd->device; | 86 | struct scsi_device *device = cmd->device; |
| 94 | struct scsi_target *starget = scsi_target(device); | 87 | struct scsi_target *starget = scsi_target(device); |
| 95 | struct request_queue *q = device->request_queue; | ||
| 96 | unsigned long flags; | ||
| 97 | |||
| 98 | SCSI_LOG_MLQUEUE(1, | ||
| 99 | printk("Inserting command %p into mlqueue\n", cmd)); | ||
| 100 | 88 | ||
| 101 | /* | 89 | /* |
| 102 | * Set the appropriate busy bit for the device/host. | 90 | * Set the appropriate busy bit for the device/host. |
| @@ -113,16 +101,52 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) | |||
| 113 | */ | 101 | */ |
| 114 | switch (reason) { | 102 | switch (reason) { |
| 115 | case SCSI_MLQUEUE_HOST_BUSY: | 103 | case SCSI_MLQUEUE_HOST_BUSY: |
| 116 | host->host_blocked = host->max_host_blocked; | 104 | atomic_set(&host->host_blocked, host->max_host_blocked); |
| 117 | break; | 105 | break; |
| 118 | case SCSI_MLQUEUE_DEVICE_BUSY: | 106 | case SCSI_MLQUEUE_DEVICE_BUSY: |
| 119 | case SCSI_MLQUEUE_EH_RETRY: | 107 | case SCSI_MLQUEUE_EH_RETRY: |
| 120 | device->device_blocked = device->max_device_blocked; | 108 | atomic_set(&device->device_blocked, |
| 109 | device->max_device_blocked); | ||
| 121 | break; | 110 | break; |
| 122 | case SCSI_MLQUEUE_TARGET_BUSY: | 111 | case SCSI_MLQUEUE_TARGET_BUSY: |
| 123 | starget->target_blocked = starget->max_target_blocked; | 112 | atomic_set(&starget->target_blocked, |
| 113 | starget->max_target_blocked); | ||
| 124 | break; | 114 | break; |
| 125 | } | 115 | } |
| 116 | } | ||
| 117 | |||
| 118 | static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd) | ||
| 119 | { | ||
| 120 | struct scsi_device *sdev = cmd->device; | ||
| 121 | struct request_queue *q = cmd->request->q; | ||
| 122 | |||
| 123 | blk_mq_requeue_request(cmd->request); | ||
| 124 | blk_mq_kick_requeue_list(q); | ||
| 125 | put_device(&sdev->sdev_gendev); | ||
| 126 | } | ||
| 127 | |||
| 128 | /** | ||
| 129 | * __scsi_queue_insert - private queue insertion | ||
| 130 | * @cmd: The SCSI command being requeued | ||
| 131 | * @reason: The reason for the requeue | ||
| 132 | * @unbusy: Whether the queue should be unbusied | ||
| 133 | * | ||
| 134 | * This is a private queue insertion. The public interface | ||
| 135 | * scsi_queue_insert() always assumes the queue should be unbusied | ||
| 136 | * because it's always called before the completion. This function is | ||
| 137 | * for a requeue after completion, which should only occur in this | ||
| 138 | * file. | ||
| 139 | */ | ||
| 140 | static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) | ||
| 141 | { | ||
| 142 | struct scsi_device *device = cmd->device; | ||
| 143 | struct request_queue *q = device->request_queue; | ||
| 144 | unsigned long flags; | ||
| 145 | |||
| 146 | SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd, | ||
| 147 | "Inserting command %p into mlqueue\n", cmd)); | ||
| 148 | |||
| 149 | scsi_set_blocked(cmd, reason); | ||
| 126 | 150 | ||
| 127 | /* | 151 | /* |
| 128 | * Decrement the counters, since these commands are no longer | 152 | * Decrement the counters, since these commands are no longer |
| @@ -138,6 +162,10 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) | |||
| 138 | * before blk_cleanup_queue() finishes. | 162 | * before blk_cleanup_queue() finishes. |
| 139 | */ | 163 | */ |
| 140 | cmd->result = 0; | 164 | cmd->result = 0; |
| 165 | if (q->mq_ops) { | ||
| 166 | scsi_mq_requeue_cmd(cmd); | ||
| 167 | return; | ||
| 168 | } | ||
| 141 | spin_lock_irqsave(q->queue_lock, flags); | 169 | spin_lock_irqsave(q->queue_lock, flags); |
| 142 | blk_requeue_request(q, cmd->request); | 170 | blk_requeue_request(q, cmd->request); |
| 143 | kblockd_schedule_work(&device->requeue_work); | 171 | kblockd_schedule_work(&device->requeue_work); |
| @@ -282,16 +310,26 @@ void scsi_device_unbusy(struct scsi_device *sdev) | |||
| 282 | struct scsi_target *starget = scsi_target(sdev); | 310 | struct scsi_target *starget = scsi_target(sdev); |
| 283 | unsigned long flags; | 311 | unsigned long flags; |
| 284 | 312 | ||
| 285 | spin_lock_irqsave(shost->host_lock, flags); | 313 | atomic_dec(&shost->host_busy); |
| 286 | shost->host_busy--; | 314 | if (starget->can_queue > 0) |
| 287 | starget->target_busy--; | 315 | atomic_dec(&starget->target_busy); |
| 316 | |||
| 288 | if (unlikely(scsi_host_in_recovery(shost) && | 317 | if (unlikely(scsi_host_in_recovery(shost) && |
| 289 | (shost->host_failed || shost->host_eh_scheduled))) | 318 | (shost->host_failed || shost->host_eh_scheduled))) { |
| 319 | spin_lock_irqsave(shost->host_lock, flags); | ||
| 290 | scsi_eh_wakeup(shost); | 320 | scsi_eh_wakeup(shost); |
| 291 | spin_unlock(shost->host_lock); | 321 | spin_unlock_irqrestore(shost->host_lock, flags); |
| 292 | spin_lock(sdev->request_queue->queue_lock); | 322 | } |
| 293 | sdev->device_busy--; | 323 | |
| 294 | spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); | 324 | atomic_dec(&sdev->device_busy); |
| 325 | } | ||
| 326 | |||
| 327 | static void scsi_kick_queue(struct request_queue *q) | ||
| 328 | { | ||
| 329 | if (q->mq_ops) | ||
| 330 | blk_mq_start_hw_queues(q); | ||
| 331 | else | ||
| 332 | blk_run_queue(q); | ||
| 295 | } | 333 | } |
| 296 | 334 | ||
| 297 | /* | 335 | /* |
| @@ -318,7 +356,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev) | |||
| 318 | * but in most cases, we will be first. Ideally, each LU on the | 356 | * but in most cases, we will be first. Ideally, each LU on the |
| 319 | * target would get some limited time or requests on the target. | 357 | * target would get some limited time or requests on the target. |
| 320 | */ | 358 | */ |
| 321 | blk_run_queue(current_sdev->request_queue); | 359 | scsi_kick_queue(current_sdev->request_queue); |
| 322 | 360 | ||
| 323 | spin_lock_irqsave(shost->host_lock, flags); | 361 | spin_lock_irqsave(shost->host_lock, flags); |
| 324 | if (starget->starget_sdev_user) | 362 | if (starget->starget_sdev_user) |
| @@ -331,7 +369,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev) | |||
| 331 | continue; | 369 | continue; |
| 332 | 370 | ||
| 333 | spin_unlock_irqrestore(shost->host_lock, flags); | 371 | spin_unlock_irqrestore(shost->host_lock, flags); |
| 334 | blk_run_queue(sdev->request_queue); | 372 | scsi_kick_queue(sdev->request_queue); |
| 335 | spin_lock_irqsave(shost->host_lock, flags); | 373 | spin_lock_irqsave(shost->host_lock, flags); |
| 336 | 374 | ||
| 337 | scsi_device_put(sdev); | 375 | scsi_device_put(sdev); |
| @@ -340,28 +378,36 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev) | |||
| 340 | spin_unlock_irqrestore(shost->host_lock, flags); | 378 | spin_unlock_irqrestore(shost->host_lock, flags); |
| 341 | } | 379 | } |
| 342 | 380 | ||
| 343 | static inline int scsi_device_is_busy(struct scsi_device *sdev) | 381 | static inline bool scsi_device_is_busy(struct scsi_device *sdev) |
| 344 | { | 382 | { |
| 345 | if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked) | 383 | if (atomic_read(&sdev->device_busy) >= sdev->queue_depth) |
| 346 | return 1; | 384 | return true; |
| 347 | 385 | if (atomic_read(&sdev->device_blocked) > 0) | |
| 348 | return 0; | 386 | return true; |
| 387 | return false; | ||
| 349 | } | 388 | } |
| 350 | 389 | ||
| 351 | static inline int scsi_target_is_busy(struct scsi_target *starget) | 390 | static inline bool scsi_target_is_busy(struct scsi_target *starget) |
| 352 | { | 391 | { |
| 353 | return ((starget->can_queue > 0 && | 392 | if (starget->can_queue > 0) { |
| 354 | starget->target_busy >= starget->can_queue) || | 393 | if (atomic_read(&starget->target_busy) >= starget->can_queue) |
| 355 | starget->target_blocked); | 394 | return true; |
| 395 | if (atomic_read(&starget->target_blocked) > 0) | ||
| 396 | return true; | ||
| 397 | } | ||
| 398 | return false; | ||
| 356 | } | 399 | } |
| 357 | 400 | ||
| 358 | static inline int scsi_host_is_busy(struct Scsi_Host *shost) | 401 | static inline bool scsi_host_is_busy(struct Scsi_Host *shost) |
| 359 | { | 402 | { |
| 360 | if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) || | 403 | if (shost->can_queue > 0 && |
| 361 | shost->host_blocked || shost->host_self_blocked) | 404 | atomic_read(&shost->host_busy) >= shost->can_queue) |
| 362 | return 1; | 405 | return true; |
| 363 | 406 | if (atomic_read(&shost->host_blocked) > 0) | |
| 364 | return 0; | 407 | return true; |
| 408 | if (shost->host_self_blocked) | ||
| 409 | return true; | ||
| 410 | return false; | ||
| 365 | } | 411 | } |
| 366 | 412 | ||
| 367 | static void scsi_starved_list_run(struct Scsi_Host *shost) | 413 | static void scsi_starved_list_run(struct Scsi_Host *shost) |
| @@ -413,7 +459,7 @@ static void scsi_starved_list_run(struct Scsi_Host *shost) | |||
| 413 | continue; | 459 | continue; |
| 414 | spin_unlock_irqrestore(shost->host_lock, flags); | 460 | spin_unlock_irqrestore(shost->host_lock, flags); |
| 415 | 461 | ||
| 416 | blk_run_queue(slq); | 462 | scsi_kick_queue(slq); |
| 417 | blk_put_queue(slq); | 463 | blk_put_queue(slq); |
| 418 | 464 | ||
| 419 | spin_lock_irqsave(shost->host_lock, flags); | 465 | spin_lock_irqsave(shost->host_lock, flags); |
| @@ -444,7 +490,10 @@ static void scsi_run_queue(struct request_queue *q) | |||
| 444 | if (!list_empty(&sdev->host->starved_list)) | 490 | if (!list_empty(&sdev->host->starved_list)) |
| 445 | scsi_starved_list_run(sdev->host); | 491 | scsi_starved_list_run(sdev->host); |
| 446 | 492 | ||
| 447 | blk_run_queue(q); | 493 | if (q->mq_ops) |
| 494 | blk_mq_start_stopped_hw_queues(q, false); | ||
| 495 | else | ||
| 496 | blk_run_queue(q); | ||
| 448 | } | 497 | } |
| 449 | 498 | ||
| 450 | void scsi_requeue_run_queue(struct work_struct *work) | 499 | void scsi_requeue_run_queue(struct work_struct *work) |
| @@ -542,25 +591,70 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask) | |||
| 542 | return mempool_alloc(sgp->pool, gfp_mask); | 591 | return mempool_alloc(sgp->pool, gfp_mask); |
| 543 | } | 592 | } |
| 544 | 593 | ||
| 594 | static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) | ||
| 595 | { | ||
| 596 | if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS) | ||
| 597 | return; | ||
| 598 | __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free); | ||
| 599 | } | ||
| 600 | |||
| 545 | static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, | 601 | static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, |
| 546 | gfp_t gfp_mask) | 602 | gfp_t gfp_mask, bool mq) |
| 547 | { | 603 | { |
| 604 | struct scatterlist *first_chunk = NULL; | ||
| 548 | int ret; | 605 | int ret; |
| 549 | 606 | ||
| 550 | BUG_ON(!nents); | 607 | BUG_ON(!nents); |
| 551 | 608 | ||
| 609 | if (mq) { | ||
| 610 | if (nents <= SCSI_MAX_SG_SEGMENTS) { | ||
| 611 | sdb->table.nents = nents; | ||
| 612 | sg_init_table(sdb->table.sgl, sdb->table.nents); | ||
| 613 | return 0; | ||
| 614 | } | ||
| 615 | first_chunk = sdb->table.sgl; | ||
| 616 | } | ||
| 617 | |||
| 552 | ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS, | 618 | ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS, |
| 553 | gfp_mask, scsi_sg_alloc); | 619 | first_chunk, gfp_mask, scsi_sg_alloc); |
| 554 | if (unlikely(ret)) | 620 | if (unlikely(ret)) |
| 555 | __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, | 621 | scsi_free_sgtable(sdb, mq); |
| 556 | scsi_sg_free); | ||
| 557 | |||
| 558 | return ret; | 622 | return ret; |
| 559 | } | 623 | } |
| 560 | 624 | ||
| 561 | static void scsi_free_sgtable(struct scsi_data_buffer *sdb) | 625 | static void scsi_uninit_cmd(struct scsi_cmnd *cmd) |
| 626 | { | ||
| 627 | if (cmd->request->cmd_type == REQ_TYPE_FS) { | ||
| 628 | struct scsi_driver *drv = scsi_cmd_to_driver(cmd); | ||
| 629 | |||
| 630 | if (drv->uninit_command) | ||
| 631 | drv->uninit_command(cmd); | ||
| 632 | } | ||
| 633 | } | ||
| 634 | |||
| 635 | static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd) | ||
| 562 | { | 636 | { |
| 563 | __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free); | 637 | if (cmd->sdb.table.nents) |
| 638 | scsi_free_sgtable(&cmd->sdb, true); | ||
| 639 | if (cmd->request->next_rq && cmd->request->next_rq->special) | ||
| 640 | scsi_free_sgtable(cmd->request->next_rq->special, true); | ||
| 641 | if (scsi_prot_sg_count(cmd)) | ||
| 642 | scsi_free_sgtable(cmd->prot_sdb, true); | ||
| 643 | } | ||
| 644 | |||
| 645 | static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) | ||
| 646 | { | ||
| 647 | struct scsi_device *sdev = cmd->device; | ||
| 648 | unsigned long flags; | ||
| 649 | |||
| 650 | BUG_ON(list_empty(&cmd->list)); | ||
| 651 | |||
| 652 | scsi_mq_free_sgtables(cmd); | ||
| 653 | scsi_uninit_cmd(cmd); | ||
| 654 | |||
| 655 | spin_lock_irqsave(&sdev->list_lock, flags); | ||
| 656 | list_del_init(&cmd->list); | ||
| 657 | spin_unlock_irqrestore(&sdev->list_lock, flags); | ||
| 564 | } | 658 | } |
| 565 | 659 | ||
| 566 | /* | 660 | /* |
| @@ -579,27 +673,79 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb) | |||
| 579 | * the __init_io() function. Primarily this would involve | 673 | * the __init_io() function. Primarily this would involve |
| 580 | * the scatter-gather table. | 674 | * the scatter-gather table. |
| 581 | */ | 675 | */ |
| 582 | void scsi_release_buffers(struct scsi_cmnd *cmd) | 676 | static void scsi_release_buffers(struct scsi_cmnd *cmd) |
| 583 | { | 677 | { |
| 584 | if (cmd->sdb.table.nents) | 678 | if (cmd->sdb.table.nents) |
| 585 | scsi_free_sgtable(&cmd->sdb); | 679 | scsi_free_sgtable(&cmd->sdb, false); |
| 586 | 680 | ||
| 587 | memset(&cmd->sdb, 0, sizeof(cmd->sdb)); | 681 | memset(&cmd->sdb, 0, sizeof(cmd->sdb)); |
| 588 | 682 | ||
| 589 | if (scsi_prot_sg_count(cmd)) | 683 | if (scsi_prot_sg_count(cmd)) |
| 590 | scsi_free_sgtable(cmd->prot_sdb); | 684 | scsi_free_sgtable(cmd->prot_sdb, false); |
| 591 | } | 685 | } |
| 592 | EXPORT_SYMBOL(scsi_release_buffers); | ||
| 593 | 686 | ||
| 594 | static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) | 687 | static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) |
| 595 | { | 688 | { |
| 596 | struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; | 689 | struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; |
| 597 | 690 | ||
| 598 | scsi_free_sgtable(bidi_sdb); | 691 | scsi_free_sgtable(bidi_sdb, false); |
| 599 | kmem_cache_free(scsi_sdb_cache, bidi_sdb); | 692 | kmem_cache_free(scsi_sdb_cache, bidi_sdb); |
| 600 | cmd->request->next_rq->special = NULL; | 693 | cmd->request->next_rq->special = NULL; |
| 601 | } | 694 | } |
| 602 | 695 | ||
| 696 | static bool scsi_end_request(struct request *req, int error, | ||
| 697 | unsigned int bytes, unsigned int bidi_bytes) | ||
| 698 | { | ||
| 699 | struct scsi_cmnd *cmd = req->special; | ||
| 700 | struct scsi_device *sdev = cmd->device; | ||
| 701 | struct request_queue *q = sdev->request_queue; | ||
| 702 | |||
| 703 | if (blk_update_request(req, error, bytes)) | ||
| 704 | return true; | ||
| 705 | |||
| 706 | /* Bidi request must be completed as a whole */ | ||
| 707 | if (unlikely(bidi_bytes) && | ||
| 708 | blk_update_request(req->next_rq, error, bidi_bytes)) | ||
| 709 | return true; | ||
| 710 | |||
| 711 | if (blk_queue_add_random(q)) | ||
| 712 | add_disk_randomness(req->rq_disk); | ||
| 713 | |||
| 714 | if (req->mq_ctx) { | ||
| 715 | /* | ||
| 716 | * In the MQ case the command gets freed by __blk_mq_end_io, | ||
| 717 | * so we have to do all cleanup that depends on it earlier. | ||
| 718 | * | ||
| 719 | * We also can't kick the queues from irq context, so we | ||
| 720 | * will have to defer it to a workqueue. | ||
| 721 | */ | ||
| 722 | scsi_mq_uninit_cmd(cmd); | ||
| 723 | |||
| 724 | __blk_mq_end_io(req, error); | ||
| 725 | |||
| 726 | if (scsi_target(sdev)->single_lun || | ||
| 727 | !list_empty(&sdev->host->starved_list)) | ||
| 728 | kblockd_schedule_work(&sdev->requeue_work); | ||
| 729 | else | ||
| 730 | blk_mq_start_stopped_hw_queues(q, true); | ||
| 731 | |||
| 732 | put_device(&sdev->sdev_gendev); | ||
| 733 | } else { | ||
| 734 | unsigned long flags; | ||
| 735 | |||
| 736 | spin_lock_irqsave(q->queue_lock, flags); | ||
| 737 | blk_finish_request(req, error); | ||
| 738 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
| 739 | |||
| 740 | if (bidi_bytes) | ||
| 741 | scsi_release_bidi_buffers(cmd); | ||
| 742 | scsi_release_buffers(cmd); | ||
| 743 | scsi_next_command(cmd); | ||
| 744 | } | ||
| 745 | |||
| 746 | return false; | ||
| 747 | } | ||
| 748 | |||
| 603 | /** | 749 | /** |
| 604 | * __scsi_error_from_host_byte - translate SCSI error code into errno | 750 | * __scsi_error_from_host_byte - translate SCSI error code into errno |
| 605 | * @cmd: SCSI command (unused) | 751 | * @cmd: SCSI command (unused) |
| @@ -672,7 +818,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) | |||
| 672 | * be put back on the queue and retried using the same | 818 | * be put back on the queue and retried using the same |
| 673 | * command as before, possibly after a delay. | 819 | * command as before, possibly after a delay. |
| 674 | * | 820 | * |
| 675 | * c) We can call blk_end_request() with -EIO to fail | 821 | * c) We can call scsi_end_request() with -EIO to fail |
| 676 | * the remainder of the request. | 822 | * the remainder of the request. |
| 677 | */ | 823 | */ |
| 678 | void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | 824 | void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) |
| @@ -686,7 +832,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 686 | int sense_deferred = 0; | 832 | int sense_deferred = 0; |
| 687 | enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY, | 833 | enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY, |
| 688 | ACTION_DELAYED_RETRY} action; | 834 | ACTION_DELAYED_RETRY} action; |
| 689 | char *description = NULL; | ||
| 690 | unsigned long wait_for = (cmd->allowed + 1) * req->timeout; | 835 | unsigned long wait_for = (cmd->allowed + 1) * req->timeout; |
| 691 | 836 | ||
| 692 | if (result) { | 837 | if (result) { |
| @@ -724,15 +869,19 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 724 | * both sides at once. | 869 | * both sides at once. |
| 725 | */ | 870 | */ |
| 726 | req->next_rq->resid_len = scsi_in(cmd)->resid; | 871 | req->next_rq->resid_len = scsi_in(cmd)->resid; |
| 727 | 872 | if (scsi_end_request(req, 0, blk_rq_bytes(req), | |
| 728 | scsi_release_buffers(cmd); | 873 | blk_rq_bytes(req->next_rq))) |
| 729 | scsi_release_bidi_buffers(cmd); | 874 | BUG(); |
| 730 | |||
| 731 | blk_end_request_all(req, 0); | ||
| 732 | |||
| 733 | scsi_next_command(cmd); | ||
| 734 | return; | 875 | return; |
| 735 | } | 876 | } |
| 877 | } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) { | ||
| 878 | /* | ||
| 879 | * Certain non BLOCK_PC requests are commands that don't | ||
| 880 | * actually transfer anything (FLUSH), so cannot use | ||
| 881 | * good_bytes != blk_rq_bytes(req) as the signal for an error. | ||
| 882 | * This sets the error explicitly for the problem case. | ||
| 883 | */ | ||
| 884 | error = __scsi_error_from_host_byte(cmd, result); | ||
| 736 | } | 885 | } |
| 737 | 886 | ||
| 738 | /* no bidi support for !REQ_TYPE_BLOCK_PC yet */ | 887 | /* no bidi support for !REQ_TYPE_BLOCK_PC yet */ |
| @@ -742,9 +891,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 742 | * Next deal with any sectors which we were able to correctly | 891 | * Next deal with any sectors which we were able to correctly |
| 743 | * handle. | 892 | * handle. |
| 744 | */ | 893 | */ |
| 745 | SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, " | 894 | SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd, |
| 746 | "%d bytes done.\n", | 895 | "%u sectors total, %d bytes done.\n", |
| 747 | blk_rq_sectors(req), good_bytes)); | 896 | blk_rq_sectors(req), good_bytes)); |
| 748 | 897 | ||
| 749 | /* | 898 | /* |
| 750 | * Recovered errors need reporting, but they're always treated | 899 | * Recovered errors need reporting, but they're always treated |
| @@ -769,15 +918,16 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 769 | /* | 918 | /* |
| 770 | * If we finished all bytes in the request we are done now. | 919 | * If we finished all bytes in the request we are done now. |
| 771 | */ | 920 | */ |
| 772 | if (!blk_end_request(req, error, good_bytes)) | 921 | if (!scsi_end_request(req, error, good_bytes, 0)) |
| 773 | goto next_command; | 922 | return; |
| 774 | 923 | ||
| 775 | /* | 924 | /* |
| 776 | * Kill remainder if no retrys. | 925 | * Kill remainder if no retrys. |
| 777 | */ | 926 | */ |
| 778 | if (error && scsi_noretry_cmd(cmd)) { | 927 | if (error && scsi_noretry_cmd(cmd)) { |
| 779 | blk_end_request_all(req, error); | 928 | if (scsi_end_request(req, error, blk_rq_bytes(req), 0)) |
| 780 | goto next_command; | 929 | BUG(); |
| 930 | return; | ||
| 781 | } | 931 | } |
| 782 | 932 | ||
| 783 | /* | 933 | /* |
| @@ -803,7 +953,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 803 | * and quietly refuse further access. | 953 | * and quietly refuse further access. |
| 804 | */ | 954 | */ |
| 805 | cmd->device->changed = 1; | 955 | cmd->device->changed = 1; |
| 806 | description = "Media Changed"; | ||
| 807 | action = ACTION_FAIL; | 956 | action = ACTION_FAIL; |
| 808 | } else { | 957 | } else { |
| 809 | /* Must have been a power glitch, or a | 958 | /* Must have been a power glitch, or a |
| @@ -831,27 +980,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 831 | cmd->device->use_10_for_rw = 0; | 980 | cmd->device->use_10_for_rw = 0; |
| 832 | action = ACTION_REPREP; | 981 | action = ACTION_REPREP; |
| 833 | } else if (sshdr.asc == 0x10) /* DIX */ { | 982 | } else if (sshdr.asc == 0x10) /* DIX */ { |
| 834 | description = "Host Data Integrity Failure"; | ||
| 835 | action = ACTION_FAIL; | 983 | action = ACTION_FAIL; |
| 836 | error = -EILSEQ; | 984 | error = -EILSEQ; |
| 837 | /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ | 985 | /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ |
| 838 | } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { | 986 | } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { |
| 839 | switch (cmd->cmnd[0]) { | ||
| 840 | case UNMAP: | ||
| 841 | description = "Discard failure"; | ||
| 842 | break; | ||
| 843 | case WRITE_SAME: | ||
| 844 | case WRITE_SAME_16: | ||
| 845 | if (cmd->cmnd[1] & 0x8) | ||
| 846 | description = "Discard failure"; | ||
| 847 | else | ||
| 848 | description = | ||
| 849 | "Write same failure"; | ||
| 850 | break; | ||
| 851 | default: | ||
| 852 | description = "Invalid command failure"; | ||
| 853 | break; | ||
| 854 | } | ||
| 855 | action = ACTION_FAIL; | 987 | action = ACTION_FAIL; |
| 856 | error = -EREMOTEIO; | 988 | error = -EREMOTEIO; |
| 857 | } else | 989 | } else |
| @@ -859,10 +991,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 859 | break; | 991 | break; |
| 860 | case ABORTED_COMMAND: | 992 | case ABORTED_COMMAND: |
| 861 | action = ACTION_FAIL; | 993 | action = ACTION_FAIL; |
| 862 | if (sshdr.asc == 0x10) { /* DIF */ | 994 | if (sshdr.asc == 0x10) /* DIF */ |
| 863 | description = "Target Data Integrity Failure"; | ||
| 864 | error = -EILSEQ; | 995 | error = -EILSEQ; |
| 865 | } | ||
| 866 | break; | 996 | break; |
| 867 | case NOT_READY: | 997 | case NOT_READY: |
| 868 | /* If the device is in the process of becoming | 998 | /* If the device is in the process of becoming |
| @@ -881,57 +1011,52 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 881 | action = ACTION_DELAYED_RETRY; | 1011 | action = ACTION_DELAYED_RETRY; |
| 882 | break; | 1012 | break; |
| 883 | default: | 1013 | default: |
| 884 | description = "Device not ready"; | ||
| 885 | action = ACTION_FAIL; | 1014 | action = ACTION_FAIL; |
| 886 | break; | 1015 | break; |
| 887 | } | 1016 | } |
| 888 | } else { | 1017 | } else |
| 889 | description = "Device not ready"; | ||
| 890 | action = ACTION_FAIL; | 1018 | action = ACTION_FAIL; |
| 891 | } | ||
| 892 | break; | 1019 | break; |
| 893 | case VOLUME_OVERFLOW: | 1020 | case VOLUME_OVERFLOW: |
| 894 | /* See SSC3rXX or current. */ | 1021 | /* See SSC3rXX or current. */ |
| 895 | action = ACTION_FAIL; | 1022 | action = ACTION_FAIL; |
| 896 | break; | 1023 | break; |
| 897 | default: | 1024 | default: |
| 898 | description = "Unhandled sense code"; | ||
| 899 | action = ACTION_FAIL; | 1025 | action = ACTION_FAIL; |
| 900 | break; | 1026 | break; |
| 901 | } | 1027 | } |
| 902 | } else { | 1028 | } else |
| 903 | description = "Unhandled error code"; | ||
| 904 | action = ACTION_FAIL; | 1029 | action = ACTION_FAIL; |
| 905 | } | ||
| 906 | 1030 | ||
| 907 | if (action != ACTION_FAIL && | 1031 | if (action != ACTION_FAIL && |
| 908 | time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { | 1032 | time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) |
| 909 | action = ACTION_FAIL; | 1033 | action = ACTION_FAIL; |
| 910 | description = "Command timed out"; | ||
| 911 | } | ||
| 912 | 1034 | ||
| 913 | switch (action) { | 1035 | switch (action) { |
| 914 | case ACTION_FAIL: | 1036 | case ACTION_FAIL: |
| 915 | /* Give up and fail the remainder of the request */ | 1037 | /* Give up and fail the remainder of the request */ |
| 916 | if (!(req->cmd_flags & REQ_QUIET)) { | 1038 | if (!(req->cmd_flags & REQ_QUIET)) { |
| 917 | if (description) | ||
| 918 | scmd_printk(KERN_INFO, cmd, "%s\n", | ||
| 919 | description); | ||
| 920 | scsi_print_result(cmd); | 1039 | scsi_print_result(cmd); |
| 921 | if (driver_byte(result) & DRIVER_SENSE) | 1040 | if (driver_byte(result) & DRIVER_SENSE) |
| 922 | scsi_print_sense("", cmd); | 1041 | scsi_print_sense("", cmd); |
| 923 | scsi_print_command(cmd); | 1042 | scsi_print_command(cmd); |
| 924 | } | 1043 | } |
| 925 | if (!blk_end_request_err(req, error)) | 1044 | if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0)) |
| 926 | goto next_command; | 1045 | return; |
| 927 | /*FALLTHRU*/ | 1046 | /*FALLTHRU*/ |
| 928 | case ACTION_REPREP: | 1047 | case ACTION_REPREP: |
| 929 | requeue: | 1048 | requeue: |
| 930 | /* Unprep the request and put it back at the head of the queue. | 1049 | /* Unprep the request and put it back at the head of the queue. |
| 931 | * A new command will be prepared and issued. | 1050 | * A new command will be prepared and issued. |
| 932 | */ | 1051 | */ |
| 933 | scsi_release_buffers(cmd); | 1052 | if (q->mq_ops) { |
| 934 | scsi_requeue_command(q, cmd); | 1053 | cmd->request->cmd_flags &= ~REQ_DONTPREP; |
| 1054 | scsi_mq_uninit_cmd(cmd); | ||
| 1055 | scsi_mq_requeue_cmd(cmd); | ||
| 1056 | } else { | ||
| 1057 | scsi_release_buffers(cmd); | ||
| 1058 | scsi_requeue_command(q, cmd); | ||
| 1059 | } | ||
| 935 | break; | 1060 | break; |
| 936 | case ACTION_RETRY: | 1061 | case ACTION_RETRY: |
| 937 | /* Retry the same command immediately */ | 1062 | /* Retry the same command immediately */ |
| @@ -942,11 +1067,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 942 | __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0); | 1067 | __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0); |
| 943 | break; | 1068 | break; |
| 944 | } | 1069 | } |
| 945 | return; | ||
| 946 | |||
| 947 | next_command: | ||
| 948 | scsi_release_buffers(cmd); | ||
| 949 | scsi_next_command(cmd); | ||
| 950 | } | 1070 | } |
| 951 | 1071 | ||
| 952 | static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, | 1072 | static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, |
| @@ -958,9 +1078,8 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, | |||
| 958 | * If sg table allocation fails, requeue request later. | 1078 | * If sg table allocation fails, requeue request later. |
| 959 | */ | 1079 | */ |
| 960 | if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments, | 1080 | if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments, |
| 961 | gfp_mask))) { | 1081 | gfp_mask, req->mq_ctx != NULL))) |
| 962 | return BLKPREP_DEFER; | 1082 | return BLKPREP_DEFER; |
| 963 | } | ||
| 964 | 1083 | ||
| 965 | /* | 1084 | /* |
| 966 | * Next, walk the list, and fill in the addresses and sizes of | 1085 | * Next, walk the list, and fill in the addresses and sizes of |
| @@ -988,21 +1107,29 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) | |||
| 988 | { | 1107 | { |
| 989 | struct scsi_device *sdev = cmd->device; | 1108 | struct scsi_device *sdev = cmd->device; |
| 990 | struct request *rq = cmd->request; | 1109 | struct request *rq = cmd->request; |
| 1110 | bool is_mq = (rq->mq_ctx != NULL); | ||
| 1111 | int error; | ||
| 1112 | |||
| 1113 | BUG_ON(!rq->nr_phys_segments); | ||
| 991 | 1114 | ||
| 992 | int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); | 1115 | error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); |
| 993 | if (error) | 1116 | if (error) |
| 994 | goto err_exit; | 1117 | goto err_exit; |
| 995 | 1118 | ||
| 996 | if (blk_bidi_rq(rq)) { | 1119 | if (blk_bidi_rq(rq)) { |
| 997 | struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc( | 1120 | if (!rq->q->mq_ops) { |
| 998 | scsi_sdb_cache, GFP_ATOMIC); | 1121 | struct scsi_data_buffer *bidi_sdb = |
| 999 | if (!bidi_sdb) { | 1122 | kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC); |
| 1000 | error = BLKPREP_DEFER; | 1123 | if (!bidi_sdb) { |
| 1001 | goto err_exit; | 1124 | error = BLKPREP_DEFER; |
| 1125 | goto err_exit; | ||
| 1126 | } | ||
| 1127 | |||
| 1128 | rq->next_rq->special = bidi_sdb; | ||
| 1002 | } | 1129 | } |
| 1003 | 1130 | ||
| 1004 | rq->next_rq->special = bidi_sdb; | 1131 | error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special, |
| 1005 | error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC); | 1132 | GFP_ATOMIC); |
| 1006 | if (error) | 1133 | if (error) |
| 1007 | goto err_exit; | 1134 | goto err_exit; |
| 1008 | } | 1135 | } |
| @@ -1014,7 +1141,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) | |||
| 1014 | BUG_ON(prot_sdb == NULL); | 1141 | BUG_ON(prot_sdb == NULL); |
| 1015 | ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); | 1142 | ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); |
| 1016 | 1143 | ||
| 1017 | if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) { | 1144 | if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask, is_mq)) { |
| 1018 | error = BLKPREP_DEFER; | 1145 | error = BLKPREP_DEFER; |
| 1019 | goto err_exit; | 1146 | goto err_exit; |
| 1020 | } | 1147 | } |
| @@ -1028,13 +1155,16 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) | |||
| 1028 | cmd->prot_sdb->table.nents = count; | 1155 | cmd->prot_sdb->table.nents = count; |
| 1029 | } | 1156 | } |
| 1030 | 1157 | ||
| 1031 | return BLKPREP_OK ; | 1158 | return BLKPREP_OK; |
| 1032 | |||
| 1033 | err_exit: | 1159 | err_exit: |
| 1034 | scsi_release_buffers(cmd); | 1160 | if (is_mq) { |
| 1035 | cmd->request->special = NULL; | 1161 | scsi_mq_free_sgtables(cmd); |
| 1036 | scsi_put_command(cmd); | 1162 | } else { |
| 1037 | put_device(&sdev->sdev_gendev); | 1163 | scsi_release_buffers(cmd); |
| 1164 | cmd->request->special = NULL; | ||
| 1165 | scsi_put_command(cmd); | ||
| 1166 | put_device(&sdev->sdev_gendev); | ||
| 1167 | } | ||
| 1038 | return error; | 1168 | return error; |
| 1039 | } | 1169 | } |
| 1040 | EXPORT_SYMBOL(scsi_init_io); | 1170 | EXPORT_SYMBOL(scsi_init_io); |
| @@ -1069,7 +1199,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, | |||
| 1069 | return cmd; | 1199 | return cmd; |
| 1070 | } | 1200 | } |
| 1071 | 1201 | ||
| 1072 | int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) | 1202 | static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) |
| 1073 | { | 1203 | { |
| 1074 | struct scsi_cmnd *cmd = req->special; | 1204 | struct scsi_cmnd *cmd = req->special; |
| 1075 | 1205 | ||
| @@ -1080,11 +1210,7 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) | |||
| 1080 | * submit a request without an attached bio. | 1210 | * submit a request without an attached bio. |
| 1081 | */ | 1211 | */ |
| 1082 | if (req->bio) { | 1212 | if (req->bio) { |
| 1083 | int ret; | 1213 | int ret = scsi_init_io(cmd, GFP_ATOMIC); |
| 1084 | |||
| 1085 | BUG_ON(!req->nr_phys_segments); | ||
| 1086 | |||
| 1087 | ret = scsi_init_io(cmd, GFP_ATOMIC); | ||
| 1088 | if (unlikely(ret)) | 1214 | if (unlikely(ret)) |
| 1089 | return ret; | 1215 | return ret; |
| 1090 | } else { | 1216 | } else { |
| @@ -1094,25 +1220,16 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) | |||
| 1094 | } | 1220 | } |
| 1095 | 1221 | ||
| 1096 | cmd->cmd_len = req->cmd_len; | 1222 | cmd->cmd_len = req->cmd_len; |
| 1097 | if (!blk_rq_bytes(req)) | ||
| 1098 | cmd->sc_data_direction = DMA_NONE; | ||
| 1099 | else if (rq_data_dir(req) == WRITE) | ||
| 1100 | cmd->sc_data_direction = DMA_TO_DEVICE; | ||
| 1101 | else | ||
| 1102 | cmd->sc_data_direction = DMA_FROM_DEVICE; | ||
| 1103 | |||
| 1104 | cmd->transfersize = blk_rq_bytes(req); | 1223 | cmd->transfersize = blk_rq_bytes(req); |
| 1105 | cmd->allowed = req->retries; | 1224 | cmd->allowed = req->retries; |
| 1106 | return BLKPREP_OK; | 1225 | return BLKPREP_OK; |
| 1107 | } | 1226 | } |
| 1108 | EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); | ||
| 1109 | 1227 | ||
| 1110 | /* | 1228 | /* |
| 1111 | * Setup a REQ_TYPE_FS command. These are simple read/write request | 1229 | * Setup a REQ_TYPE_FS command. These are simple request from filesystems |
| 1112 | * from filesystems that still need to be translated to SCSI CDBs from | 1230 | * that still need to be translated to SCSI CDBs from the ULD. |
| 1113 | * the ULD. | ||
| 1114 | */ | 1231 | */ |
| 1115 | int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) | 1232 | static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) |
| 1116 | { | 1233 | { |
| 1117 | struct scsi_cmnd *cmd = req->special; | 1234 | struct scsi_cmnd *cmd = req->special; |
| 1118 | 1235 | ||
| @@ -1123,15 +1240,30 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) | |||
| 1123 | return ret; | 1240 | return ret; |
| 1124 | } | 1241 | } |
| 1125 | 1242 | ||
| 1126 | /* | ||
| 1127 | * Filesystem requests must transfer data. | ||
| 1128 | */ | ||
| 1129 | BUG_ON(!req->nr_phys_segments); | ||
| 1130 | |||
| 1131 | memset(cmd->cmnd, 0, BLK_MAX_CDB); | 1243 | memset(cmd->cmnd, 0, BLK_MAX_CDB); |
| 1132 | return scsi_init_io(cmd, GFP_ATOMIC); | 1244 | return scsi_cmd_to_driver(cmd)->init_command(cmd); |
| 1245 | } | ||
| 1246 | |||
| 1247 | static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req) | ||
| 1248 | { | ||
| 1249 | struct scsi_cmnd *cmd = req->special; | ||
| 1250 | |||
| 1251 | if (!blk_rq_bytes(req)) | ||
| 1252 | cmd->sc_data_direction = DMA_NONE; | ||
| 1253 | else if (rq_data_dir(req) == WRITE) | ||
| 1254 | cmd->sc_data_direction = DMA_TO_DEVICE; | ||
| 1255 | else | ||
| 1256 | cmd->sc_data_direction = DMA_FROM_DEVICE; | ||
| 1257 | |||
| 1258 | switch (req->cmd_type) { | ||
| 1259 | case REQ_TYPE_FS: | ||
| 1260 | return scsi_setup_fs_cmnd(sdev, req); | ||
| 1261 | case REQ_TYPE_BLOCK_PC: | ||
| 1262 | return scsi_setup_blk_pc_cmnd(sdev, req); | ||
| 1263 | default: | ||
| 1264 | return BLKPREP_KILL; | ||
| 1265 | } | ||
| 1133 | } | 1266 | } |
| 1134 | EXPORT_SYMBOL(scsi_setup_fs_cmnd); | ||
| 1135 | 1267 | ||
| 1136 | static int | 1268 | static int |
| 1137 | scsi_prep_state_check(struct scsi_device *sdev, struct request *req) | 1269 | scsi_prep_state_check(struct scsi_device *sdev, struct request *req) |
| @@ -1210,7 +1342,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret) | |||
| 1210 | * queue must be restarted, so we schedule a callback to happen | 1342 | * queue must be restarted, so we schedule a callback to happen |
| 1211 | * shortly. | 1343 | * shortly. |
| 1212 | */ | 1344 | */ |
| 1213 | if (sdev->device_busy == 0) | 1345 | if (atomic_read(&sdev->device_busy) == 0) |
| 1214 | blk_delay_queue(q, SCSI_QUEUE_DELAY); | 1346 | blk_delay_queue(q, SCSI_QUEUE_DELAY); |
| 1215 | break; | 1347 | break; |
| 1216 | default: | 1348 | default: |
| @@ -1236,26 +1368,14 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req) | |||
| 1236 | goto out; | 1368 | goto out; |
| 1237 | } | 1369 | } |
| 1238 | 1370 | ||
| 1239 | if (req->cmd_type == REQ_TYPE_FS) | 1371 | ret = scsi_setup_cmnd(sdev, req); |
| 1240 | ret = scsi_cmd_to_driver(cmd)->init_command(cmd); | ||
| 1241 | else if (req->cmd_type == REQ_TYPE_BLOCK_PC) | ||
| 1242 | ret = scsi_setup_blk_pc_cmnd(sdev, req); | ||
| 1243 | else | ||
| 1244 | ret = BLKPREP_KILL; | ||
| 1245 | |||
| 1246 | out: | 1372 | out: |
| 1247 | return scsi_prep_return(q, req, ret); | 1373 | return scsi_prep_return(q, req, ret); |
| 1248 | } | 1374 | } |
| 1249 | 1375 | ||
| 1250 | static void scsi_unprep_fn(struct request_queue *q, struct request *req) | 1376 | static void scsi_unprep_fn(struct request_queue *q, struct request *req) |
| 1251 | { | 1377 | { |
| 1252 | if (req->cmd_type == REQ_TYPE_FS) { | 1378 | scsi_uninit_cmd(req->special); |
| 1253 | struct scsi_cmnd *cmd = req->special; | ||
| 1254 | struct scsi_driver *drv = scsi_cmd_to_driver(cmd); | ||
| 1255 | |||
| 1256 | if (drv->uninit_command) | ||
| 1257 | drv->uninit_command(cmd); | ||
| 1258 | } | ||
| 1259 | } | 1379 | } |
| 1260 | 1380 | ||
| 1261 | /* | 1381 | /* |
| @@ -1267,99 +1387,144 @@ static void scsi_unprep_fn(struct request_queue *q, struct request *req) | |||
| 1267 | static inline int scsi_dev_queue_ready(struct request_queue *q, | 1387 | static inline int scsi_dev_queue_ready(struct request_queue *q, |
| 1268 | struct scsi_device *sdev) | 1388 | struct scsi_device *sdev) |
| 1269 | { | 1389 | { |
| 1270 | if (sdev->device_busy == 0 && sdev->device_blocked) { | 1390 | unsigned int busy; |
| 1391 | |||
| 1392 | busy = atomic_inc_return(&sdev->device_busy) - 1; | ||
| 1393 | if (atomic_read(&sdev->device_blocked)) { | ||
| 1394 | if (busy) | ||
| 1395 | goto out_dec; | ||
| 1396 | |||
| 1271 | /* | 1397 | /* |
| 1272 | * unblock after device_blocked iterates to zero | 1398 | * unblock after device_blocked iterates to zero |
| 1273 | */ | 1399 | */ |
| 1274 | if (--sdev->device_blocked == 0) { | 1400 | if (atomic_dec_return(&sdev->device_blocked) > 0) { |
| 1275 | SCSI_LOG_MLQUEUE(3, | 1401 | /* |
| 1276 | sdev_printk(KERN_INFO, sdev, | 1402 | * For the MQ case we take care of this in the caller. |
| 1277 | "unblocking device at zero depth\n")); | 1403 | */ |
| 1278 | } else { | 1404 | if (!q->mq_ops) |
| 1279 | blk_delay_queue(q, SCSI_QUEUE_DELAY); | 1405 | blk_delay_queue(q, SCSI_QUEUE_DELAY); |
| 1280 | return 0; | 1406 | goto out_dec; |
| 1281 | } | 1407 | } |
| 1408 | SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev, | ||
| 1409 | "unblocking device at zero depth\n")); | ||
| 1282 | } | 1410 | } |
| 1283 | if (scsi_device_is_busy(sdev)) | 1411 | |
| 1284 | return 0; | 1412 | if (busy >= sdev->queue_depth) |
| 1413 | goto out_dec; | ||
| 1285 | 1414 | ||
| 1286 | return 1; | 1415 | return 1; |
| 1416 | out_dec: | ||
| 1417 | atomic_dec(&sdev->device_busy); | ||
| 1418 | return 0; | ||
| 1287 | } | 1419 | } |
| 1288 | 1420 | ||
| 1289 | |||
| 1290 | /* | 1421 | /* |
| 1291 | * scsi_target_queue_ready: checks if there we can send commands to target | 1422 | * scsi_target_queue_ready: checks if there we can send commands to target |
| 1292 | * @sdev: scsi device on starget to check. | 1423 | * @sdev: scsi device on starget to check. |
| 1293 | * | ||
| 1294 | * Called with the host lock held. | ||
| 1295 | */ | 1424 | */ |
| 1296 | static inline int scsi_target_queue_ready(struct Scsi_Host *shost, | 1425 | static inline int scsi_target_queue_ready(struct Scsi_Host *shost, |
| 1297 | struct scsi_device *sdev) | 1426 | struct scsi_device *sdev) |
| 1298 | { | 1427 | { |
| 1299 | struct scsi_target *starget = scsi_target(sdev); | 1428 | struct scsi_target *starget = scsi_target(sdev); |
| 1429 | unsigned int busy; | ||
| 1300 | 1430 | ||
| 1301 | if (starget->single_lun) { | 1431 | if (starget->single_lun) { |
| 1432 | spin_lock_irq(shost->host_lock); | ||
| 1302 | if (starget->starget_sdev_user && | 1433 | if (starget->starget_sdev_user && |
| 1303 | starget->starget_sdev_user != sdev) | 1434 | starget->starget_sdev_user != sdev) { |
| 1435 | spin_unlock_irq(shost->host_lock); | ||
| 1304 | return 0; | 1436 | return 0; |
| 1437 | } | ||
| 1305 | starget->starget_sdev_user = sdev; | 1438 | starget->starget_sdev_user = sdev; |
| 1439 | spin_unlock_irq(shost->host_lock); | ||
| 1306 | } | 1440 | } |
| 1307 | 1441 | ||
| 1308 | if (starget->target_busy == 0 && starget->target_blocked) { | 1442 | if (starget->can_queue <= 0) |
| 1443 | return 1; | ||
| 1444 | |||
| 1445 | busy = atomic_inc_return(&starget->target_busy) - 1; | ||
| 1446 | if (atomic_read(&starget->target_blocked) > 0) { | ||
| 1447 | if (busy) | ||
| 1448 | goto starved; | ||
| 1449 | |||
| 1309 | /* | 1450 | /* |
| 1310 | * unblock after target_blocked iterates to zero | 1451 | * unblock after target_blocked iterates to zero |
| 1311 | */ | 1452 | */ |
| 1312 | if (--starget->target_blocked == 0) { | 1453 | if (atomic_dec_return(&starget->target_blocked) > 0) |
| 1313 | SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget, | 1454 | goto out_dec; |
| 1314 | "unblocking target at zero depth\n")); | ||
| 1315 | } else | ||
| 1316 | return 0; | ||
| 1317 | } | ||
| 1318 | 1455 | ||
| 1319 | if (scsi_target_is_busy(starget)) { | 1456 | SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget, |
| 1320 | list_move_tail(&sdev->starved_entry, &shost->starved_list); | 1457 | "unblocking target at zero depth\n")); |
| 1321 | return 0; | ||
| 1322 | } | 1458 | } |
| 1323 | 1459 | ||
| 1460 | if (busy >= starget->can_queue) | ||
| 1461 | goto starved; | ||
| 1462 | |||
| 1324 | return 1; | 1463 | return 1; |
| 1464 | |||
| 1465 | starved: | ||
| 1466 | spin_lock_irq(shost->host_lock); | ||
| 1467 | list_move_tail(&sdev->starved_entry, &shost->starved_list); | ||
| 1468 | spin_unlock_irq(shost->host_lock); | ||
| 1469 | out_dec: | ||
| 1470 | if (starget->can_queue > 0) | ||
| 1471 | atomic_dec(&starget->target_busy); | ||
| 1472 | return 0; | ||
| 1325 | } | 1473 | } |
| 1326 | 1474 | ||
| 1327 | /* | 1475 | /* |
| 1328 | * scsi_host_queue_ready: if we can send requests to shost, return 1 else | 1476 | * scsi_host_queue_ready: if we can send requests to shost, return 1 else |
| 1329 | * return 0. We must end up running the queue again whenever 0 is | 1477 | * return 0. We must end up running the queue again whenever 0 is |
| 1330 | * returned, else IO can hang. | 1478 | * returned, else IO can hang. |
| 1331 | * | ||
| 1332 | * Called with host_lock held. | ||
| 1333 | */ | 1479 | */ |
| 1334 | static inline int scsi_host_queue_ready(struct request_queue *q, | 1480 | static inline int scsi_host_queue_ready(struct request_queue *q, |
| 1335 | struct Scsi_Host *shost, | 1481 | struct Scsi_Host *shost, |
| 1336 | struct scsi_device *sdev) | 1482 | struct scsi_device *sdev) |
| 1337 | { | 1483 | { |
| 1484 | unsigned int busy; | ||
| 1485 | |||
| 1338 | if (scsi_host_in_recovery(shost)) | 1486 | if (scsi_host_in_recovery(shost)) |
| 1339 | return 0; | 1487 | return 0; |
| 1340 | if (shost->host_busy == 0 && shost->host_blocked) { | 1488 | |
| 1489 | busy = atomic_inc_return(&shost->host_busy) - 1; | ||
| 1490 | if (atomic_read(&shost->host_blocked) > 0) { | ||
| 1491 | if (busy) | ||
| 1492 | goto starved; | ||
| 1493 | |||
| 1341 | /* | 1494 | /* |
| 1342 | * unblock after host_blocked iterates to zero | 1495 | * unblock after host_blocked iterates to zero |
| 1343 | */ | 1496 | */ |
| 1344 | if (--shost->host_blocked == 0) { | 1497 | if (atomic_dec_return(&shost->host_blocked) > 0) |
| 1345 | SCSI_LOG_MLQUEUE(3, | 1498 | goto out_dec; |
| 1346 | printk("scsi%d unblocking host at zero depth\n", | 1499 | |
| 1347 | shost->host_no)); | 1500 | SCSI_LOG_MLQUEUE(3, |
| 1348 | } else { | 1501 | shost_printk(KERN_INFO, shost, |
| 1349 | return 0; | 1502 | "unblocking host at zero depth\n")); |
| 1350 | } | ||
| 1351 | } | ||
| 1352 | if (scsi_host_is_busy(shost)) { | ||
| 1353 | if (list_empty(&sdev->starved_entry)) | ||
| 1354 | list_add_tail(&sdev->starved_entry, &shost->starved_list); | ||
| 1355 | return 0; | ||
| 1356 | } | 1503 | } |
| 1357 | 1504 | ||
| 1505 | if (shost->can_queue > 0 && busy >= shost->can_queue) | ||
| 1506 | goto starved; | ||
| 1507 | if (shost->host_self_blocked) | ||
| 1508 | goto starved; | ||
| 1509 | |||
| 1358 | /* We're OK to process the command, so we can't be starved */ | 1510 | /* We're OK to process the command, so we can't be starved */ |
| 1359 | if (!list_empty(&sdev->starved_entry)) | 1511 | if (!list_empty(&sdev->starved_entry)) { |
| 1360 | list_del_init(&sdev->starved_entry); | 1512 | spin_lock_irq(shost->host_lock); |
| 1513 | if (!list_empty(&sdev->starved_entry)) | ||
| 1514 | list_del_init(&sdev->starved_entry); | ||
| 1515 | spin_unlock_irq(shost->host_lock); | ||
| 1516 | } | ||
| 1361 | 1517 | ||
| 1362 | return 1; | 1518 | return 1; |
| 1519 | |||
| 1520 | starved: | ||
| 1521 | spin_lock_irq(shost->host_lock); | ||
| 1522 | if (list_empty(&sdev->starved_entry)) | ||
| 1523 | list_add_tail(&sdev->starved_entry, &shost->starved_list); | ||
| 1524 | spin_unlock_irq(shost->host_lock); | ||
| 1525 | out_dec: | ||
| 1526 | atomic_dec(&shost->host_busy); | ||
| 1527 | return 0; | ||
| 1363 | } | 1528 | } |
| 1364 | 1529 | ||
| 1365 | /* | 1530 | /* |
| @@ -1422,13 +1587,10 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) | |||
| 1422 | * bump busy counts. To bump the counters, we need to dance | 1587 | * bump busy counts. To bump the counters, we need to dance |
| 1423 | * with the locks as normal issue path does. | 1588 | * with the locks as normal issue path does. |
| 1424 | */ | 1589 | */ |
| 1425 | sdev->device_busy++; | 1590 | atomic_inc(&sdev->device_busy); |
| 1426 | spin_unlock(sdev->request_queue->queue_lock); | 1591 | atomic_inc(&shost->host_busy); |
| 1427 | spin_lock(shost->host_lock); | 1592 | if (starget->can_queue > 0) |
| 1428 | shost->host_busy++; | 1593 | atomic_inc(&starget->target_busy); |
| 1429 | starget->target_busy++; | ||
| 1430 | spin_unlock(shost->host_lock); | ||
| 1431 | spin_lock(sdev->request_queue->queue_lock); | ||
| 1432 | 1594 | ||
| 1433 | blk_complete_request(req); | 1595 | blk_complete_request(req); |
| 1434 | } | 1596 | } |
| @@ -1453,7 +1615,7 @@ static void scsi_softirq_done(struct request *rq) | |||
| 1453 | wait_for/HZ); | 1615 | wait_for/HZ); |
| 1454 | disposition = SUCCESS; | 1616 | disposition = SUCCESS; |
| 1455 | } | 1617 | } |
| 1456 | 1618 | ||
| 1457 | scsi_log_completion(cmd, disposition); | 1619 | scsi_log_completion(cmd, disposition); |
| 1458 | 1620 | ||
| 1459 | switch (disposition) { | 1621 | switch (disposition) { |
| @@ -1472,6 +1634,23 @@ static void scsi_softirq_done(struct request *rq) | |||
| 1472 | } | 1634 | } |
| 1473 | } | 1635 | } |
| 1474 | 1636 | ||
| 1637 | /** | ||
| 1638 | * scsi_done - Invoke completion on finished SCSI command. | ||
| 1639 | * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives | ||
| 1640 | * ownership back to SCSI Core -- i.e. the LLDD has finished with it. | ||
| 1641 | * | ||
| 1642 | * Description: This function is the mid-level's (SCSI Core) interrupt routine, | ||
| 1643 | * which regains ownership of the SCSI command (de facto) from a LLDD, and | ||
| 1644 | * calls blk_complete_request() for further processing. | ||
| 1645 | * | ||
| 1646 | * This function is interrupt context safe. | ||
| 1647 | */ | ||
| 1648 | static void scsi_done(struct scsi_cmnd *cmd) | ||
| 1649 | { | ||
| 1650 | trace_scsi_dispatch_cmd_done(cmd); | ||
| 1651 | blk_complete_request(cmd->request); | ||
| 1652 | } | ||
| 1653 | |||
| 1475 | /* | 1654 | /* |
| 1476 | * Function: scsi_request_fn() | 1655 | * Function: scsi_request_fn() |
| 1477 | * | 1656 | * |
| @@ -1501,11 +1680,11 @@ static void scsi_request_fn(struct request_queue *q) | |||
| 1501 | int rtn; | 1680 | int rtn; |
| 1502 | /* | 1681 | /* |
| 1503 | * get next queueable request. We do this early to make sure | 1682 | * get next queueable request. We do this early to make sure |
| 1504 | * that the request is fully prepared even if we cannot | 1683 | * that the request is fully prepared even if we cannot |
| 1505 | * accept it. | 1684 | * accept it. |
| 1506 | */ | 1685 | */ |
| 1507 | req = blk_peek_request(q); | 1686 | req = blk_peek_request(q); |
| 1508 | if (!req || !scsi_dev_queue_ready(q, sdev)) | 1687 | if (!req) |
| 1509 | break; | 1688 | break; |
| 1510 | 1689 | ||
| 1511 | if (unlikely(!scsi_device_online(sdev))) { | 1690 | if (unlikely(!scsi_device_online(sdev))) { |
| @@ -1515,15 +1694,16 @@ static void scsi_request_fn(struct request_queue *q) | |||
| 1515 | continue; | 1694 | continue; |
| 1516 | } | 1695 | } |
| 1517 | 1696 | ||
| 1697 | if (!scsi_dev_queue_ready(q, sdev)) | ||
| 1698 | break; | ||
| 1518 | 1699 | ||
| 1519 | /* | 1700 | /* |
| 1520 | * Remove the request from the request list. | 1701 | * Remove the request from the request list. |
| 1521 | */ | 1702 | */ |
| 1522 | if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) | 1703 | if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) |
| 1523 | blk_start_request(req); | 1704 | blk_start_request(req); |
| 1524 | sdev->device_busy++; | ||
| 1525 | 1705 | ||
| 1526 | spin_unlock(q->queue_lock); | 1706 | spin_unlock_irq(q->queue_lock); |
| 1527 | cmd = req->special; | 1707 | cmd = req->special; |
| 1528 | if (unlikely(cmd == NULL)) { | 1708 | if (unlikely(cmd == NULL)) { |
| 1529 | printk(KERN_CRIT "impossible request in %s.\n" | 1709 | printk(KERN_CRIT "impossible request in %s.\n" |
| @@ -1533,7 +1713,6 @@ static void scsi_request_fn(struct request_queue *q) | |||
| 1533 | blk_dump_rq_flags(req, "foo"); | 1713 | blk_dump_rq_flags(req, "foo"); |
| 1534 | BUG(); | 1714 | BUG(); |
| 1535 | } | 1715 | } |
| 1536 | spin_lock(shost->host_lock); | ||
| 1537 | 1716 | ||
| 1538 | /* | 1717 | /* |
| 1539 | * We hit this when the driver is using a host wide | 1718 | * We hit this when the driver is using a host wide |
| @@ -1544,9 +1723,11 @@ static void scsi_request_fn(struct request_queue *q) | |||
| 1544 | * a run when a tag is freed. | 1723 | * a run when a tag is freed. |
| 1545 | */ | 1724 | */ |
| 1546 | if (blk_queue_tagged(q) && !blk_rq_tagged(req)) { | 1725 | if (blk_queue_tagged(q) && !blk_rq_tagged(req)) { |
| 1726 | spin_lock_irq(shost->host_lock); | ||
| 1547 | if (list_empty(&sdev->starved_entry)) | 1727 | if (list_empty(&sdev->starved_entry)) |
| 1548 | list_add_tail(&sdev->starved_entry, | 1728 | list_add_tail(&sdev->starved_entry, |
| 1549 | &shost->starved_list); | 1729 | &shost->starved_list); |
| 1730 | spin_unlock_irq(shost->host_lock); | ||
| 1550 | goto not_ready; | 1731 | goto not_ready; |
| 1551 | } | 1732 | } |
| 1552 | 1733 | ||
| @@ -1554,16 +1735,7 @@ static void scsi_request_fn(struct request_queue *q) | |||
| 1554 | goto not_ready; | 1735 | goto not_ready; |
| 1555 | 1736 | ||
| 1556 | if (!scsi_host_queue_ready(q, shost, sdev)) | 1737 | if (!scsi_host_queue_ready(q, shost, sdev)) |
| 1557 | goto not_ready; | 1738 | goto host_not_ready; |
| 1558 | |||
| 1559 | scsi_target(sdev)->target_busy++; | ||
| 1560 | shost->host_busy++; | ||
| 1561 | |||
| 1562 | /* | ||
| 1563 | * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will | ||
| 1564 | * take the lock again. | ||
| 1565 | */ | ||
| 1566 | spin_unlock_irq(shost->host_lock); | ||
| 1567 | 1739 | ||
| 1568 | /* | 1740 | /* |
| 1569 | * Finally, initialize any error handling parameters, and set up | 1741 | * Finally, initialize any error handling parameters, and set up |
| @@ -1574,17 +1746,22 @@ static void scsi_request_fn(struct request_queue *q) | |||
| 1574 | /* | 1746 | /* |
| 1575 | * Dispatch the command to the low-level driver. | 1747 | * Dispatch the command to the low-level driver. |
| 1576 | */ | 1748 | */ |
| 1749 | cmd->scsi_done = scsi_done; | ||
| 1577 | rtn = scsi_dispatch_cmd(cmd); | 1750 | rtn = scsi_dispatch_cmd(cmd); |
| 1578 | spin_lock_irq(q->queue_lock); | 1751 | if (rtn) { |
| 1579 | if (rtn) | 1752 | scsi_queue_insert(cmd, rtn); |
| 1753 | spin_lock_irq(q->queue_lock); | ||
| 1580 | goto out_delay; | 1754 | goto out_delay; |
| 1755 | } | ||
| 1756 | spin_lock_irq(q->queue_lock); | ||
| 1581 | } | 1757 | } |
| 1582 | 1758 | ||
| 1583 | return; | 1759 | return; |
| 1584 | 1760 | ||
| 1761 | host_not_ready: | ||
| 1762 | if (scsi_target(sdev)->can_queue > 0) | ||
| 1763 | atomic_dec(&scsi_target(sdev)->target_busy); | ||
| 1585 | not_ready: | 1764 | not_ready: |
| 1586 | spin_unlock_irq(shost->host_lock); | ||
| 1587 | |||
| 1588 | /* | 1765 | /* |
| 1589 | * lock q, handle tag, requeue req, and decrement device_busy. We | 1766 | * lock q, handle tag, requeue req, and decrement device_busy. We |
| 1590 | * must return with queue_lock held. | 1767 | * must return with queue_lock held. |
| @@ -1595,13 +1772,186 @@ static void scsi_request_fn(struct request_queue *q) | |||
| 1595 | */ | 1772 | */ |
| 1596 | spin_lock_irq(q->queue_lock); | 1773 | spin_lock_irq(q->queue_lock); |
| 1597 | blk_requeue_request(q, req); | 1774 | blk_requeue_request(q, req); |
| 1598 | sdev->device_busy--; | 1775 | atomic_dec(&sdev->device_busy); |
| 1599 | out_delay: | 1776 | out_delay: |
| 1600 | if (sdev->device_busy == 0) | 1777 | if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev)) |
| 1601 | blk_delay_queue(q, SCSI_QUEUE_DELAY); | 1778 | blk_delay_queue(q, SCSI_QUEUE_DELAY); |
| 1602 | } | 1779 | } |
| 1603 | 1780 | ||
| 1604 | u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) | 1781 | static inline int prep_to_mq(int ret) |
| 1782 | { | ||
| 1783 | switch (ret) { | ||
| 1784 | case BLKPREP_OK: | ||
| 1785 | return 0; | ||
| 1786 | case BLKPREP_DEFER: | ||
| 1787 | return BLK_MQ_RQ_QUEUE_BUSY; | ||
| 1788 | default: | ||
| 1789 | return BLK_MQ_RQ_QUEUE_ERROR; | ||
| 1790 | } | ||
| 1791 | } | ||
| 1792 | |||
| 1793 | static int scsi_mq_prep_fn(struct request *req) | ||
| 1794 | { | ||
| 1795 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); | ||
| 1796 | struct scsi_device *sdev = req->q->queuedata; | ||
| 1797 | struct Scsi_Host *shost = sdev->host; | ||
| 1798 | unsigned char *sense_buf = cmd->sense_buffer; | ||
| 1799 | struct scatterlist *sg; | ||
| 1800 | |||
| 1801 | memset(cmd, 0, sizeof(struct scsi_cmnd)); | ||
| 1802 | |||
| 1803 | req->special = cmd; | ||
| 1804 | |||
| 1805 | cmd->request = req; | ||
| 1806 | cmd->device = sdev; | ||
| 1807 | cmd->sense_buffer = sense_buf; | ||
| 1808 | |||
| 1809 | cmd->tag = req->tag; | ||
| 1810 | |||
| 1811 | cmd->cmnd = req->cmd; | ||
| 1812 | cmd->prot_op = SCSI_PROT_NORMAL; | ||
| 1813 | |||
| 1814 | INIT_LIST_HEAD(&cmd->list); | ||
| 1815 | INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); | ||
| 1816 | cmd->jiffies_at_alloc = jiffies; | ||
| 1817 | |||
| 1818 | /* | ||
| 1819 | * XXX: cmd_list lookups are only used by two drivers, try to get | ||
| 1820 | * rid of this list in common code. | ||
| 1821 | */ | ||
| 1822 | spin_lock_irq(&sdev->list_lock); | ||
| 1823 | list_add_tail(&cmd->list, &sdev->cmd_list); | ||
| 1824 | spin_unlock_irq(&sdev->list_lock); | ||
| 1825 | |||
| 1826 | sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; | ||
| 1827 | cmd->sdb.table.sgl = sg; | ||
| 1828 | |||
| 1829 | if (scsi_host_get_prot(shost)) { | ||
| 1830 | cmd->prot_sdb = (void *)sg + | ||
| 1831 | shost->sg_tablesize * sizeof(struct scatterlist); | ||
| 1832 | memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer)); | ||
| 1833 | |||
| 1834 | cmd->prot_sdb->table.sgl = | ||
| 1835 | (struct scatterlist *)(cmd->prot_sdb + 1); | ||
| 1836 | } | ||
| 1837 | |||
| 1838 | if (blk_bidi_rq(req)) { | ||
| 1839 | struct request *next_rq = req->next_rq; | ||
| 1840 | struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq); | ||
| 1841 | |||
| 1842 | memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer)); | ||
| 1843 | bidi_sdb->table.sgl = | ||
| 1844 | (struct scatterlist *)(bidi_sdb + 1); | ||
| 1845 | |||
| 1846 | next_rq->special = bidi_sdb; | ||
| 1847 | } | ||
| 1848 | |||
| 1849 | return scsi_setup_cmnd(sdev, req); | ||
| 1850 | } | ||
| 1851 | |||
| 1852 | static void scsi_mq_done(struct scsi_cmnd *cmd) | ||
| 1853 | { | ||
| 1854 | trace_scsi_dispatch_cmd_done(cmd); | ||
| 1855 | blk_mq_complete_request(cmd->request); | ||
| 1856 | } | ||
| 1857 | |||
| 1858 | static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) | ||
| 1859 | { | ||
| 1860 | struct request_queue *q = req->q; | ||
| 1861 | struct scsi_device *sdev = q->queuedata; | ||
| 1862 | struct Scsi_Host *shost = sdev->host; | ||
| 1863 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); | ||
| 1864 | int ret; | ||
| 1865 | int reason; | ||
| 1866 | |||
| 1867 | ret = prep_to_mq(scsi_prep_state_check(sdev, req)); | ||
| 1868 | if (ret) | ||
| 1869 | goto out; | ||
| 1870 | |||
| 1871 | ret = BLK_MQ_RQ_QUEUE_BUSY; | ||
| 1872 | if (!get_device(&sdev->sdev_gendev)) | ||
| 1873 | goto out; | ||
| 1874 | |||
| 1875 | if (!scsi_dev_queue_ready(q, sdev)) | ||
| 1876 | goto out_put_device; | ||
| 1877 | if (!scsi_target_queue_ready(shost, sdev)) | ||
| 1878 | goto out_dec_device_busy; | ||
| 1879 | if (!scsi_host_queue_ready(q, shost, sdev)) | ||
| 1880 | goto out_dec_target_busy; | ||
| 1881 | |||
| 1882 | if (!(req->cmd_flags & REQ_DONTPREP)) { | ||
| 1883 | ret = prep_to_mq(scsi_mq_prep_fn(req)); | ||
| 1884 | if (ret) | ||
| 1885 | goto out_dec_host_busy; | ||
| 1886 | req->cmd_flags |= REQ_DONTPREP; | ||
| 1887 | } | ||
| 1888 | |||
| 1889 | scsi_init_cmd_errh(cmd); | ||
| 1890 | cmd->scsi_done = scsi_mq_done; | ||
| 1891 | |||
| 1892 | reason = scsi_dispatch_cmd(cmd); | ||
| 1893 | if (reason) { | ||
| 1894 | scsi_set_blocked(cmd, reason); | ||
| 1895 | ret = BLK_MQ_RQ_QUEUE_BUSY; | ||
| 1896 | goto out_dec_host_busy; | ||
| 1897 | } | ||
| 1898 | |||
| 1899 | return BLK_MQ_RQ_QUEUE_OK; | ||
| 1900 | |||
| 1901 | out_dec_host_busy: | ||
| 1902 | atomic_dec(&shost->host_busy); | ||
| 1903 | out_dec_target_busy: | ||
| 1904 | if (scsi_target(sdev)->can_queue > 0) | ||
| 1905 | atomic_dec(&scsi_target(sdev)->target_busy); | ||
| 1906 | out_dec_device_busy: | ||
| 1907 | atomic_dec(&sdev->device_busy); | ||
| 1908 | out_put_device: | ||
| 1909 | put_device(&sdev->sdev_gendev); | ||
| 1910 | out: | ||
| 1911 | switch (ret) { | ||
| 1912 | case BLK_MQ_RQ_QUEUE_BUSY: | ||
| 1913 | blk_mq_stop_hw_queue(hctx); | ||
| 1914 | if (atomic_read(&sdev->device_busy) == 0 && | ||
| 1915 | !scsi_device_blocked(sdev)) | ||
| 1916 | blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY); | ||
| 1917 | break; | ||
| 1918 | case BLK_MQ_RQ_QUEUE_ERROR: | ||
| 1919 | /* | ||
| 1920 | * Make sure to release all allocated ressources when | ||
| 1921 | * we hit an error, as we will never see this command | ||
| 1922 | * again. | ||
| 1923 | */ | ||
| 1924 | if (req->cmd_flags & REQ_DONTPREP) | ||
| 1925 | scsi_mq_uninit_cmd(cmd); | ||
| 1926 | break; | ||
| 1927 | default: | ||
| 1928 | break; | ||
| 1929 | } | ||
| 1930 | return ret; | ||
| 1931 | } | ||
| 1932 | |||
| 1933 | static int scsi_init_request(void *data, struct request *rq, | ||
| 1934 | unsigned int hctx_idx, unsigned int request_idx, | ||
| 1935 | unsigned int numa_node) | ||
| 1936 | { | ||
| 1937 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); | ||
| 1938 | |||
| 1939 | cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL, | ||
| 1940 | numa_node); | ||
| 1941 | if (!cmd->sense_buffer) | ||
| 1942 | return -ENOMEM; | ||
| 1943 | return 0; | ||
| 1944 | } | ||
| 1945 | |||
| 1946 | static void scsi_exit_request(void *data, struct request *rq, | ||
| 1947 | unsigned int hctx_idx, unsigned int request_idx) | ||
| 1948 | { | ||
| 1949 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); | ||
| 1950 | |||
| 1951 | kfree(cmd->sense_buffer); | ||
| 1952 | } | ||
| 1953 | |||
| 1954 | static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) | ||
| 1605 | { | 1955 | { |
| 1606 | struct device *host_dev; | 1956 | struct device *host_dev; |
| 1607 | u64 bounce_limit = 0xffffffff; | 1957 | u64 bounce_limit = 0xffffffff; |
| @@ -1621,18 +1971,11 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) | |||
| 1621 | 1971 | ||
| 1622 | return bounce_limit; | 1972 | return bounce_limit; |
| 1623 | } | 1973 | } |
| 1624 | EXPORT_SYMBOL(scsi_calculate_bounce_limit); | ||
| 1625 | 1974 | ||
| 1626 | struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, | 1975 | static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) |
| 1627 | request_fn_proc *request_fn) | ||
| 1628 | { | 1976 | { |
| 1629 | struct request_queue *q; | ||
| 1630 | struct device *dev = shost->dma_dev; | 1977 | struct device *dev = shost->dma_dev; |
| 1631 | 1978 | ||
| 1632 | q = blk_init_queue(request_fn, NULL); | ||
| 1633 | if (!q) | ||
| 1634 | return NULL; | ||
| 1635 | |||
| 1636 | /* | 1979 | /* |
| 1637 | * this limit is imposed by hardware restrictions | 1980 | * this limit is imposed by hardware restrictions |
| 1638 | */ | 1981 | */ |
| @@ -1663,7 +2006,17 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, | |||
| 1663 | * blk_queue_update_dma_alignment() later. | 2006 | * blk_queue_update_dma_alignment() later. |
| 1664 | */ | 2007 | */ |
| 1665 | blk_queue_dma_alignment(q, 0x03); | 2008 | blk_queue_dma_alignment(q, 0x03); |
| 2009 | } | ||
| 2010 | |||
| 2011 | struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, | ||
| 2012 | request_fn_proc *request_fn) | ||
| 2013 | { | ||
| 2014 | struct request_queue *q; | ||
| 1666 | 2015 | ||
| 2016 | q = blk_init_queue(request_fn, NULL); | ||
| 2017 | if (!q) | ||
| 2018 | return NULL; | ||
| 2019 | __scsi_init_queue(shost, q); | ||
| 1667 | return q; | 2020 | return q; |
| 1668 | } | 2021 | } |
| 1669 | EXPORT_SYMBOL(__scsi_alloc_queue); | 2022 | EXPORT_SYMBOL(__scsi_alloc_queue); |
| @@ -1684,6 +2037,55 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) | |||
| 1684 | return q; | 2037 | return q; |
| 1685 | } | 2038 | } |
| 1686 | 2039 | ||
| 2040 | static struct blk_mq_ops scsi_mq_ops = { | ||
| 2041 | .map_queue = blk_mq_map_queue, | ||
| 2042 | .queue_rq = scsi_queue_rq, | ||
| 2043 | .complete = scsi_softirq_done, | ||
| 2044 | .timeout = scsi_times_out, | ||
| 2045 | .init_request = scsi_init_request, | ||
| 2046 | .exit_request = scsi_exit_request, | ||
| 2047 | }; | ||
| 2048 | |||
| 2049 | struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev) | ||
| 2050 | { | ||
| 2051 | sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set); | ||
| 2052 | if (IS_ERR(sdev->request_queue)) | ||
| 2053 | return NULL; | ||
| 2054 | |||
| 2055 | sdev->request_queue->queuedata = sdev; | ||
| 2056 | __scsi_init_queue(sdev->host, sdev->request_queue); | ||
| 2057 | return sdev->request_queue; | ||
| 2058 | } | ||
| 2059 | |||
| 2060 | int scsi_mq_setup_tags(struct Scsi_Host *shost) | ||
| 2061 | { | ||
| 2062 | unsigned int cmd_size, sgl_size, tbl_size; | ||
| 2063 | |||
| 2064 | tbl_size = shost->sg_tablesize; | ||
| 2065 | if (tbl_size > SCSI_MAX_SG_SEGMENTS) | ||
| 2066 | tbl_size = SCSI_MAX_SG_SEGMENTS; | ||
| 2067 | sgl_size = tbl_size * sizeof(struct scatterlist); | ||
| 2068 | cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; | ||
| 2069 | if (scsi_host_get_prot(shost)) | ||
| 2070 | cmd_size += sizeof(struct scsi_data_buffer) + sgl_size; | ||
| 2071 | |||
| 2072 | memset(&shost->tag_set, 0, sizeof(shost->tag_set)); | ||
| 2073 | shost->tag_set.ops = &scsi_mq_ops; | ||
| 2074 | shost->tag_set.nr_hw_queues = 1; | ||
| 2075 | shost->tag_set.queue_depth = shost->can_queue; | ||
| 2076 | shost->tag_set.cmd_size = cmd_size; | ||
| 2077 | shost->tag_set.numa_node = NUMA_NO_NODE; | ||
| 2078 | shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; | ||
| 2079 | shost->tag_set.driver_data = shost; | ||
| 2080 | |||
| 2081 | return blk_mq_alloc_tag_set(&shost->tag_set); | ||
| 2082 | } | ||
| 2083 | |||
| 2084 | void scsi_mq_destroy_tags(struct Scsi_Host *shost) | ||
| 2085 | { | ||
| 2086 | blk_mq_free_tag_set(&shost->tag_set); | ||
| 2087 | } | ||
| 2088 | |||
| 1687 | /* | 2089 | /* |
| 1688 | * Function: scsi_block_requests() | 2090 | * Function: scsi_block_requests() |
| 1689 | * | 2091 | * |
| @@ -2139,9 +2541,9 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) | |||
| 2139 | return 0; | 2541 | return 0; |
| 2140 | 2542 | ||
| 2141 | illegal: | 2543 | illegal: |
| 2142 | SCSI_LOG_ERROR_RECOVERY(1, | 2544 | SCSI_LOG_ERROR_RECOVERY(1, |
| 2143 | sdev_printk(KERN_ERR, sdev, | 2545 | sdev_printk(KERN_ERR, sdev, |
| 2144 | "Illegal state transition %s->%s\n", | 2546 | "Illegal state transition %s->%s", |
| 2145 | scsi_device_state_name(oldstate), | 2547 | scsi_device_state_name(oldstate), |
| 2146 | scsi_device_state_name(state)) | 2548 | scsi_device_state_name(state)) |
| 2147 | ); | 2549 | ); |
| @@ -2337,7 +2739,7 @@ scsi_device_quiesce(struct scsi_device *sdev) | |||
| 2337 | return err; | 2739 | return err; |
| 2338 | 2740 | ||
| 2339 | scsi_run_queue(sdev->request_queue); | 2741 | scsi_run_queue(sdev->request_queue); |
| 2340 | while (sdev->device_busy) { | 2742 | while (atomic_read(&sdev->device_busy)) { |
| 2341 | msleep_interruptible(200); | 2743 | msleep_interruptible(200); |
| 2342 | scsi_run_queue(sdev->request_queue); | 2744 | scsi_run_queue(sdev->request_queue); |
| 2343 | } | 2745 | } |
| @@ -2429,9 +2831,13 @@ scsi_internal_device_block(struct scsi_device *sdev) | |||
| 2429 | * block layer from calling the midlayer with this device's | 2831 | * block layer from calling the midlayer with this device's |
| 2430 | * request queue. | 2832 | * request queue. |
| 2431 | */ | 2833 | */ |
| 2432 | spin_lock_irqsave(q->queue_lock, flags); | 2834 | if (q->mq_ops) { |
| 2433 | blk_stop_queue(q); | 2835 | blk_mq_stop_hw_queues(q); |
| 2434 | spin_unlock_irqrestore(q->queue_lock, flags); | 2836 | } else { |
| 2837 | spin_lock_irqsave(q->queue_lock, flags); | ||
| 2838 | blk_stop_queue(q); | ||
| 2839 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
| 2840 | } | ||
| 2435 | 2841 | ||
| 2436 | return 0; | 2842 | return 0; |
| 2437 | } | 2843 | } |
| @@ -2477,9 +2883,13 @@ scsi_internal_device_unblock(struct scsi_device *sdev, | |||
| 2477 | sdev->sdev_state != SDEV_OFFLINE) | 2883 | sdev->sdev_state != SDEV_OFFLINE) |
| 2478 | return -EINVAL; | 2884 | return -EINVAL; |
| 2479 | 2885 | ||
| 2480 | spin_lock_irqsave(q->queue_lock, flags); | 2886 | if (q->mq_ops) { |
| 2481 | blk_start_queue(q); | 2887 | blk_mq_start_stopped_hw_queues(q, false); |
| 2482 | spin_unlock_irqrestore(q->queue_lock, flags); | 2888 | } else { |
| 2889 | spin_lock_irqsave(q->queue_lock, flags); | ||
| 2890 | blk_start_queue(q); | ||
| 2891 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
| 2892 | } | ||
| 2483 | 2893 | ||
| 2484 | return 0; | 2894 | return 0; |
| 2485 | } | 2895 | } |
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 48e5b657e79f..12b8e1bee7f0 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h | |||
| @@ -88,6 +88,9 @@ extern void scsi_next_command(struct scsi_cmnd *cmd); | |||
| 88 | extern void scsi_io_completion(struct scsi_cmnd *, unsigned int); | 88 | extern void scsi_io_completion(struct scsi_cmnd *, unsigned int); |
| 89 | extern void scsi_run_host_queues(struct Scsi_Host *shost); | 89 | extern void scsi_run_host_queues(struct Scsi_Host *shost); |
| 90 | extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev); | 90 | extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev); |
| 91 | extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev); | ||
| 92 | extern int scsi_mq_setup_tags(struct Scsi_Host *shost); | ||
| 93 | extern void scsi_mq_destroy_tags(struct Scsi_Host *shost); | ||
| 91 | extern int scsi_init_queue(void); | 94 | extern int scsi_init_queue(void); |
| 92 | extern void scsi_exit_queue(void); | 95 | extern void scsi_exit_queue(void); |
| 93 | struct request_queue; | 96 | struct request_queue; |
| @@ -115,7 +118,7 @@ extern void scsi_exit_procfs(void); | |||
| 115 | extern char scsi_scan_type[]; | 118 | extern char scsi_scan_type[]; |
| 116 | extern int scsi_complete_async_scans(void); | 119 | extern int scsi_complete_async_scans(void); |
| 117 | extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int, | 120 | extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int, |
| 118 | unsigned int, unsigned int, int); | 121 | unsigned int, u64, int); |
| 119 | extern void scsi_forget_host(struct Scsi_Host *); | 122 | extern void scsi_forget_host(struct Scsi_Host *); |
| 120 | extern void scsi_rescan_device(struct device *); | 123 | extern void scsi_rescan_device(struct device *); |
| 121 | 124 | ||
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index 86f0c5d5c116..6fcefa2da503 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c | |||
| @@ -185,7 +185,7 @@ static int proc_print_scsidevice(struct device *dev, void *data) | |||
| 185 | 185 | ||
| 186 | sdev = to_scsi_device(dev); | 186 | sdev = to_scsi_device(dev); |
| 187 | seq_printf(s, | 187 | seq_printf(s, |
| 188 | "Host: scsi%d Channel: %02d Id: %02d Lun: %02d\n Vendor: ", | 188 | "Host: scsi%d Channel: %02d Id: %02d Lun: %02llu\n Vendor: ", |
| 189 | sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); | 189 | sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); |
| 190 | for (i = 0; i < 8; i++) { | 190 | for (i = 0; i < 8; i++) { |
| 191 | if (sdev->vendor[i] >= 0x20) | 191 | if (sdev->vendor[i] >= 0x20) |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index e02b3aab56ce..56675dbbf681 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
| @@ -81,15 +81,11 @@ static const char *scsi_null_device_strs = "nullnullnullnull"; | |||
| 81 | 81 | ||
| 82 | #define MAX_SCSI_LUNS 512 | 82 | #define MAX_SCSI_LUNS 512 |
| 83 | 83 | ||
| 84 | #ifdef CONFIG_SCSI_MULTI_LUN | 84 | static u64 max_scsi_luns = MAX_SCSI_LUNS; |
| 85 | static unsigned int max_scsi_luns = MAX_SCSI_LUNS; | ||
| 86 | #else | ||
| 87 | static unsigned int max_scsi_luns = 1; | ||
| 88 | #endif | ||
| 89 | 85 | ||
| 90 | module_param_named(max_luns, max_scsi_luns, uint, S_IRUGO|S_IWUSR); | 86 | module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR); |
| 91 | MODULE_PARM_DESC(max_luns, | 87 | MODULE_PARM_DESC(max_luns, |
| 92 | "last scsi LUN (should be between 1 and 2^32-1)"); | 88 | "last scsi LUN (should be between 1 and 2^64-1)"); |
| 93 | 89 | ||
| 94 | #ifdef CONFIG_SCSI_SCAN_ASYNC | 90 | #ifdef CONFIG_SCSI_SCAN_ASYNC |
| 95 | #define SCSI_SCAN_TYPE_DEFAULT "async" | 91 | #define SCSI_SCAN_TYPE_DEFAULT "async" |
| @@ -198,7 +194,7 @@ static void scsi_unlock_floptical(struct scsi_device *sdev, | |||
| 198 | { | 194 | { |
| 199 | unsigned char scsi_cmd[MAX_COMMAND_SIZE]; | 195 | unsigned char scsi_cmd[MAX_COMMAND_SIZE]; |
| 200 | 196 | ||
| 201 | printk(KERN_NOTICE "scsi: unlocking floptical drive\n"); | 197 | sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n"); |
| 202 | scsi_cmd[0] = MODE_SENSE; | 198 | scsi_cmd[0] = MODE_SENSE; |
| 203 | scsi_cmd[1] = 0; | 199 | scsi_cmd[1] = 0; |
| 204 | scsi_cmd[2] = 0x2e; | 200 | scsi_cmd[2] = 0x2e; |
| @@ -224,7 +220,7 @@ static void scsi_unlock_floptical(struct scsi_device *sdev, | |||
| 224 | * scsi_Device pointer, or NULL on failure. | 220 | * scsi_Device pointer, or NULL on failure. |
| 225 | **/ | 221 | **/ |
| 226 | static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, | 222 | static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, |
| 227 | unsigned int lun, void *hostdata) | 223 | u64 lun, void *hostdata) |
| 228 | { | 224 | { |
| 229 | struct scsi_device *sdev; | 225 | struct scsi_device *sdev; |
| 230 | int display_failure_msg = 1, ret; | 226 | int display_failure_msg = 1, ret; |
| @@ -277,7 +273,10 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, | |||
| 277 | */ | 273 | */ |
| 278 | sdev->borken = 1; | 274 | sdev->borken = 1; |
| 279 | 275 | ||
| 280 | sdev->request_queue = scsi_alloc_queue(sdev); | 276 | if (shost_use_blk_mq(shost)) |
| 277 | sdev->request_queue = scsi_mq_alloc_queue(sdev); | ||
| 278 | else | ||
| 279 | sdev->request_queue = scsi_alloc_queue(sdev); | ||
| 281 | if (!sdev->request_queue) { | 280 | if (!sdev->request_queue) { |
| 282 | /* release fn is set up in scsi_sysfs_device_initialise, so | 281 | /* release fn is set up in scsi_sysfs_device_initialise, so |
| 283 | * have to free and put manually here */ | 282 | * have to free and put manually here */ |
| @@ -600,8 +599,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
| 600 | HZ / 2 + HZ * scsi_inq_timeout, 3, | 599 | HZ / 2 + HZ * scsi_inq_timeout, 3, |
| 601 | &resid); | 600 | &resid); |
| 602 | 601 | ||
| 603 | SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s " | 602 | SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, |
| 604 | "with code 0x%x\n", | 603 | "scsi scan: INQUIRY %s with code 0x%x\n", |
| 605 | result ? "failed" : "successful", result)); | 604 | result ? "failed" : "successful", result)); |
| 606 | 605 | ||
| 607 | if (result) { | 606 | if (result) { |
| @@ -671,9 +670,10 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
| 671 | } | 670 | } |
| 672 | 671 | ||
| 673 | } else if (pass == 2) { | 672 | } else if (pass == 2) { |
| 674 | printk(KERN_INFO "scsi scan: %d byte inquiry failed. " | 673 | sdev_printk(KERN_INFO, sdev, |
| 675 | "Consider BLIST_INQUIRY_36 for this device\n", | 674 | "scsi scan: %d byte inquiry failed. " |
| 676 | try_inquiry_len); | 675 | "Consider BLIST_INQUIRY_36 for this device\n", |
| 676 | try_inquiry_len); | ||
| 677 | 677 | ||
| 678 | /* If this pass failed, the third pass goes back and transfers | 678 | /* If this pass failed, the third pass goes back and transfers |
| 679 | * the same amount as we successfully got in the first pass. */ | 679 | * the same amount as we successfully got in the first pass. */ |
| @@ -706,8 +706,9 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
| 706 | * strings. | 706 | * strings. |
| 707 | */ | 707 | */ |
| 708 | if (sdev->inquiry_len < 36) { | 708 | if (sdev->inquiry_len < 36) { |
| 709 | printk(KERN_INFO "scsi scan: INQUIRY result too short (%d)," | 709 | sdev_printk(KERN_INFO, sdev, |
| 710 | " using 36\n", sdev->inquiry_len); | 710 | "scsi scan: INQUIRY result too short (%d)," |
| 711 | " using 36\n", sdev->inquiry_len); | ||
| 711 | sdev->inquiry_len = 36; | 712 | sdev->inquiry_len = 36; |
| 712 | } | 713 | } |
| 713 | 714 | ||
| @@ -806,29 +807,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
| 806 | sdev->removable = (inq_result[1] & 0x80) >> 7; | 807 | sdev->removable = (inq_result[1] & 0x80) >> 7; |
| 807 | } | 808 | } |
| 808 | 809 | ||
| 809 | switch (sdev->type) { | ||
| 810 | case TYPE_RBC: | ||
| 811 | case TYPE_TAPE: | ||
| 812 | case TYPE_DISK: | ||
| 813 | case TYPE_PRINTER: | ||
| 814 | case TYPE_MOD: | ||
| 815 | case TYPE_PROCESSOR: | ||
| 816 | case TYPE_SCANNER: | ||
| 817 | case TYPE_MEDIUM_CHANGER: | ||
| 818 | case TYPE_ENCLOSURE: | ||
| 819 | case TYPE_COMM: | ||
| 820 | case TYPE_RAID: | ||
| 821 | case TYPE_OSD: | ||
| 822 | sdev->writeable = 1; | ||
| 823 | break; | ||
| 824 | case TYPE_ROM: | ||
| 825 | case TYPE_WORM: | ||
| 826 | sdev->writeable = 0; | ||
| 827 | break; | ||
| 828 | default: | ||
| 829 | printk(KERN_INFO "scsi: unknown device type %d\n", sdev->type); | ||
| 830 | } | ||
| 831 | |||
| 832 | if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) { | 810 | if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) { |
| 833 | /* RBC and MMC devices can return SCSI-3 compliance and yet | 811 | /* RBC and MMC devices can return SCSI-3 compliance and yet |
| 834 | * still not support REPORT LUNS, so make them act as | 812 | * still not support REPORT LUNS, so make them act as |
| @@ -922,6 +900,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
| 922 | if (*bflags & BLIST_USE_10_BYTE_MS) | 900 | if (*bflags & BLIST_USE_10_BYTE_MS) |
| 923 | sdev->use_10_for_ms = 1; | 901 | sdev->use_10_for_ms = 1; |
| 924 | 902 | ||
| 903 | /* some devices don't like REPORT SUPPORTED OPERATION CODES | ||
| 904 | * and will simply timeout causing sd_mod init to take a very | ||
| 905 | * very long time */ | ||
| 906 | if (*bflags & BLIST_NO_RSOC) | ||
| 907 | sdev->no_report_opcodes = 1; | ||
| 908 | |||
| 925 | /* set the device running here so that slave configure | 909 | /* set the device running here so that slave configure |
| 926 | * may do I/O */ | 910 | * may do I/O */ |
| 927 | ret = scsi_device_set_state(sdev, SDEV_RUNNING); | 911 | ret = scsi_device_set_state(sdev, SDEV_RUNNING); |
| @@ -950,7 +934,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
| 950 | 934 | ||
| 951 | sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; | 935 | sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; |
| 952 | 936 | ||
| 953 | if (*bflags & BLIST_SKIP_VPD_PAGES) | 937 | if (*bflags & BLIST_TRY_VPD_PAGES) |
| 938 | sdev->try_vpd_pages = 1; | ||
| 939 | else if (*bflags & BLIST_SKIP_VPD_PAGES) | ||
| 954 | sdev->skip_vpd_pages = 1; | 940 | sdev->skip_vpd_pages = 1; |
| 955 | 941 | ||
| 956 | transport_configure_device(&sdev->sdev_gendev); | 942 | transport_configure_device(&sdev->sdev_gendev); |
| @@ -1032,7 +1018,7 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq, | |||
| 1032 | * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized | 1018 | * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized |
| 1033 | **/ | 1019 | **/ |
| 1034 | static int scsi_probe_and_add_lun(struct scsi_target *starget, | 1020 | static int scsi_probe_and_add_lun(struct scsi_target *starget, |
| 1035 | uint lun, int *bflagsp, | 1021 | u64 lun, int *bflagsp, |
| 1036 | struct scsi_device **sdevp, int rescan, | 1022 | struct scsi_device **sdevp, int rescan, |
| 1037 | void *hostdata) | 1023 | void *hostdata) |
| 1038 | { | 1024 | { |
| @@ -1048,7 +1034,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, | |||
| 1048 | sdev = scsi_device_lookup_by_target(starget, lun); | 1034 | sdev = scsi_device_lookup_by_target(starget, lun); |
| 1049 | if (sdev) { | 1035 | if (sdev) { |
| 1050 | if (rescan || !scsi_device_created(sdev)) { | 1036 | if (rescan || !scsi_device_created(sdev)) { |
| 1051 | SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO | 1037 | SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, |
| 1052 | "scsi scan: device exists on %s\n", | 1038 | "scsi scan: device exists on %s\n", |
| 1053 | dev_name(&sdev->sdev_gendev))); | 1039 | dev_name(&sdev->sdev_gendev))); |
| 1054 | if (sdevp) | 1040 | if (sdevp) |
| @@ -1135,7 +1121,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, | |||
| 1135 | if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) && | 1121 | if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) && |
| 1136 | (result[0] & 0x1f) == 0x1f && | 1122 | (result[0] & 0x1f) == 0x1f && |
| 1137 | !scsi_is_wlun(lun)) { | 1123 | !scsi_is_wlun(lun)) { |
| 1138 | SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO | 1124 | SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, |
| 1139 | "scsi scan: peripheral device type" | 1125 | "scsi scan: peripheral device type" |
| 1140 | " of 31, no device added\n")); | 1126 | " of 31, no device added\n")); |
| 1141 | res = SCSI_SCAN_TARGET_PRESENT; | 1127 | res = SCSI_SCAN_TARGET_PRESENT; |
| @@ -1185,11 +1171,12 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, | |||
| 1185 | static void scsi_sequential_lun_scan(struct scsi_target *starget, | 1171 | static void scsi_sequential_lun_scan(struct scsi_target *starget, |
| 1186 | int bflags, int scsi_level, int rescan) | 1172 | int bflags, int scsi_level, int rescan) |
| 1187 | { | 1173 | { |
| 1188 | unsigned int sparse_lun, lun, max_dev_lun; | 1174 | uint max_dev_lun; |
| 1175 | u64 sparse_lun, lun; | ||
| 1189 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | 1176 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
| 1190 | 1177 | ||
| 1191 | SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: Sequential scan of" | 1178 | SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget, |
| 1192 | "%s\n", dev_name(&starget->dev))); | 1179 | "scsi scan: Sequential scan\n")); |
| 1193 | 1180 | ||
| 1194 | max_dev_lun = min(max_scsi_luns, shost->max_lun); | 1181 | max_dev_lun = min(max_scsi_luns, shost->max_lun); |
| 1195 | /* | 1182 | /* |
| @@ -1239,6 +1226,12 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget, | |||
| 1239 | max_dev_lun = min(8U, max_dev_lun); | 1226 | max_dev_lun = min(8U, max_dev_lun); |
| 1240 | 1227 | ||
| 1241 | /* | 1228 | /* |
| 1229 | * Stop scanning at 255 unless BLIST_SCSI3LUN | ||
| 1230 | */ | ||
| 1231 | if (!(bflags & BLIST_SCSI3LUN)) | ||
| 1232 | max_dev_lun = min(256U, max_dev_lun); | ||
| 1233 | |||
| 1234 | /* | ||
| 1242 | * We have already scanned LUN 0, so start at LUN 1. Keep scanning | 1235 | * We have already scanned LUN 0, so start at LUN 1. Keep scanning |
| 1243 | * until we reach the max, or no LUN is found and we are not | 1236 | * until we reach the max, or no LUN is found and we are not |
| 1244 | * sparse_lun. | 1237 | * sparse_lun. |
| @@ -1260,24 +1253,25 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget, | |||
| 1260 | * truncation before using this function. | 1253 | * truncation before using this function. |
| 1261 | * | 1254 | * |
| 1262 | * Notes: | 1255 | * Notes: |
| 1263 | * The struct scsi_lun is assumed to be four levels, with each level | ||
| 1264 | * effectively containing a SCSI byte-ordered (big endian) short; the | ||
| 1265 | * addressing bits of each level are ignored (the highest two bits). | ||
| 1266 | * For a description of the LUN format, post SCSI-3 see the SCSI | 1256 | * For a description of the LUN format, post SCSI-3 see the SCSI |
| 1267 | * Architecture Model, for SCSI-3 see the SCSI Controller Commands. | 1257 | * Architecture Model, for SCSI-3 see the SCSI Controller Commands. |
| 1268 | * | 1258 | * |
| 1269 | * Given a struct scsi_lun of: 0a 04 0b 03 00 00 00 00, this function returns | 1259 | * Given a struct scsi_lun of: d2 04 0b 03 00 00 00 00, this function |
| 1270 | * the integer: 0x0b030a04 | 1260 | * returns the integer: 0x0b03d204 |
| 1261 | * | ||
| 1262 | * This encoding will return a standard integer LUN for LUNs smaller | ||
| 1263 | * than 256, which typically use a single level LUN structure with | ||
| 1264 | * addressing method 0. | ||
| 1271 | **/ | 1265 | **/ |
| 1272 | int scsilun_to_int(struct scsi_lun *scsilun) | 1266 | u64 scsilun_to_int(struct scsi_lun *scsilun) |
| 1273 | { | 1267 | { |
| 1274 | int i; | 1268 | int i; |
| 1275 | unsigned int lun; | 1269 | u64 lun; |
| 1276 | 1270 | ||
| 1277 | lun = 0; | 1271 | lun = 0; |
| 1278 | for (i = 0; i < sizeof(lun); i += 2) | 1272 | for (i = 0; i < sizeof(lun); i += 2) |
| 1279 | lun = lun | (((scsilun->scsi_lun[i] << 8) | | 1273 | lun = lun | (((u64)scsilun->scsi_lun[i] << ((i + 1) * 8)) | |
| 1280 | scsilun->scsi_lun[i + 1]) << (i * 8)); | 1274 | ((u64)scsilun->scsi_lun[i + 1] << (i * 8))); |
| 1281 | return lun; | 1275 | return lun; |
| 1282 | } | 1276 | } |
| 1283 | EXPORT_SYMBOL(scsilun_to_int); | 1277 | EXPORT_SYMBOL(scsilun_to_int); |
| @@ -1291,16 +1285,13 @@ EXPORT_SYMBOL(scsilun_to_int); | |||
| 1291 | * Reverts the functionality of the scsilun_to_int, which packed | 1285 | * Reverts the functionality of the scsilun_to_int, which packed |
| 1292 | * an 8-byte lun value into an int. This routine unpacks the int | 1286 | * an 8-byte lun value into an int. This routine unpacks the int |
| 1293 | * back into the lun value. | 1287 | * back into the lun value. |
| 1294 | * Note: the scsilun_to_int() routine does not truly handle all | ||
| 1295 | * 8bytes of the lun value. This functions restores only as much | ||
| 1296 | * as was set by the routine. | ||
| 1297 | * | 1288 | * |
| 1298 | * Notes: | 1289 | * Notes: |
| 1299 | * Given an integer : 0x0b030a04, this function returns a | 1290 | * Given an integer : 0x0b03d204, this function returns a |
| 1300 | * scsi_lun of : struct scsi_lun of: 0a 04 0b 03 00 00 00 00 | 1291 | * struct scsi_lun of: d2 04 0b 03 00 00 00 00 |
| 1301 | * | 1292 | * |
| 1302 | **/ | 1293 | **/ |
| 1303 | void int_to_scsilun(unsigned int lun, struct scsi_lun *scsilun) | 1294 | void int_to_scsilun(u64 lun, struct scsi_lun *scsilun) |
| 1304 | { | 1295 | { |
| 1305 | int i; | 1296 | int i; |
| 1306 | 1297 | ||
| @@ -1340,7 +1331,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
| 1340 | char devname[64]; | 1331 | char devname[64]; |
| 1341 | unsigned char scsi_cmd[MAX_COMMAND_SIZE]; | 1332 | unsigned char scsi_cmd[MAX_COMMAND_SIZE]; |
| 1342 | unsigned int length; | 1333 | unsigned int length; |
| 1343 | unsigned int lun; | 1334 | u64 lun; |
| 1344 | unsigned int num_luns; | 1335 | unsigned int num_luns; |
| 1345 | unsigned int retries; | 1336 | unsigned int retries; |
| 1346 | int result; | 1337 | int result; |
| @@ -1430,17 +1421,19 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
| 1430 | * a retry. | 1421 | * a retry. |
| 1431 | */ | 1422 | */ |
| 1432 | for (retries = 0; retries < 3; retries++) { | 1423 | for (retries = 0; retries < 3; retries++) { |
| 1433 | SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: Sending" | 1424 | SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, |
| 1434 | " REPORT LUNS to %s (try %d)\n", devname, | 1425 | "scsi scan: Sending REPORT LUNS to (try %d)\n", |
| 1435 | retries)); | 1426 | retries)); |
| 1436 | 1427 | ||
| 1437 | result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, | 1428 | result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, |
| 1438 | lun_data, length, &sshdr, | 1429 | lun_data, length, &sshdr, |
| 1439 | SCSI_TIMEOUT + 4 * HZ, 3, NULL); | 1430 | SCSI_TIMEOUT + 4 * HZ, 3, NULL); |
| 1440 | 1431 | ||
| 1441 | SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS" | 1432 | SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, |
| 1442 | " %s (try %d) result 0x%x\n", result | 1433 | "scsi scan: REPORT LUNS" |
| 1443 | ? "failed" : "successful", retries, result)); | 1434 | " %s (try %d) result 0x%x\n", |
| 1435 | result ? "failed" : "successful", | ||
| 1436 | retries, result)); | ||
| 1444 | if (result == 0) | 1437 | if (result == 0) |
| 1445 | break; | 1438 | break; |
| 1446 | else if (scsi_sense_valid(&sshdr)) { | 1439 | else if (scsi_sense_valid(&sshdr)) { |
| @@ -1466,10 +1459,11 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
| 1466 | 1459 | ||
| 1467 | num_luns = (length / sizeof(struct scsi_lun)); | 1460 | num_luns = (length / sizeof(struct scsi_lun)); |
| 1468 | if (num_luns > max_scsi_report_luns) { | 1461 | if (num_luns > max_scsi_report_luns) { |
| 1469 | printk(KERN_WARNING "scsi: On %s only %d (max_scsi_report_luns)" | 1462 | sdev_printk(KERN_WARNING, sdev, |
| 1470 | " of %d luns reported, try increasing" | 1463 | "Only %d (max_scsi_report_luns)" |
| 1471 | " max_scsi_report_luns.\n", devname, | 1464 | " of %d luns reported, try increasing" |
| 1472 | max_scsi_report_luns, num_luns); | 1465 | " max_scsi_report_luns.\n", |
| 1466 | max_scsi_report_luns, num_luns); | ||
| 1473 | num_luns = max_scsi_report_luns; | 1467 | num_luns = max_scsi_report_luns; |
| 1474 | } | 1468 | } |
| 1475 | 1469 | ||
| @@ -1483,27 +1477,10 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
| 1483 | for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) { | 1477 | for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) { |
| 1484 | lun = scsilun_to_int(lunp); | 1478 | lun = scsilun_to_int(lunp); |
| 1485 | 1479 | ||
| 1486 | /* | 1480 | if (lun > sdev->host->max_lun) { |
| 1487 | * Check if the unused part of lunp is non-zero, and so | 1481 | sdev_printk(KERN_WARNING, sdev, |
| 1488 | * does not fit in lun. | 1482 | "lun%llu has a LUN larger than" |
| 1489 | */ | 1483 | " allowed by the host adapter\n", lun); |
| 1490 | if (memcmp(&lunp->scsi_lun[sizeof(lun)], "\0\0\0\0", 4)) { | ||
| 1491 | int i; | ||
| 1492 | |||
| 1493 | /* | ||
| 1494 | * Output an error displaying the LUN in byte order, | ||
| 1495 | * this differs from what linux would print for the | ||
| 1496 | * integer LUN value. | ||
| 1497 | */ | ||
| 1498 | printk(KERN_WARNING "scsi: %s lun 0x", devname); | ||
| 1499 | data = (char *)lunp->scsi_lun; | ||
| 1500 | for (i = 0; i < sizeof(struct scsi_lun); i++) | ||
| 1501 | printk("%02x", data[i]); | ||
| 1502 | printk(" has a LUN larger than currently supported.\n"); | ||
| 1503 | } else if (lun > sdev->host->max_lun) { | ||
| 1504 | printk(KERN_WARNING "scsi: %s lun%d has a LUN larger" | ||
| 1505 | " than allowed by the host adapter\n", | ||
| 1506 | devname, lun); | ||
| 1507 | } else { | 1484 | } else { |
| 1508 | int res; | 1485 | int res; |
| 1509 | 1486 | ||
| @@ -1515,8 +1492,8 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
| 1515 | */ | 1492 | */ |
| 1516 | sdev_printk(KERN_ERR, sdev, | 1493 | sdev_printk(KERN_ERR, sdev, |
| 1517 | "Unexpected response" | 1494 | "Unexpected response" |
| 1518 | " from lun %d while scanning, scan" | 1495 | " from lun %llu while scanning, scan" |
| 1519 | " aborted\n", lun); | 1496 | " aborted\n", (unsigned long long)lun); |
| 1520 | break; | 1497 | break; |
| 1521 | } | 1498 | } |
| 1522 | } | 1499 | } |
| @@ -1535,7 +1512,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
| 1535 | } | 1512 | } |
| 1536 | 1513 | ||
| 1537 | struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel, | 1514 | struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel, |
| 1538 | uint id, uint lun, void *hostdata) | 1515 | uint id, u64 lun, void *hostdata) |
| 1539 | { | 1516 | { |
| 1540 | struct scsi_device *sdev = ERR_PTR(-ENODEV); | 1517 | struct scsi_device *sdev = ERR_PTR(-ENODEV); |
| 1541 | struct device *parent = &shost->shost_gendev; | 1518 | struct device *parent = &shost->shost_gendev; |
| @@ -1571,7 +1548,7 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel, | |||
| 1571 | EXPORT_SYMBOL(__scsi_add_device); | 1548 | EXPORT_SYMBOL(__scsi_add_device); |
| 1572 | 1549 | ||
| 1573 | int scsi_add_device(struct Scsi_Host *host, uint channel, | 1550 | int scsi_add_device(struct Scsi_Host *host, uint channel, |
| 1574 | uint target, uint lun) | 1551 | uint target, u64 lun) |
| 1575 | { | 1552 | { |
| 1576 | struct scsi_device *sdev = | 1553 | struct scsi_device *sdev = |
| 1577 | __scsi_add_device(host, channel, target, lun, NULL); | 1554 | __scsi_add_device(host, channel, target, lun, NULL); |
| @@ -1600,7 +1577,7 @@ void scsi_rescan_device(struct device *dev) | |||
| 1600 | EXPORT_SYMBOL(scsi_rescan_device); | 1577 | EXPORT_SYMBOL(scsi_rescan_device); |
| 1601 | 1578 | ||
| 1602 | static void __scsi_scan_target(struct device *parent, unsigned int channel, | 1579 | static void __scsi_scan_target(struct device *parent, unsigned int channel, |
| 1603 | unsigned int id, unsigned int lun, int rescan) | 1580 | unsigned int id, u64 lun, int rescan) |
| 1604 | { | 1581 | { |
| 1605 | struct Scsi_Host *shost = dev_to_shost(parent); | 1582 | struct Scsi_Host *shost = dev_to_shost(parent); |
| 1606 | int bflags = 0; | 1583 | int bflags = 0; |
| @@ -1668,7 +1645,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, | |||
| 1668 | * sequential scan of LUNs on the target id. | 1645 | * sequential scan of LUNs on the target id. |
| 1669 | **/ | 1646 | **/ |
| 1670 | void scsi_scan_target(struct device *parent, unsigned int channel, | 1647 | void scsi_scan_target(struct device *parent, unsigned int channel, |
| 1671 | unsigned int id, unsigned int lun, int rescan) | 1648 | unsigned int id, u64 lun, int rescan) |
| 1672 | { | 1649 | { |
| 1673 | struct Scsi_Host *shost = dev_to_shost(parent); | 1650 | struct Scsi_Host *shost = dev_to_shost(parent); |
| 1674 | 1651 | ||
| @@ -1688,7 +1665,7 @@ void scsi_scan_target(struct device *parent, unsigned int channel, | |||
| 1688 | EXPORT_SYMBOL(scsi_scan_target); | 1665 | EXPORT_SYMBOL(scsi_scan_target); |
| 1689 | 1666 | ||
| 1690 | static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, | 1667 | static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, |
| 1691 | unsigned int id, unsigned int lun, int rescan) | 1668 | unsigned int id, u64 lun, int rescan) |
| 1692 | { | 1669 | { |
| 1693 | uint order_id; | 1670 | uint order_id; |
| 1694 | 1671 | ||
| @@ -1719,10 +1696,10 @@ static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, | |||
| 1719 | } | 1696 | } |
| 1720 | 1697 | ||
| 1721 | int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, | 1698 | int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, |
| 1722 | unsigned int id, unsigned int lun, int rescan) | 1699 | unsigned int id, u64 lun, int rescan) |
| 1723 | { | 1700 | { |
| 1724 | SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost, | 1701 | SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost, |
| 1725 | "%s: <%u:%u:%u>\n", | 1702 | "%s: <%u:%u:%llu>\n", |
| 1726 | __func__, channel, id, lun)); | 1703 | __func__, channel, id, lun)); |
| 1727 | 1704 | ||
| 1728 | if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || | 1705 | if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || |
| @@ -1781,8 +1758,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost) | |||
| 1781 | return NULL; | 1758 | return NULL; |
| 1782 | 1759 | ||
| 1783 | if (shost->async_scan) { | 1760 | if (shost->async_scan) { |
| 1784 | printk("%s called twice for host %d", __func__, | 1761 | shost_printk(KERN_INFO, shost, "%s called twice\n", __func__); |
| 1785 | shost->host_no); | ||
| 1786 | dump_stack(); | 1762 | dump_stack(); |
| 1787 | return NULL; | 1763 | return NULL; |
| 1788 | } | 1764 | } |
| @@ -1835,8 +1811,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data) | |||
| 1835 | mutex_lock(&shost->scan_mutex); | 1811 | mutex_lock(&shost->scan_mutex); |
| 1836 | 1812 | ||
| 1837 | if (!shost->async_scan) { | 1813 | if (!shost->async_scan) { |
| 1838 | printk("%s called twice for host %d", __func__, | 1814 | shost_printk(KERN_INFO, shost, "%s called twice\n", __func__); |
| 1839 | shost->host_no); | ||
| 1840 | dump_stack(); | 1815 | dump_stack(); |
| 1841 | mutex_unlock(&shost->scan_mutex); | 1816 | mutex_unlock(&shost->scan_mutex); |
| 1842 | return; | 1817 | return; |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 074e8cc30955..8b4105a22ac2 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
| @@ -80,7 +80,7 @@ const char *scsi_host_state_name(enum scsi_host_state state) | |||
| 80 | return name; | 80 | return name; |
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | static int check_set(unsigned int *val, char *src) | 83 | static int check_set(unsigned long long *val, char *src) |
| 84 | { | 84 | { |
| 85 | char *last; | 85 | char *last; |
| 86 | 86 | ||
| @@ -90,7 +90,7 @@ static int check_set(unsigned int *val, char *src) | |||
| 90 | /* | 90 | /* |
| 91 | * Doesn't check for int overflow | 91 | * Doesn't check for int overflow |
| 92 | */ | 92 | */ |
| 93 | *val = simple_strtoul(src, &last, 0); | 93 | *val = simple_strtoull(src, &last, 0); |
| 94 | if (*last != '\0') | 94 | if (*last != '\0') |
| 95 | return 1; | 95 | return 1; |
| 96 | } | 96 | } |
| @@ -99,11 +99,11 @@ static int check_set(unsigned int *val, char *src) | |||
| 99 | 99 | ||
| 100 | static int scsi_scan(struct Scsi_Host *shost, const char *str) | 100 | static int scsi_scan(struct Scsi_Host *shost, const char *str) |
| 101 | { | 101 | { |
| 102 | char s1[15], s2[15], s3[15], junk; | 102 | char s1[15], s2[15], s3[17], junk; |
| 103 | unsigned int channel, id, lun; | 103 | unsigned long long channel, id, lun; |
| 104 | int res; | 104 | int res; |
| 105 | 105 | ||
| 106 | res = sscanf(str, "%10s %10s %10s %c", s1, s2, s3, &junk); | 106 | res = sscanf(str, "%10s %10s %16s %c", s1, s2, s3, &junk); |
| 107 | if (res != 3) | 107 | if (res != 3) |
| 108 | return -EINVAL; | 108 | return -EINVAL; |
| 109 | if (check_set(&channel, s1)) | 109 | if (check_set(&channel, s1)) |
| @@ -333,8 +333,8 @@ store_shost_eh_deadline(struct device *dev, struct device_attribute *attr, | |||
| 333 | 333 | ||
| 334 | static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline); | 334 | static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline); |
| 335 | 335 | ||
| 336 | shost_rd_attr(use_blk_mq, "%d\n"); | ||
| 336 | shost_rd_attr(unique_id, "%u\n"); | 337 | shost_rd_attr(unique_id, "%u\n"); |
| 337 | shost_rd_attr(host_busy, "%hu\n"); | ||
| 338 | shost_rd_attr(cmd_per_lun, "%hd\n"); | 338 | shost_rd_attr(cmd_per_lun, "%hd\n"); |
| 339 | shost_rd_attr(can_queue, "%hd\n"); | 339 | shost_rd_attr(can_queue, "%hd\n"); |
| 340 | shost_rd_attr(sg_tablesize, "%hu\n"); | 340 | shost_rd_attr(sg_tablesize, "%hu\n"); |
| @@ -344,7 +344,16 @@ shost_rd_attr(prot_capabilities, "%u\n"); | |||
| 344 | shost_rd_attr(prot_guard_type, "%hd\n"); | 344 | shost_rd_attr(prot_guard_type, "%hd\n"); |
| 345 | shost_rd_attr2(proc_name, hostt->proc_name, "%s\n"); | 345 | shost_rd_attr2(proc_name, hostt->proc_name, "%s\n"); |
| 346 | 346 | ||
| 347 | static ssize_t | ||
| 348 | show_host_busy(struct device *dev, struct device_attribute *attr, char *buf) | ||
| 349 | { | ||
| 350 | struct Scsi_Host *shost = class_to_shost(dev); | ||
| 351 | return snprintf(buf, 20, "%d\n", atomic_read(&shost->host_busy)); | ||
| 352 | } | ||
| 353 | static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL); | ||
| 354 | |||
| 347 | static struct attribute *scsi_sysfs_shost_attrs[] = { | 355 | static struct attribute *scsi_sysfs_shost_attrs[] = { |
| 356 | &dev_attr_use_blk_mq.attr, | ||
| 348 | &dev_attr_unique_id.attr, | 357 | &dev_attr_unique_id.attr, |
| 349 | &dev_attr_host_busy.attr, | 358 | &dev_attr_host_busy.attr, |
| 350 | &dev_attr_cmd_per_lun.attr, | 359 | &dev_attr_cmd_per_lun.attr, |
| @@ -577,14 +586,30 @@ static int scsi_sdev_check_buf_bit(const char *buf) | |||
| 577 | /* | 586 | /* |
| 578 | * Create the actual show/store functions and data structures. | 587 | * Create the actual show/store functions and data structures. |
| 579 | */ | 588 | */ |
| 580 | sdev_rd_attr (device_blocked, "%d\n"); | ||
| 581 | sdev_rd_attr (device_busy, "%d\n"); | ||
| 582 | sdev_rd_attr (type, "%d\n"); | 589 | sdev_rd_attr (type, "%d\n"); |
| 583 | sdev_rd_attr (scsi_level, "%d\n"); | 590 | sdev_rd_attr (scsi_level, "%d\n"); |
| 584 | sdev_rd_attr (vendor, "%.8s\n"); | 591 | sdev_rd_attr (vendor, "%.8s\n"); |
| 585 | sdev_rd_attr (model, "%.16s\n"); | 592 | sdev_rd_attr (model, "%.16s\n"); |
| 586 | sdev_rd_attr (rev, "%.4s\n"); | 593 | sdev_rd_attr (rev, "%.4s\n"); |
| 587 | 594 | ||
| 595 | static ssize_t | ||
| 596 | sdev_show_device_busy(struct device *dev, struct device_attribute *attr, | ||
| 597 | char *buf) | ||
| 598 | { | ||
| 599 | struct scsi_device *sdev = to_scsi_device(dev); | ||
| 600 | return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy)); | ||
| 601 | } | ||
| 602 | static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL); | ||
| 603 | |||
| 604 | static ssize_t | ||
| 605 | sdev_show_device_blocked(struct device *dev, struct device_attribute *attr, | ||
| 606 | char *buf) | ||
| 607 | { | ||
| 608 | struct scsi_device *sdev = to_scsi_device(dev); | ||
| 609 | return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_blocked)); | ||
| 610 | } | ||
| 611 | static DEVICE_ATTR(device_blocked, S_IRUGO, sdev_show_device_blocked, NULL); | ||
| 612 | |||
| 588 | /* | 613 | /* |
| 589 | * TODO: can we make these symlinks to the block layer ones? | 614 | * TODO: can we make these symlinks to the block layer ones? |
| 590 | */ | 615 | */ |
| @@ -885,9 +910,9 @@ sdev_store_queue_ramp_up_period(struct device *dev, | |||
| 885 | const char *buf, size_t count) | 910 | const char *buf, size_t count) |
| 886 | { | 911 | { |
| 887 | struct scsi_device *sdev = to_scsi_device(dev); | 912 | struct scsi_device *sdev = to_scsi_device(dev); |
| 888 | unsigned long period; | 913 | unsigned int period; |
| 889 | 914 | ||
| 890 | if (strict_strtoul(buf, 10, &period)) | 915 | if (kstrtouint(buf, 10, &period)) |
| 891 | return -EINVAL; | 916 | return -EINVAL; |
| 892 | 917 | ||
| 893 | sdev->queue_ramp_up_period = msecs_to_jiffies(period); | 918 | sdev->queue_ramp_up_period = msecs_to_jiffies(period); |
| @@ -1230,13 +1255,13 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev) | |||
| 1230 | device_initialize(&sdev->sdev_gendev); | 1255 | device_initialize(&sdev->sdev_gendev); |
| 1231 | sdev->sdev_gendev.bus = &scsi_bus_type; | 1256 | sdev->sdev_gendev.bus = &scsi_bus_type; |
| 1232 | sdev->sdev_gendev.type = &scsi_dev_type; | 1257 | sdev->sdev_gendev.type = &scsi_dev_type; |
| 1233 | dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%d", | 1258 | dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu", |
| 1234 | sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); | 1259 | sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); |
| 1235 | 1260 | ||
| 1236 | device_initialize(&sdev->sdev_dev); | 1261 | device_initialize(&sdev->sdev_dev); |
| 1237 | sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev); | 1262 | sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev); |
| 1238 | sdev->sdev_dev.class = &sdev_class; | 1263 | sdev->sdev_dev.class = &sdev_class; |
| 1239 | dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%d", | 1264 | dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu", |
| 1240 | sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); | 1265 | sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); |
| 1241 | sdev->scsi_level = starget->scsi_level; | 1266 | sdev->scsi_level = starget->scsi_level; |
| 1242 | transport_setup_device(&sdev->sdev_gendev); | 1267 | transport_setup_device(&sdev->sdev_gendev); |
diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c deleted file mode 100644 index 6209110f295d..000000000000 --- a/drivers/scsi/scsi_tgt_if.c +++ /dev/null | |||
| @@ -1,399 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * SCSI target kernel/user interface functions | ||
| 3 | * | ||
| 4 | * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org> | ||
| 5 | * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or | ||
| 8 | * modify it under the terms of the GNU General Public License as | ||
| 9 | * published by the Free Software Foundation; either version 2 of the | ||
| 10 | * License, or (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 20 | * 02110-1301 USA | ||
| 21 | */ | ||
| 22 | #include <linux/miscdevice.h> | ||
| 23 | #include <linux/gfp.h> | ||
| 24 | #include <linux/file.h> | ||
| 25 | #include <linux/export.h> | ||
| 26 | #include <net/tcp.h> | ||
| 27 | #include <scsi/scsi.h> | ||
| 28 | #include <scsi/scsi_cmnd.h> | ||
| 29 | #include <scsi/scsi_device.h> | ||
| 30 | #include <scsi/scsi_host.h> | ||
| 31 | #include <scsi/scsi_tgt.h> | ||
| 32 | #include <scsi/scsi_tgt_if.h> | ||
| 33 | |||
| 34 | #include <asm/cacheflush.h> | ||
| 35 | |||
| 36 | #include "scsi_tgt_priv.h" | ||
| 37 | |||
| 38 | #if TGT_RING_SIZE < PAGE_SIZE | ||
| 39 | # define TGT_RING_SIZE PAGE_SIZE | ||
| 40 | #endif | ||
| 41 | |||
| 42 | #define TGT_RING_PAGES (TGT_RING_SIZE >> PAGE_SHIFT) | ||
| 43 | #define TGT_EVENT_PER_PAGE (PAGE_SIZE / sizeof(struct tgt_event)) | ||
| 44 | #define TGT_MAX_EVENTS (TGT_EVENT_PER_PAGE * TGT_RING_PAGES) | ||
| 45 | |||
| 46 | struct tgt_ring { | ||
| 47 | u32 tr_idx; | ||
| 48 | unsigned long tr_pages[TGT_RING_PAGES]; | ||
| 49 | spinlock_t tr_lock; | ||
| 50 | }; | ||
| 51 | |||
| 52 | /* tx_ring : kernel->user, rx_ring : user->kernel */ | ||
| 53 | static struct tgt_ring tx_ring, rx_ring; | ||
| 54 | static DECLARE_WAIT_QUEUE_HEAD(tgt_poll_wait); | ||
| 55 | |||
| 56 | static inline void tgt_ring_idx_inc(struct tgt_ring *ring) | ||
| 57 | { | ||
| 58 | if (ring->tr_idx == TGT_MAX_EVENTS - 1) | ||
| 59 | ring->tr_idx = 0; | ||
| 60 | else | ||
| 61 | ring->tr_idx++; | ||
| 62 | } | ||
| 63 | |||
| 64 | static struct tgt_event *tgt_head_event(struct tgt_ring *ring, u32 idx) | ||
| 65 | { | ||
| 66 | u32 pidx, off; | ||
| 67 | |||
| 68 | pidx = idx / TGT_EVENT_PER_PAGE; | ||
| 69 | off = idx % TGT_EVENT_PER_PAGE; | ||
| 70 | |||
| 71 | return (struct tgt_event *) | ||
| 72 | (ring->tr_pages[pidx] + sizeof(struct tgt_event) * off); | ||
| 73 | } | ||
| 74 | |||
| 75 | static int tgt_uspace_send_event(u32 type, struct tgt_event *p) | ||
| 76 | { | ||
| 77 | struct tgt_event *ev; | ||
| 78 | struct tgt_ring *ring = &tx_ring; | ||
| 79 | unsigned long flags; | ||
| 80 | int err = 0; | ||
| 81 | |||
| 82 | spin_lock_irqsave(&ring->tr_lock, flags); | ||
| 83 | |||
| 84 | ev = tgt_head_event(ring, ring->tr_idx); | ||
| 85 | if (!ev->hdr.status) | ||
| 86 | tgt_ring_idx_inc(ring); | ||
| 87 | else | ||
| 88 | err = -BUSY; | ||
| 89 | |||
| 90 | spin_unlock_irqrestore(&ring->tr_lock, flags); | ||
| 91 | |||
| 92 | if (err) | ||
| 93 | return err; | ||
| 94 | |||
| 95 | memcpy(ev, p, sizeof(*ev)); | ||
| 96 | ev->hdr.type = type; | ||
| 97 | mb(); | ||
| 98 | ev->hdr.status = 1; | ||
| 99 | |||
| 100 | flush_dcache_page(virt_to_page(ev)); | ||
| 101 | |||
| 102 | wake_up_interruptible(&tgt_poll_wait); | ||
| 103 | |||
| 104 | return 0; | ||
| 105 | } | ||
| 106 | |||
| 107 | int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, u64 itn_id, | ||
| 108 | struct scsi_lun *lun, u64 tag) | ||
| 109 | { | ||
| 110 | struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd); | ||
| 111 | struct tgt_event ev; | ||
| 112 | int err; | ||
| 113 | |||
| 114 | memset(&ev, 0, sizeof(ev)); | ||
| 115 | ev.p.cmd_req.host_no = shost->host_no; | ||
| 116 | ev.p.cmd_req.itn_id = itn_id; | ||
| 117 | ev.p.cmd_req.data_len = scsi_bufflen(cmd); | ||
| 118 | memcpy(ev.p.cmd_req.scb, cmd->cmnd, sizeof(ev.p.cmd_req.scb)); | ||
| 119 | memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun)); | ||
| 120 | ev.p.cmd_req.attribute = cmd->tag; | ||
| 121 | ev.p.cmd_req.tag = tag; | ||
| 122 | |||
| 123 | dprintk("%p %d %u %x %llx\n", cmd, shost->host_no, | ||
| 124 | ev.p.cmd_req.data_len, cmd->tag, | ||
| 125 | (unsigned long long) ev.p.cmd_req.tag); | ||
| 126 | |||
| 127 | err = tgt_uspace_send_event(TGT_KEVENT_CMD_REQ, &ev); | ||
| 128 | if (err) | ||
| 129 | eprintk("tx buf is full, could not send\n"); | ||
| 130 | |||
| 131 | return err; | ||
| 132 | } | ||
| 133 | |||
| 134 | int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 itn_id, u64 tag) | ||
| 135 | { | ||
| 136 | struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd); | ||
| 137 | struct tgt_event ev; | ||
| 138 | int err; | ||
| 139 | |||
| 140 | memset(&ev, 0, sizeof(ev)); | ||
| 141 | ev.p.cmd_done.host_no = shost->host_no; | ||
| 142 | ev.p.cmd_done.itn_id = itn_id; | ||
| 143 | ev.p.cmd_done.tag = tag; | ||
| 144 | ev.p.cmd_done.result = cmd->result; | ||
| 145 | |||
| 146 | dprintk("%p %d %llu %u %x\n", cmd, shost->host_no, | ||
| 147 | (unsigned long long) ev.p.cmd_req.tag, | ||
| 148 | ev.p.cmd_req.data_len, cmd->tag); | ||
| 149 | |||
| 150 | err = tgt_uspace_send_event(TGT_KEVENT_CMD_DONE, &ev); | ||
| 151 | if (err) | ||
| 152 | eprintk("tx buf is full, could not send\n"); | ||
| 153 | |||
| 154 | return err; | ||
| 155 | } | ||
| 156 | |||
| 157 | int scsi_tgt_uspace_send_tsk_mgmt(int host_no, u64 itn_id, int function, | ||
| 158 | u64 tag, struct scsi_lun *scsilun, void *data) | ||
| 159 | { | ||
| 160 | struct tgt_event ev; | ||
| 161 | int err; | ||
| 162 | |||
| 163 | memset(&ev, 0, sizeof(ev)); | ||
| 164 | ev.p.tsk_mgmt_req.host_no = host_no; | ||
| 165 | ev.p.tsk_mgmt_req.itn_id = itn_id; | ||
| 166 | ev.p.tsk_mgmt_req.function = function; | ||
| 167 | ev.p.tsk_mgmt_req.tag = tag; | ||
| 168 | memcpy(ev.p.tsk_mgmt_req.lun, scsilun, sizeof(ev.p.tsk_mgmt_req.lun)); | ||
| 169 | ev.p.tsk_mgmt_req.mid = (u64) (unsigned long) data; | ||
| 170 | |||
| 171 | dprintk("%d %x %llx %llx\n", host_no, function, (unsigned long long) tag, | ||
| 172 | (unsigned long long) ev.p.tsk_mgmt_req.mid); | ||
| 173 | |||
| 174 | err = tgt_uspace_send_event(TGT_KEVENT_TSK_MGMT_REQ, &ev); | ||
| 175 | if (err) | ||
| 176 | eprintk("tx buf is full, could not send\n"); | ||
| 177 | |||
| 178 | return err; | ||
| 179 | } | ||
| 180 | |||
| 181 | int scsi_tgt_uspace_send_it_nexus_request(int host_no, u64 itn_id, | ||
| 182 | int function, char *initiator_id) | ||
| 183 | { | ||
| 184 | struct tgt_event ev; | ||
| 185 | int err; | ||
| 186 | |||
| 187 | memset(&ev, 0, sizeof(ev)); | ||
| 188 | ev.p.it_nexus_req.host_no = host_no; | ||
| 189 | ev.p.it_nexus_req.function = function; | ||
| 190 | ev.p.it_nexus_req.itn_id = itn_id; | ||
| 191 | if (initiator_id) | ||
| 192 | strncpy(ev.p.it_nexus_req.initiator_id, initiator_id, | ||
| 193 | sizeof(ev.p.it_nexus_req.initiator_id)); | ||
| 194 | |||
| 195 | dprintk("%d %x %llx\n", host_no, function, (unsigned long long)itn_id); | ||
| 196 | |||
| 197 | err = tgt_uspace_send_event(TGT_KEVENT_IT_NEXUS_REQ, &ev); | ||
| 198 | if (err) | ||
| 199 | eprintk("tx buf is full, could not send\n"); | ||
| 200 | |||
| 201 | return err; | ||
| 202 | } | ||
| 203 | |||
| 204 | static int event_recv_msg(struct tgt_event *ev) | ||
| 205 | { | ||
| 206 | int err = 0; | ||
| 207 | |||
| 208 | switch (ev->hdr.type) { | ||
| 209 | case TGT_UEVENT_CMD_RSP: | ||
| 210 | err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no, | ||
| 211 | ev->p.cmd_rsp.itn_id, | ||
| 212 | ev->p.cmd_rsp.result, | ||
| 213 | ev->p.cmd_rsp.tag, | ||
| 214 | ev->p.cmd_rsp.uaddr, | ||
| 215 | ev->p.cmd_rsp.len, | ||
| 216 | ev->p.cmd_rsp.sense_uaddr, | ||
| 217 | ev->p.cmd_rsp.sense_len, | ||
| 218 | ev->p.cmd_rsp.rw); | ||
| 219 | break; | ||
| 220 | case TGT_UEVENT_TSK_MGMT_RSP: | ||
| 221 | err = scsi_tgt_kspace_tsk_mgmt(ev->p.tsk_mgmt_rsp.host_no, | ||
| 222 | ev->p.tsk_mgmt_rsp.itn_id, | ||
| 223 | ev->p.tsk_mgmt_rsp.mid, | ||
| 224 | ev->p.tsk_mgmt_rsp.result); | ||
| 225 | break; | ||
| 226 | case TGT_UEVENT_IT_NEXUS_RSP: | ||
| 227 | err = scsi_tgt_kspace_it_nexus_rsp(ev->p.it_nexus_rsp.host_no, | ||
| 228 | ev->p.it_nexus_rsp.itn_id, | ||
| 229 | ev->p.it_nexus_rsp.result); | ||
| 230 | break; | ||
| 231 | default: | ||
| 232 | eprintk("unknown type %d\n", ev->hdr.type); | ||
| 233 | err = -EINVAL; | ||
| 234 | } | ||
| 235 | |||
| 236 | return err; | ||
| 237 | } | ||
| 238 | |||
| 239 | static ssize_t tgt_write(struct file *file, const char __user * buffer, | ||
| 240 | size_t count, loff_t * ppos) | ||
| 241 | { | ||
| 242 | struct tgt_event *ev; | ||
| 243 | struct tgt_ring *ring = &rx_ring; | ||
| 244 | |||
| 245 | while (1) { | ||
| 246 | ev = tgt_head_event(ring, ring->tr_idx); | ||
| 247 | /* do we need this? */ | ||
| 248 | flush_dcache_page(virt_to_page(ev)); | ||
| 249 | |||
| 250 | if (!ev->hdr.status) | ||
| 251 | break; | ||
| 252 | |||
| 253 | tgt_ring_idx_inc(ring); | ||
| 254 | event_recv_msg(ev); | ||
| 255 | ev->hdr.status = 0; | ||
| 256 | }; | ||
| 257 | |||
| 258 | return count; | ||
| 259 | } | ||
| 260 | |||
| 261 | static unsigned int tgt_poll(struct file * file, struct poll_table_struct *wait) | ||
| 262 | { | ||
| 263 | struct tgt_event *ev; | ||
| 264 | struct tgt_ring *ring = &tx_ring; | ||
| 265 | unsigned long flags; | ||
| 266 | unsigned int mask = 0; | ||
| 267 | u32 idx; | ||
| 268 | |||
| 269 | poll_wait(file, &tgt_poll_wait, wait); | ||
| 270 | |||
| 271 | spin_lock_irqsave(&ring->tr_lock, flags); | ||
| 272 | |||
| 273 | idx = ring->tr_idx ? ring->tr_idx - 1 : TGT_MAX_EVENTS - 1; | ||
| 274 | ev = tgt_head_event(ring, idx); | ||
| 275 | if (ev->hdr.status) | ||
| 276 | mask |= POLLIN | POLLRDNORM; | ||
| 277 | |||
| 278 | spin_unlock_irqrestore(&ring->tr_lock, flags); | ||
| 279 | |||
| 280 | return mask; | ||
| 281 | } | ||
| 282 | |||
| 283 | static int uspace_ring_map(struct vm_area_struct *vma, unsigned long addr, | ||
| 284 | struct tgt_ring *ring) | ||
| 285 | { | ||
| 286 | int i, err; | ||
| 287 | |||
| 288 | for (i = 0; i < TGT_RING_PAGES; i++) { | ||
| 289 | struct page *page = virt_to_page(ring->tr_pages[i]); | ||
| 290 | err = vm_insert_page(vma, addr, page); | ||
| 291 | if (err) | ||
| 292 | return err; | ||
| 293 | addr += PAGE_SIZE; | ||
| 294 | } | ||
| 295 | |||
| 296 | return 0; | ||
| 297 | } | ||
| 298 | |||
| 299 | static int tgt_mmap(struct file *filp, struct vm_area_struct *vma) | ||
| 300 | { | ||
| 301 | unsigned long addr; | ||
| 302 | int err; | ||
| 303 | |||
| 304 | if (vma->vm_pgoff) | ||
| 305 | return -EINVAL; | ||
| 306 | |||
| 307 | if (vma->vm_end - vma->vm_start != TGT_RING_SIZE * 2) { | ||
| 308 | eprintk("mmap size must be %lu, not %lu \n", | ||
| 309 | TGT_RING_SIZE * 2, vma->vm_end - vma->vm_start); | ||
| 310 | return -EINVAL; | ||
| 311 | } | ||
| 312 | |||
| 313 | addr = vma->vm_start; | ||
| 314 | err = uspace_ring_map(vma, addr, &tx_ring); | ||
| 315 | if (err) | ||
| 316 | return err; | ||
| 317 | err = uspace_ring_map(vma, addr + TGT_RING_SIZE, &rx_ring); | ||
| 318 | |||
| 319 | return err; | ||
| 320 | } | ||
| 321 | |||
| 322 | static int tgt_open(struct inode *inode, struct file *file) | ||
| 323 | { | ||
| 324 | tx_ring.tr_idx = rx_ring.tr_idx = 0; | ||
| 325 | |||
| 326 | return 0; | ||
| 327 | } | ||
| 328 | |||
| 329 | static const struct file_operations tgt_fops = { | ||
| 330 | .owner = THIS_MODULE, | ||
| 331 | .open = tgt_open, | ||
| 332 | .poll = tgt_poll, | ||
| 333 | .write = tgt_write, | ||
| 334 | .mmap = tgt_mmap, | ||
| 335 | .llseek = noop_llseek, | ||
| 336 | }; | ||
| 337 | |||
| 338 | static struct miscdevice tgt_miscdev = { | ||
| 339 | .minor = MISC_DYNAMIC_MINOR, | ||
| 340 | .name = "tgt", | ||
| 341 | .fops = &tgt_fops, | ||
| 342 | }; | ||
| 343 | |||
| 344 | static void tgt_ring_exit(struct tgt_ring *ring) | ||
| 345 | { | ||
| 346 | int i; | ||
| 347 | |||
| 348 | for (i = 0; i < TGT_RING_PAGES; i++) | ||
| 349 | free_page(ring->tr_pages[i]); | ||
| 350 | } | ||
| 351 | |||
| 352 | static int tgt_ring_init(struct tgt_ring *ring) | ||
| 353 | { | ||
| 354 | int i; | ||
| 355 | |||
| 356 | spin_lock_init(&ring->tr_lock); | ||
| 357 | |||
| 358 | for (i = 0; i < TGT_RING_PAGES; i++) { | ||
| 359 | ring->tr_pages[i] = get_zeroed_page(GFP_KERNEL); | ||
| 360 | if (!ring->tr_pages[i]) { | ||
| 361 | eprintk("out of memory\n"); | ||
| 362 | return -ENOMEM; | ||
| 363 | } | ||
| 364 | } | ||
| 365 | |||
| 366 | return 0; | ||
| 367 | } | ||
| 368 | |||
| 369 | void scsi_tgt_if_exit(void) | ||
| 370 | { | ||
| 371 | tgt_ring_exit(&tx_ring); | ||
| 372 | tgt_ring_exit(&rx_ring); | ||
| 373 | misc_deregister(&tgt_miscdev); | ||
| 374 | } | ||
| 375 | |||
| 376 | int scsi_tgt_if_init(void) | ||
| 377 | { | ||
| 378 | int err; | ||
| 379 | |||
| 380 | err = tgt_ring_init(&tx_ring); | ||
| 381 | if (err) | ||
| 382 | return err; | ||
| 383 | |||
| 384 | err = tgt_ring_init(&rx_ring); | ||
| 385 | if (err) | ||
| 386 | goto free_tx_ring; | ||
| 387 | |||
| 388 | err = misc_register(&tgt_miscdev); | ||
| 389 | if (err) | ||
| 390 | goto free_rx_ring; | ||
| 391 | |||
| 392 | return 0; | ||
| 393 | free_rx_ring: | ||
| 394 | tgt_ring_exit(&rx_ring); | ||
| 395 | free_tx_ring: | ||
| 396 | tgt_ring_exit(&tx_ring); | ||
| 397 | |||
| 398 | return err; | ||
| 399 | } | ||
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c deleted file mode 100644 index e51add05fb8d..000000000000 --- a/drivers/scsi/scsi_tgt_lib.c +++ /dev/null | |||
| @@ -1,661 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * SCSI target lib functions | ||
| 3 | * | ||
| 4 | * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu> | ||
| 5 | * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or | ||
| 8 | * modify it under the terms of the GNU General Public License as | ||
| 9 | * published by the Free Software Foundation; either version 2 of the | ||
| 10 | * License, or (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
| 20 | * 02110-1301 USA | ||
| 21 | */ | ||
| 22 | #include <linux/blkdev.h> | ||
| 23 | #include <linux/hash.h> | ||
| 24 | #include <linux/module.h> | ||
| 25 | #include <linux/pagemap.h> | ||
| 26 | #include <linux/slab.h> | ||
| 27 | #include <scsi/scsi.h> | ||
| 28 | #include <scsi/scsi_cmnd.h> | ||
| 29 | #include <scsi/scsi_device.h> | ||
| 30 | #include <scsi/scsi_host.h> | ||
| 31 | #include <scsi/scsi_transport.h> | ||
| 32 | #include <scsi/scsi_tgt.h> | ||
| 33 | |||
| 34 | #include "scsi_tgt_priv.h" | ||
| 35 | |||
| 36 | static struct workqueue_struct *scsi_tgtd; | ||
| 37 | static struct kmem_cache *scsi_tgt_cmd_cache; | ||
| 38 | |||
| 39 | /* | ||
| 40 | * TODO: this struct will be killed when the block layer supports large bios | ||
| 41 | * and James's work struct code is in | ||
| 42 | */ | ||
| 43 | struct scsi_tgt_cmd { | ||
| 44 | /* TODO replace work with James b's code */ | ||
| 45 | struct work_struct work; | ||
| 46 | /* TODO fix limits of some drivers */ | ||
| 47 | struct bio *bio; | ||
| 48 | |||
| 49 | struct list_head hash_list; | ||
| 50 | struct request *rq; | ||
| 51 | u64 itn_id; | ||
| 52 | u64 tag; | ||
| 53 | }; | ||
| 54 | |||
| 55 | #define TGT_HASH_ORDER 4 | ||
| 56 | #define cmd_hashfn(tag) hash_long((unsigned long) (tag), TGT_HASH_ORDER) | ||
| 57 | |||
| 58 | struct scsi_tgt_queuedata { | ||
| 59 | struct Scsi_Host *shost; | ||
| 60 | struct list_head cmd_hash[1 << TGT_HASH_ORDER]; | ||
| 61 | spinlock_t cmd_hash_lock; | ||
| 62 | }; | ||
| 63 | |||
| 64 | /* | ||
| 65 | * Function: scsi_host_get_command() | ||
| 66 | * | ||
| 67 | * Purpose: Allocate and setup a scsi command block and blk request | ||
| 68 | * | ||
| 69 | * Arguments: shost - scsi host | ||
| 70 | * data_dir - dma data dir | ||
| 71 | * gfp_mask- allocator flags | ||
| 72 | * | ||
| 73 | * Returns: The allocated scsi command structure. | ||
| 74 | * | ||
| 75 | * This should be called by target LLDs to get a command. | ||
| 76 | */ | ||
| 77 | struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost, | ||
| 78 | enum dma_data_direction data_dir, | ||
| 79 | gfp_t gfp_mask) | ||
| 80 | { | ||
| 81 | int write = (data_dir == DMA_TO_DEVICE); | ||
| 82 | struct request *rq; | ||
| 83 | struct scsi_cmnd *cmd; | ||
| 84 | struct scsi_tgt_cmd *tcmd; | ||
| 85 | |||
| 86 | /* Bail if we can't get a reference to the device */ | ||
| 87 | if (!get_device(&shost->shost_gendev)) | ||
| 88 | return NULL; | ||
| 89 | |||
| 90 | tcmd = kmem_cache_alloc(scsi_tgt_cmd_cache, GFP_ATOMIC); | ||
| 91 | if (!tcmd) | ||
| 92 | goto put_dev; | ||
| 93 | |||
| 94 | /* | ||
| 95 | * The blk helpers are used to the READ/WRITE requests | ||
| 96 | * transferring data from a initiator point of view. Since | ||
| 97 | * we are in target mode we want the opposite. | ||
| 98 | */ | ||
| 99 | rq = blk_get_request(shost->uspace_req_q, !write, gfp_mask); | ||
| 100 | if (!rq) | ||
| 101 | goto free_tcmd; | ||
| 102 | |||
| 103 | cmd = __scsi_get_command(shost, gfp_mask); | ||
| 104 | if (!cmd) | ||
| 105 | goto release_rq; | ||
| 106 | |||
| 107 | cmd->sc_data_direction = data_dir; | ||
| 108 | cmd->jiffies_at_alloc = jiffies; | ||
| 109 | cmd->request = rq; | ||
| 110 | |||
| 111 | cmd->cmnd = rq->cmd; | ||
| 112 | |||
| 113 | rq->special = cmd; | ||
| 114 | rq->cmd_type = REQ_TYPE_SPECIAL; | ||
| 115 | rq->cmd_flags |= REQ_TYPE_BLOCK_PC; | ||
| 116 | rq->end_io_data = tcmd; | ||
| 117 | |||
| 118 | tcmd->rq = rq; | ||
| 119 | |||
| 120 | return cmd; | ||
| 121 | |||
| 122 | release_rq: | ||
| 123 | blk_put_request(rq); | ||
| 124 | free_tcmd: | ||
| 125 | kmem_cache_free(scsi_tgt_cmd_cache, tcmd); | ||
| 126 | put_dev: | ||
| 127 | put_device(&shost->shost_gendev); | ||
| 128 | return NULL; | ||
| 129 | |||
| 130 | } | ||
| 131 | EXPORT_SYMBOL_GPL(scsi_host_get_command); | ||
| 132 | |||
| 133 | /* | ||
| 134 | * Function: scsi_host_put_command() | ||
| 135 | * | ||
| 136 | * Purpose: Free a scsi command block | ||
| 137 | * | ||
| 138 | * Arguments: shost - scsi host | ||
| 139 | * cmd - command block to free | ||
| 140 | * | ||
| 141 | * Returns: Nothing. | ||
| 142 | * | ||
| 143 | * Notes: The command must not belong to any lists. | ||
| 144 | */ | ||
| 145 | void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd) | ||
| 146 | { | ||
| 147 | struct request_queue *q = shost->uspace_req_q; | ||
| 148 | struct request *rq = cmd->request; | ||
| 149 | struct scsi_tgt_cmd *tcmd = rq->end_io_data; | ||
| 150 | unsigned long flags; | ||
| 151 | |||
| 152 | kmem_cache_free(scsi_tgt_cmd_cache, tcmd); | ||
| 153 | |||
| 154 | spin_lock_irqsave(q->queue_lock, flags); | ||
| 155 | __blk_put_request(q, rq); | ||
| 156 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
| 157 | |||
| 158 | __scsi_put_command(shost, cmd); | ||
| 159 | put_device(&shost->shost_gendev); | ||
| 160 | } | ||
| 161 | EXPORT_SYMBOL_GPL(scsi_host_put_command); | ||
| 162 | |||
| 163 | static void cmd_hashlist_del(struct scsi_cmnd *cmd) | ||
| 164 | { | ||
| 165 | struct request_queue *q = cmd->request->q; | ||
| 166 | struct scsi_tgt_queuedata *qdata = q->queuedata; | ||
| 167 | unsigned long flags; | ||
| 168 | struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data; | ||
| 169 | |||
| 170 | spin_lock_irqsave(&qdata->cmd_hash_lock, flags); | ||
| 171 | list_del(&tcmd->hash_list); | ||
| 172 | spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); | ||
| 173 | } | ||
| 174 | |||
/* Undo the blk_rq_map_user() mapping recorded in tcmd->bio. */
static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
{
	blk_rq_unmap_user(tcmd->bio);
}
| 179 | |||
| 180 | static void scsi_tgt_cmd_destroy(struct work_struct *work) | ||
| 181 | { | ||
| 182 | struct scsi_tgt_cmd *tcmd = | ||
| 183 | container_of(work, struct scsi_tgt_cmd, work); | ||
| 184 | struct scsi_cmnd *cmd = tcmd->rq->special; | ||
| 185 | |||
| 186 | dprintk("cmd %p %d %u\n", cmd, cmd->sc_data_direction, | ||
| 187 | rq_data_dir(cmd->request)); | ||
| 188 | scsi_unmap_user_pages(tcmd); | ||
| 189 | tcmd->rq->bio = NULL; | ||
| 190 | scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd); | ||
| 191 | } | ||
| 192 | |||
| 193 | static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd, | ||
| 194 | u64 itn_id, u64 tag) | ||
| 195 | { | ||
| 196 | struct scsi_tgt_queuedata *qdata = rq->q->queuedata; | ||
| 197 | unsigned long flags; | ||
| 198 | struct list_head *head; | ||
| 199 | |||
| 200 | tcmd->itn_id = itn_id; | ||
| 201 | tcmd->tag = tag; | ||
| 202 | tcmd->bio = NULL; | ||
| 203 | INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy); | ||
| 204 | spin_lock_irqsave(&qdata->cmd_hash_lock, flags); | ||
| 205 | head = &qdata->cmd_hash[cmd_hashfn(tag)]; | ||
| 206 | list_add(&tcmd->hash_list, head); | ||
| 207 | spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); | ||
| 208 | } | ||
| 209 | |||
| 210 | /* | ||
| 211 | * scsi_tgt_alloc_queue - setup queue used for message passing | ||
| 212 | * shost: scsi host | ||
| 213 | * | ||
| 214 | * This should be called by the LLD after host allocation. | ||
| 215 | * And will be released when the host is released. | ||
| 216 | */ | ||
| 217 | int scsi_tgt_alloc_queue(struct Scsi_Host *shost) | ||
| 218 | { | ||
| 219 | struct scsi_tgt_queuedata *queuedata; | ||
| 220 | struct request_queue *q; | ||
| 221 | int err, i; | ||
| 222 | |||
| 223 | /* | ||
| 224 | * Do we need to send a netlink event or should uspace | ||
| 225 | * just respond to the hotplug event? | ||
| 226 | */ | ||
| 227 | q = __scsi_alloc_queue(shost, NULL); | ||
| 228 | if (!q) | ||
| 229 | return -ENOMEM; | ||
| 230 | |||
| 231 | queuedata = kzalloc(sizeof(*queuedata), GFP_KERNEL); | ||
| 232 | if (!queuedata) { | ||
| 233 | err = -ENOMEM; | ||
| 234 | goto cleanup_queue; | ||
| 235 | } | ||
| 236 | queuedata->shost = shost; | ||
| 237 | q->queuedata = queuedata; | ||
| 238 | |||
| 239 | /* | ||
| 240 | * this is a silly hack. We should probably just queue as many | ||
| 241 | * command as is recvd to userspace. uspace can then make | ||
| 242 | * sure we do not overload the HBA | ||
| 243 | */ | ||
| 244 | q->nr_requests = shost->can_queue; | ||
| 245 | /* | ||
| 246 | * We currently only support software LLDs so this does | ||
| 247 | * not matter for now. Do we need this for the cards we support? | ||
| 248 | * If so we should make it a host template value. | ||
| 249 | */ | ||
| 250 | blk_queue_dma_alignment(q, 0); | ||
| 251 | shost->uspace_req_q = q; | ||
| 252 | |||
| 253 | for (i = 0; i < ARRAY_SIZE(queuedata->cmd_hash); i++) | ||
| 254 | INIT_LIST_HEAD(&queuedata->cmd_hash[i]); | ||
| 255 | spin_lock_init(&queuedata->cmd_hash_lock); | ||
| 256 | |||
| 257 | return 0; | ||
| 258 | |||
| 259 | cleanup_queue: | ||
| 260 | blk_cleanup_queue(q); | ||
| 261 | return err; | ||
| 262 | } | ||
| 263 | EXPORT_SYMBOL_GPL(scsi_tgt_alloc_queue); | ||
| 264 | |||
| 265 | void scsi_tgt_free_queue(struct Scsi_Host *shost) | ||
| 266 | { | ||
| 267 | int i; | ||
| 268 | unsigned long flags; | ||
| 269 | struct request_queue *q = shost->uspace_req_q; | ||
| 270 | struct scsi_cmnd *cmd; | ||
| 271 | struct scsi_tgt_queuedata *qdata = q->queuedata; | ||
| 272 | struct scsi_tgt_cmd *tcmd, *n; | ||
| 273 | LIST_HEAD(cmds); | ||
| 274 | |||
| 275 | spin_lock_irqsave(&qdata->cmd_hash_lock, flags); | ||
| 276 | |||
| 277 | for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) { | ||
| 278 | list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i], | ||
| 279 | hash_list) | ||
| 280 | list_move(&tcmd->hash_list, &cmds); | ||
| 281 | } | ||
| 282 | |||
| 283 | spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); | ||
| 284 | |||
| 285 | while (!list_empty(&cmds)) { | ||
| 286 | tcmd = list_entry(cmds.next, struct scsi_tgt_cmd, hash_list); | ||
| 287 | list_del(&tcmd->hash_list); | ||
| 288 | cmd = tcmd->rq->special; | ||
| 289 | |||
| 290 | shost->hostt->eh_abort_handler(cmd); | ||
| 291 | scsi_tgt_cmd_destroy(&tcmd->work); | ||
| 292 | } | ||
| 293 | } | ||
| 294 | EXPORT_SYMBOL_GPL(scsi_tgt_free_queue); | ||
| 295 | |||
| 296 | struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *cmd) | ||
| 297 | { | ||
| 298 | struct scsi_tgt_queuedata *queue = cmd->request->q->queuedata; | ||
| 299 | return queue->shost; | ||
| 300 | } | ||
| 301 | EXPORT_SYMBOL_GPL(scsi_tgt_cmd_to_host); | ||
| 302 | |||
| 303 | /* | ||
| 304 | * scsi_tgt_queue_command - queue command for userspace processing | ||
| 305 | * @cmd: scsi command | ||
| 306 | * @scsilun: scsi lun | ||
| 307 | * @tag: unique value to identify this command for tmf | ||
| 308 | */ | ||
| 309 | int scsi_tgt_queue_command(struct scsi_cmnd *cmd, u64 itn_id, | ||
| 310 | struct scsi_lun *scsilun, u64 tag) | ||
| 311 | { | ||
| 312 | struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data; | ||
| 313 | int err; | ||
| 314 | |||
| 315 | init_scsi_tgt_cmd(cmd->request, tcmd, itn_id, tag); | ||
| 316 | err = scsi_tgt_uspace_send_cmd(cmd, itn_id, scsilun, tag); | ||
| 317 | if (err) | ||
| 318 | cmd_hashlist_del(cmd); | ||
| 319 | |||
| 320 | return err; | ||
| 321 | } | ||
| 322 | EXPORT_SYMBOL_GPL(scsi_tgt_queue_command); | ||
| 323 | |||
| 324 | /* | ||
| 325 | * This is run from a interrupt handler normally and the unmap | ||
| 326 | * needs process context so we must queue | ||
| 327 | */ | ||
| 328 | static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd) | ||
| 329 | { | ||
| 330 | struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data; | ||
| 331 | |||
| 332 | dprintk("cmd %p %u\n", cmd, rq_data_dir(cmd->request)); | ||
| 333 | |||
| 334 | scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag); | ||
| 335 | |||
| 336 | scsi_release_buffers(cmd); | ||
| 337 | |||
| 338 | queue_work(scsi_tgtd, &tcmd->work); | ||
| 339 | } | ||
| 340 | |||
| 341 | static int scsi_tgt_transfer_response(struct scsi_cmnd *cmd) | ||
| 342 | { | ||
| 343 | struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd); | ||
| 344 | int err; | ||
| 345 | |||
| 346 | dprintk("cmd %p %u\n", cmd, rq_data_dir(cmd->request)); | ||
| 347 | |||
| 348 | err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done); | ||
| 349 | switch (err) { | ||
| 350 | case SCSI_MLQUEUE_HOST_BUSY: | ||
| 351 | case SCSI_MLQUEUE_DEVICE_BUSY: | ||
| 352 | return -EAGAIN; | ||
| 353 | } | ||
| 354 | return 0; | ||
| 355 | } | ||
| 356 | |||
| 357 | /* TODO: test this crap and replace bio_map_user with new interface maybe */ | ||
| 358 | static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd, | ||
| 359 | unsigned long uaddr, unsigned int len, int rw) | ||
| 360 | { | ||
| 361 | struct request_queue *q = cmd->request->q; | ||
| 362 | struct request *rq = cmd->request; | ||
| 363 | int err; | ||
| 364 | |||
| 365 | dprintk("%lx %u\n", uaddr, len); | ||
| 366 | err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL); | ||
| 367 | if (err) { | ||
| 368 | /* | ||
| 369 | * TODO: need to fixup sg_tablesize, max_segment_size, | ||
| 370 | * max_sectors, etc for modern HW and software drivers | ||
| 371 | * where this value is bogus. | ||
| 372 | * | ||
| 373 | * TODO2: we can alloc a reserve buffer of max size | ||
| 374 | * we can handle and do the slow copy path for really large | ||
| 375 | * IO. | ||
| 376 | */ | ||
| 377 | eprintk("Could not handle request of size %u.\n", len); | ||
| 378 | return err; | ||
| 379 | } | ||
| 380 | |||
| 381 | tcmd->bio = rq->bio; | ||
| 382 | err = scsi_init_io(cmd, GFP_KERNEL); | ||
| 383 | if (err) { | ||
| 384 | scsi_release_buffers(cmd); | ||
| 385 | goto unmap_rq; | ||
| 386 | } | ||
| 387 | /* | ||
| 388 | * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the | ||
| 389 | * length for us. | ||
| 390 | */ | ||
| 391 | cmd->sdb.length = blk_rq_bytes(rq); | ||
| 392 | |||
| 393 | return 0; | ||
| 394 | |||
| 395 | unmap_rq: | ||
| 396 | scsi_unmap_user_pages(tcmd); | ||
| 397 | return err; | ||
| 398 | } | ||
| 399 | |||
| 400 | static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr, | ||
| 401 | unsigned len) | ||
| 402 | { | ||
| 403 | char __user *p = (char __user *) uaddr; | ||
| 404 | |||
| 405 | if (copy_from_user(cmd->sense_buffer, p, | ||
| 406 | min_t(unsigned, SCSI_SENSE_BUFFERSIZE, len))) { | ||
| 407 | printk(KERN_ERR "Could not copy the sense buffer\n"); | ||
| 408 | return -EIO; | ||
| 409 | } | ||
| 410 | return 0; | ||
| 411 | } | ||
| 412 | |||
| 413 | static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) | ||
| 414 | { | ||
| 415 | struct scsi_tgt_cmd *tcmd; | ||
| 416 | int err; | ||
| 417 | |||
| 418 | err = shost->hostt->eh_abort_handler(cmd); | ||
| 419 | if (err) | ||
| 420 | eprintk("fail to abort %p\n", cmd); | ||
| 421 | |||
| 422 | tcmd = cmd->request->end_io_data; | ||
| 423 | scsi_tgt_cmd_destroy(&tcmd->work); | ||
| 424 | return err; | ||
| 425 | } | ||
| 426 | |||
| 427 | static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u64 tag) | ||
| 428 | { | ||
| 429 | struct scsi_tgt_queuedata *qdata = q->queuedata; | ||
| 430 | struct request *rq = NULL; | ||
| 431 | struct list_head *head; | ||
| 432 | struct scsi_tgt_cmd *tcmd; | ||
| 433 | unsigned long flags; | ||
| 434 | |||
| 435 | head = &qdata->cmd_hash[cmd_hashfn(tag)]; | ||
| 436 | spin_lock_irqsave(&qdata->cmd_hash_lock, flags); | ||
| 437 | list_for_each_entry(tcmd, head, hash_list) { | ||
| 438 | if (tcmd->tag == tag) { | ||
| 439 | rq = tcmd->rq; | ||
| 440 | list_del(&tcmd->hash_list); | ||
| 441 | break; | ||
| 442 | } | ||
| 443 | } | ||
| 444 | spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); | ||
| 445 | |||
| 446 | return rq; | ||
| 447 | } | ||
| 448 | |||
/*
 * scsi_tgt_kspace_exec - handle a command execution response from userspace
 * @host_no:     host the command was queued on
 * @itn_id:      I_T nexus identifier (unused here; the tag lookup suffices)
 * @result:      command result reported by userspace
 * @tag:         tag identifying the in-flight command
 * @uaddr:       userspace data buffer to map for the transfer, if any
 * @len:         length of @uaddr; 0 means no data transfer
 * @sense_uaddr: userspace sense buffer (used on CHECK CONDITION)
 * @sense_len:   length of @sense_uaddr
 * @rw:          data direction flag, passed through to scsi_map_user_pages()
 *
 * Returns 0 or a negative errno.
 */
int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag,
		unsigned long uaddr, u32 len, unsigned long sense_uaddr,
		u32 sense_len, u8 rw)
{
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *rq;
	struct scsi_tgt_cmd *tcmd;
	int err = 0;

	dprintk("%d %llu %d %u %lx %u\n", host_no, (unsigned long long) tag,
		result, len, uaddr, rw);

	/* TODO: replace with a O(1) alg */
	shost = scsi_host_lookup(host_no);
	if (!shost) {
		printk(KERN_ERR "Could not find host no %d\n", host_no);
		return -EINVAL;
	}

	if (!shost->uspace_req_q) {
		/* NOTE(review): err stays 0 on this path, so a non-target
		 * host is reported as success — confirm callers expect that. */
		printk(KERN_ERR "Not target scsi host %d\n", host_no);
		goto done;
	}

	/* The lookup also unhashes the command. */
	rq = tgt_cmd_hash_lookup(shost->uspace_req_q, tag);
	if (!rq) {
		printk(KERN_ERR "Could not find tag %llu\n",
		       (unsigned long long) tag);
		err = -EINVAL;
		goto done;
	}
	cmd = rq->special;

	dprintk("cmd %p scb %x result %d len %d bufflen %u %u %x\n",
		cmd, cmd->cmnd[0], result, len, scsi_bufflen(cmd),
		rq_data_dir(rq), cmd->cmnd[0]);

	if (result == TASK_ABORTED) {
		scsi_tgt_abort_cmd(shost, cmd);
		goto done;
	}
	/*
	 * store the userspace values here, the working values are
	 * in the request_* values
	 */
	tcmd = cmd->request->end_io_data;
	cmd->result = result;

	if (cmd->result == SAM_STAT_CHECK_CONDITION)
		scsi_tgt_copy_sense(cmd, sense_uaddr, sense_len);

	if (len) {
		err = scsi_map_user_pages(rq->end_io_data, cmd, uaddr, len, rw);
		if (err) {
			/*
			 * user-space daemon bugs or OOM
			 * TODO: we can do better for OOM.
			 */
			struct scsi_tgt_queuedata *qdata;
			struct list_head *head;
			unsigned long flags;

			eprintk("cmd %p ret %d uaddr %lx len %d rw %d\n",
				cmd, err, uaddr, len, rw);

			/* Put the command back on the tag hash. */
			qdata = shost->uspace_req_q->queuedata;
			head = &qdata->cmd_hash[cmd_hashfn(tcmd->tag)];

			spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
			list_add(&tcmd->hash_list, head);
			spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);

			goto done;
		}
	}
	err = scsi_tgt_transfer_response(cmd);
done:
	scsi_host_put(shost);
	return err;
}
| 530 | |||
| 531 | int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *shost, u64 itn_id, | ||
| 532 | int function, u64 tag, struct scsi_lun *scsilun, | ||
| 533 | void *data) | ||
| 534 | { | ||
| 535 | int err; | ||
| 536 | |||
| 537 | /* TODO: need to retry if this fails. */ | ||
| 538 | err = scsi_tgt_uspace_send_tsk_mgmt(shost->host_no, itn_id, | ||
| 539 | function, tag, scsilun, data); | ||
| 540 | if (err < 0) | ||
| 541 | eprintk("The task management request lost!\n"); | ||
| 542 | return err; | ||
| 543 | } | ||
| 544 | EXPORT_SYMBOL_GPL(scsi_tgt_tsk_mgmt_request); | ||
| 545 | |||
| 546 | int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 itn_id, u64 mid, int result) | ||
| 547 | { | ||
| 548 | struct Scsi_Host *shost; | ||
| 549 | int err = -EINVAL; | ||
| 550 | |||
| 551 | dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid); | ||
| 552 | |||
| 553 | shost = scsi_host_lookup(host_no); | ||
| 554 | if (!shost) { | ||
| 555 | printk(KERN_ERR "Could not find host no %d\n", host_no); | ||
| 556 | return err; | ||
| 557 | } | ||
| 558 | |||
| 559 | if (!shost->uspace_req_q) { | ||
| 560 | printk(KERN_ERR "Not target scsi host %d\n", host_no); | ||
| 561 | goto done; | ||
| 562 | } | ||
| 563 | |||
| 564 | err = shost->transportt->tsk_mgmt_response(shost, itn_id, mid, result); | ||
| 565 | done: | ||
| 566 | scsi_host_put(shost); | ||
| 567 | return err; | ||
| 568 | } | ||
| 569 | |||
| 570 | int scsi_tgt_it_nexus_create(struct Scsi_Host *shost, u64 itn_id, | ||
| 571 | char *initiator) | ||
| 572 | { | ||
| 573 | int err; | ||
| 574 | |||
| 575 | /* TODO: need to retry if this fails. */ | ||
| 576 | err = scsi_tgt_uspace_send_it_nexus_request(shost->host_no, itn_id, 0, | ||
| 577 | initiator); | ||
| 578 | if (err < 0) | ||
| 579 | eprintk("The i_t_neuxs request lost, %d %llx!\n", | ||
| 580 | shost->host_no, (unsigned long long)itn_id); | ||
| 581 | return err; | ||
| 582 | } | ||
| 583 | EXPORT_SYMBOL_GPL(scsi_tgt_it_nexus_create); | ||
| 584 | |||
| 585 | int scsi_tgt_it_nexus_destroy(struct Scsi_Host *shost, u64 itn_id) | ||
| 586 | { | ||
| 587 | int err; | ||
| 588 | |||
| 589 | /* TODO: need to retry if this fails. */ | ||
| 590 | err = scsi_tgt_uspace_send_it_nexus_request(shost->host_no, | ||
| 591 | itn_id, 1, NULL); | ||
| 592 | if (err < 0) | ||
| 593 | eprintk("The i_t_neuxs request lost, %d %llx!\n", | ||
| 594 | shost->host_no, (unsigned long long)itn_id); | ||
| 595 | return err; | ||
| 596 | } | ||
| 597 | EXPORT_SYMBOL_GPL(scsi_tgt_it_nexus_destroy); | ||
| 598 | |||
| 599 | int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result) | ||
| 600 | { | ||
| 601 | struct Scsi_Host *shost; | ||
| 602 | int err = -EINVAL; | ||
| 603 | |||
| 604 | dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id); | ||
| 605 | |||
| 606 | shost = scsi_host_lookup(host_no); | ||
| 607 | if (!shost) { | ||
| 608 | printk(KERN_ERR "Could not find host no %d\n", host_no); | ||
| 609 | return err; | ||
| 610 | } | ||
| 611 | |||
| 612 | if (!shost->uspace_req_q) { | ||
| 613 | printk(KERN_ERR "Not target scsi host %d\n", host_no); | ||
| 614 | goto done; | ||
| 615 | } | ||
| 616 | |||
| 617 | err = shost->transportt->it_nexus_response(shost, itn_id, result); | ||
| 618 | done: | ||
| 619 | scsi_host_put(shost); | ||
| 620 | return err; | ||
| 621 | } | ||
| 622 | |||
/*
 * Module init: create the command tracking cache, the scsi_tgtd
 * workqueue (max_active = 1) used for deferred command teardown, and the
 * userspace interface.  Unwinds in reverse order on failure.
 */
static int __init scsi_tgt_init(void)
{
	int err;

	scsi_tgt_cmd_cache = KMEM_CACHE(scsi_tgt_cmd, 0);
	if (!scsi_tgt_cmd_cache)
		return -ENOMEM;

	scsi_tgtd = alloc_workqueue("scsi_tgtd", 0, 1);
	if (!scsi_tgtd) {
		err = -ENOMEM;
		goto free_kmemcache;
	}

	err = scsi_tgt_if_init();
	if (err)
		goto destroy_wq;

	return 0;

destroy_wq:
	destroy_workqueue(scsi_tgtd);
free_kmemcache:
	kmem_cache_destroy(scsi_tgt_cmd_cache);
	return err;
}
| 649 | |||
/*
 * Module exit: drain and destroy the workqueue first so no deferred
 * command teardown is still running, then remove the userspace interface
 * and free the command cache.
 */
static void __exit scsi_tgt_exit(void)
{
	destroy_workqueue(scsi_tgtd);
	scsi_tgt_if_exit();
	kmem_cache_destroy(scsi_tgt_cmd_cache);
}
| 656 | |||
/* Module entry points and metadata. */
module_init(scsi_tgt_init);
module_exit(scsi_tgt_exit);

MODULE_DESCRIPTION("SCSI target core");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/scsi_tgt_priv.h b/drivers/scsi/scsi_tgt_priv.h deleted file mode 100644 index fe4c62177f78..000000000000 --- a/drivers/scsi/scsi_tgt_priv.h +++ /dev/null | |||
| @@ -1,32 +0,0 @@ | |||
/*
 * Private declarations shared between the scsi target core and its
 * userspace (scsi_tgt_if) interface.
 */
struct scsi_cmnd;
struct scsi_lun;
struct Scsi_Host;
struct task_struct;

/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...)					\
do {								\
	printk("%s(%d) " fmt, __func__, __LINE__, ##args);	\
} while (0)

/* dprintk is compiled out by default; swap definitions for debugging */
#define dprintk(fmt, args...)
/* #define dprintk eprintk */

extern void scsi_tgt_if_exit(void);
extern int scsi_tgt_if_init(void);

extern int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, u64 it_nexus_id,
				    struct scsi_lun *lun, u64 tag);
extern int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 it_nexus_id,
				       u64 tag);
extern int scsi_tgt_kspace_exec(int host_no, u64 it_nexus_id, int result, u64 tag,
				unsigned long uaddr, u32 len,
				unsigned long sense_uaddr, u32 sense_len, u8 rw);
extern int scsi_tgt_uspace_send_tsk_mgmt(int host_no, u64 it_nexus_id,
					 int function, u64 tag,
					 struct scsi_lun *scsilun, void *data);
extern int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 it_nexus_id,
				    u64 mid, int result);
extern int scsi_tgt_uspace_send_it_nexus_request(int host_no, u64 it_nexus_id,
						 int function, char *initiator);
extern int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 it_nexus_id, int result);
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c index 2bea4f0b684a..503594e5f76d 100644 --- a/drivers/scsi/scsi_trace.c +++ b/drivers/scsi/scsi_trace.c | |||
| @@ -28,7 +28,7 @@ scsi_trace_misc(struct trace_seq *, unsigned char *, int); | |||
| 28 | static const char * | 28 | static const char * |
| 29 | scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len) | 29 | scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len) |
| 30 | { | 30 | { |
| 31 | const char *ret = p->buffer + p->len; | 31 | const char *ret = trace_seq_buffer_ptr(p); |
| 32 | sector_t lba = 0, txlen = 0; | 32 | sector_t lba = 0, txlen = 0; |
| 33 | 33 | ||
| 34 | lba |= ((cdb[1] & 0x1F) << 16); | 34 | lba |= ((cdb[1] & 0x1F) << 16); |
| @@ -46,7 +46,7 @@ scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len) | |||
| 46 | static const char * | 46 | static const char * |
| 47 | scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len) | 47 | scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len) |
| 48 | { | 48 | { |
| 49 | const char *ret = p->buffer + p->len; | 49 | const char *ret = trace_seq_buffer_ptr(p); |
| 50 | sector_t lba = 0, txlen = 0; | 50 | sector_t lba = 0, txlen = 0; |
| 51 | 51 | ||
| 52 | lba |= (cdb[2] << 24); | 52 | lba |= (cdb[2] << 24); |
| @@ -71,7 +71,7 @@ scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len) | |||
| 71 | static const char * | 71 | static const char * |
| 72 | scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len) | 72 | scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len) |
| 73 | { | 73 | { |
| 74 | const char *ret = p->buffer + p->len; | 74 | const char *ret = trace_seq_buffer_ptr(p); |
| 75 | sector_t lba = 0, txlen = 0; | 75 | sector_t lba = 0, txlen = 0; |
| 76 | 76 | ||
| 77 | lba |= (cdb[2] << 24); | 77 | lba |= (cdb[2] << 24); |
| @@ -94,7 +94,7 @@ scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len) | |||
| 94 | static const char * | 94 | static const char * |
| 95 | scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len) | 95 | scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len) |
| 96 | { | 96 | { |
| 97 | const char *ret = p->buffer + p->len; | 97 | const char *ret = trace_seq_buffer_ptr(p); |
| 98 | sector_t lba = 0, txlen = 0; | 98 | sector_t lba = 0, txlen = 0; |
| 99 | 99 | ||
| 100 | lba |= ((u64)cdb[2] << 56); | 100 | lba |= ((u64)cdb[2] << 56); |
| @@ -125,7 +125,7 @@ scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len) | |||
| 125 | static const char * | 125 | static const char * |
| 126 | scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) | 126 | scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) |
| 127 | { | 127 | { |
| 128 | const char *ret = p->buffer + p->len, *cmd; | 128 | const char *ret = trace_seq_buffer_ptr(p), *cmd; |
| 129 | sector_t lba = 0, txlen = 0; | 129 | sector_t lba = 0, txlen = 0; |
| 130 | u32 ei_lbrt = 0; | 130 | u32 ei_lbrt = 0; |
| 131 | 131 | ||
| @@ -180,7 +180,7 @@ out: | |||
| 180 | static const char * | 180 | static const char * |
| 181 | scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len) | 181 | scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len) |
| 182 | { | 182 | { |
| 183 | const char *ret = p->buffer + p->len; | 183 | const char *ret = trace_seq_buffer_ptr(p); |
| 184 | unsigned int regions = cdb[7] << 8 | cdb[8]; | 184 | unsigned int regions = cdb[7] << 8 | cdb[8]; |
| 185 | 185 | ||
| 186 | trace_seq_printf(p, "regions=%u", (regions - 8) / 16); | 186 | trace_seq_printf(p, "regions=%u", (regions - 8) / 16); |
| @@ -192,7 +192,7 @@ scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len) | |||
| 192 | static const char * | 192 | static const char * |
| 193 | scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) | 193 | scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) |
| 194 | { | 194 | { |
| 195 | const char *ret = p->buffer + p->len, *cmd; | 195 | const char *ret = trace_seq_buffer_ptr(p), *cmd; |
| 196 | sector_t lba = 0; | 196 | sector_t lba = 0; |
| 197 | u32 alloc_len = 0; | 197 | u32 alloc_len = 0; |
| 198 | 198 | ||
| @@ -247,7 +247,7 @@ scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len) | |||
| 247 | static const char * | 247 | static const char * |
| 248 | scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len) | 248 | scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len) |
| 249 | { | 249 | { |
| 250 | const char *ret = p->buffer + p->len; | 250 | const char *ret = trace_seq_buffer_ptr(p); |
| 251 | 251 | ||
| 252 | trace_seq_printf(p, "-"); | 252 | trace_seq_printf(p, "-"); |
| 253 | trace_seq_putc(p, 0); | 253 | trace_seq_putc(p, 0); |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 521f5838594b..5d6f348eb3d8 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
| @@ -39,7 +39,6 @@ | |||
| 39 | #include <scsi/scsi_netlink_fc.h> | 39 | #include <scsi/scsi_netlink_fc.h> |
| 40 | #include <scsi/scsi_bsg_fc.h> | 40 | #include <scsi/scsi_bsg_fc.h> |
| 41 | #include "scsi_priv.h" | 41 | #include "scsi_priv.h" |
| 42 | #include "scsi_transport_fc_internal.h" | ||
| 43 | 42 | ||
| 44 | static int fc_queue_work(struct Scsi_Host *, struct work_struct *); | 43 | static int fc_queue_work(struct Scsi_Host *, struct work_struct *); |
| 45 | static void fc_vport_sched_delete(struct work_struct *work); | 44 | static void fc_vport_sched_delete(struct work_struct *work); |
| @@ -262,6 +261,10 @@ static const struct { | |||
| 262 | { FC_PORTSPEED_8GBIT, "8 Gbit" }, | 261 | { FC_PORTSPEED_8GBIT, "8 Gbit" }, |
| 263 | { FC_PORTSPEED_16GBIT, "16 Gbit" }, | 262 | { FC_PORTSPEED_16GBIT, "16 Gbit" }, |
| 264 | { FC_PORTSPEED_32GBIT, "32 Gbit" }, | 263 | { FC_PORTSPEED_32GBIT, "32 Gbit" }, |
| 264 | { FC_PORTSPEED_20GBIT, "20 Gbit" }, | ||
| 265 | { FC_PORTSPEED_40GBIT, "40 Gbit" }, | ||
| 266 | { FC_PORTSPEED_50GBIT, "50 Gbit" }, | ||
| 267 | { FC_PORTSPEED_100GBIT, "100 Gbit" }, | ||
| 265 | { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" }, | 268 | { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" }, |
| 266 | }; | 269 | }; |
| 267 | fc_bitfield_name_search(port_speed, fc_port_speed_names) | 270 | fc_bitfield_name_search(port_speed, fc_port_speed_names) |
| @@ -2089,7 +2092,7 @@ fc_timed_out(struct scsi_cmnd *scmd) | |||
| 2089 | * on the rport. | 2092 | * on the rport. |
| 2090 | */ | 2093 | */ |
| 2091 | static void | 2094 | static void |
| 2092 | fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, uint lun) | 2095 | fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, u64 lun) |
| 2093 | { | 2096 | { |
| 2094 | struct fc_rport *rport; | 2097 | struct fc_rport *rport; |
| 2095 | unsigned long flags; | 2098 | unsigned long flags; |
| @@ -2121,7 +2124,7 @@ fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, uint lun) | |||
| 2121 | * object as the parent. | 2124 | * object as the parent. |
| 2122 | */ | 2125 | */ |
| 2123 | static int | 2126 | static int |
| 2124 | fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, uint lun) | 2127 | fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun) |
| 2125 | { | 2128 | { |
| 2126 | uint chlo, chhi; | 2129 | uint chlo, chhi; |
| 2127 | uint tgtlo, tgthi; | 2130 | uint tgtlo, tgthi; |
| @@ -3008,10 +3011,6 @@ fc_remote_port_delete(struct fc_rport *rport) | |||
| 3008 | 3011 | ||
| 3009 | spin_unlock_irqrestore(shost->host_lock, flags); | 3012 | spin_unlock_irqrestore(shost->host_lock, flags); |
| 3010 | 3013 | ||
| 3011 | if (rport->roles & FC_PORT_ROLE_FCP_INITIATOR && | ||
| 3012 | shost->active_mode & MODE_TARGET) | ||
| 3013 | fc_tgt_it_nexus_destroy(shost, (unsigned long)rport); | ||
| 3014 | |||
| 3015 | scsi_target_block(&rport->dev); | 3014 | scsi_target_block(&rport->dev); |
| 3016 | 3015 | ||
| 3017 | /* see if we need to kill io faster than waiting for device loss */ | 3016 | /* see if we need to kill io faster than waiting for device loss */ |
| @@ -3052,7 +3051,6 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) | |||
| 3052 | struct fc_host_attrs *fc_host = shost_to_fc_host(shost); | 3051 | struct fc_host_attrs *fc_host = shost_to_fc_host(shost); |
| 3053 | unsigned long flags; | 3052 | unsigned long flags; |
| 3054 | int create = 0; | 3053 | int create = 0; |
| 3055 | int ret; | ||
| 3056 | 3054 | ||
| 3057 | spin_lock_irqsave(shost->host_lock, flags); | 3055 | spin_lock_irqsave(shost->host_lock, flags); |
| 3058 | if (roles & FC_PORT_ROLE_FCP_TARGET) { | 3056 | if (roles & FC_PORT_ROLE_FCP_TARGET) { |
| @@ -3061,12 +3059,6 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) | |||
| 3061 | create = 1; | 3059 | create = 1; |
| 3062 | } else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET)) | 3060 | } else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET)) |
| 3063 | create = 1; | 3061 | create = 1; |
| 3064 | } else if (shost->active_mode & MODE_TARGET) { | ||
| 3065 | ret = fc_tgt_it_nexus_create(shost, (unsigned long)rport, | ||
| 3066 | (char *)&rport->node_name); | ||
| 3067 | if (ret) | ||
| 3068 | printk(KERN_ERR "FC Remore Port tgt nexus failed %d\n", | ||
| 3069 | ret); | ||
| 3070 | } | 3062 | } |
| 3071 | 3063 | ||
| 3072 | rport->roles = roles; | 3064 | rport->roles = roles; |
diff --git a/drivers/scsi/scsi_transport_fc_internal.h b/drivers/scsi/scsi_transport_fc_internal.h deleted file mode 100644 index e7bfbe751c1f..000000000000 --- a/drivers/scsi/scsi_transport_fc_internal.h +++ /dev/null | |||
| @@ -1,26 +0,0 @@ | |||
#include <scsi/scsi_tgt.h>

/*
 * FC transport wrappers for I_T nexus notifications: forward to the scsi
 * target core when CONFIG_SCSI_FC_TGT_ATTRS is set, otherwise compile to
 * trivially-successful no-ops.
 */
#ifdef CONFIG_SCSI_FC_TGT_ATTRS
static inline int fc_tgt_it_nexus_create(struct Scsi_Host *shost, u64 itn_id,
					 char *initiator)
{
	return scsi_tgt_it_nexus_create(shost, itn_id, initiator);
}

static inline int fc_tgt_it_nexus_destroy(struct Scsi_Host *shost, u64 itn_id)
{
	return scsi_tgt_it_nexus_destroy(shost, itn_id);
}
#else
/* target attrs not configured: nexus notifications succeed trivially */
static inline int fc_tgt_it_nexus_create(struct Scsi_Host *shost, u64 itn_id,
					 char *initiator)
{
	return 0;
}

static inline int fc_tgt_it_nexus_destroy(struct Scsi_Host *shost, u64 itn_id)
{
	return 0;
}

#endif
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 0102a2d70dd8..67d43e35693d 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
| @@ -1780,7 +1780,7 @@ EXPORT_SYMBOL_GPL(iscsi_scan_finished); | |||
| 1780 | struct iscsi_scan_data { | 1780 | struct iscsi_scan_data { |
| 1781 | unsigned int channel; | 1781 | unsigned int channel; |
| 1782 | unsigned int id; | 1782 | unsigned int id; |
| 1783 | unsigned int lun; | 1783 | u64 lun; |
| 1784 | }; | 1784 | }; |
| 1785 | 1785 | ||
| 1786 | static int iscsi_user_scan_session(struct device *dev, void *data) | 1786 | static int iscsi_user_scan_session(struct device *dev, void *data) |
| @@ -1827,7 +1827,7 @@ user_scan_exit: | |||
| 1827 | } | 1827 | } |
| 1828 | 1828 | ||
| 1829 | static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, | 1829 | static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, |
| 1830 | uint id, uint lun) | 1830 | uint id, u64 lun) |
| 1831 | { | 1831 | { |
| 1832 | struct iscsi_scan_data scan_data; | 1832 | struct iscsi_scan_data scan_data; |
| 1833 | 1833 | ||
| @@ -3059,7 +3059,7 @@ iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh) | |||
| 3059 | evchap->u.get_chap.host_no = ev->u.get_chap.host_no; | 3059 | evchap->u.get_chap.host_no = ev->u.get_chap.host_no; |
| 3060 | evchap->u.get_chap.chap_tbl_idx = ev->u.get_chap.chap_tbl_idx; | 3060 | evchap->u.get_chap.chap_tbl_idx = ev->u.get_chap.chap_tbl_idx; |
| 3061 | evchap->u.get_chap.num_entries = ev->u.get_chap.num_entries; | 3061 | evchap->u.get_chap.num_entries = ev->u.get_chap.num_entries; |
| 3062 | buf = (char *) ((char *)evchap + sizeof(*evchap)); | 3062 | buf = (char *)evchap + sizeof(*evchap); |
| 3063 | memset(buf, 0, chap_buf_size); | 3063 | memset(buf, 0, chap_buf_size); |
| 3064 | 3064 | ||
| 3065 | err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx, | 3065 | err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx, |
| @@ -3429,7 +3429,7 @@ iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) | |||
| 3429 | char *buf; | 3429 | char *buf; |
| 3430 | 3430 | ||
| 3431 | if (!transport->get_host_stats) | 3431 | if (!transport->get_host_stats) |
| 3432 | return -EINVAL; | 3432 | return -ENOSYS; |
| 3433 | 3433 | ||
| 3434 | priv = iscsi_if_transport_lookup(transport); | 3434 | priv = iscsi_if_transport_lookup(transport); |
| 3435 | if (!priv) | 3435 | if (!priv) |
| @@ -3463,10 +3463,14 @@ iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) | |||
| 3463 | evhost_stats->type = nlh->nlmsg_type; | 3463 | evhost_stats->type = nlh->nlmsg_type; |
| 3464 | evhost_stats->u.get_host_stats.host_no = | 3464 | evhost_stats->u.get_host_stats.host_no = |
| 3465 | ev->u.get_host_stats.host_no; | 3465 | ev->u.get_host_stats.host_no; |
| 3466 | buf = (char *)((char *)evhost_stats + sizeof(*evhost_stats)); | 3466 | buf = (char *)evhost_stats + sizeof(*evhost_stats); |
| 3467 | memset(buf, 0, host_stats_size); | 3467 | memset(buf, 0, host_stats_size); |
| 3468 | 3468 | ||
| 3469 | err = transport->get_host_stats(shost, buf, host_stats_size); | 3469 | err = transport->get_host_stats(shost, buf, host_stats_size); |
| 3470 | if (err) { | ||
| 3471 | kfree_skb(skbhost_stats); | ||
| 3472 | goto exit_host_stats; | ||
| 3473 | } | ||
| 3470 | 3474 | ||
| 3471 | actual_size = nlmsg_total_size(sizeof(*ev) + host_stats_size); | 3475 | actual_size = nlmsg_total_size(sizeof(*ev) + host_stats_size); |
| 3472 | skb_trim(skbhost_stats, NLMSG_ALIGN(actual_size)); | 3476 | skb_trim(skbhost_stats, NLMSG_ALIGN(actual_size)); |
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index c341f855fadc..9a058194b9bd 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c | |||
| @@ -1705,7 +1705,7 @@ EXPORT_SYMBOL(scsi_is_sas_rphy); | |||
| 1705 | */ | 1705 | */ |
| 1706 | 1706 | ||
| 1707 | static int sas_user_scan(struct Scsi_Host *shost, uint channel, | 1707 | static int sas_user_scan(struct Scsi_Host *shost, uint channel, |
| 1708 | uint id, uint lun) | 1708 | uint id, u64 lun) |
| 1709 | { | 1709 | { |
| 1710 | struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); | 1710 | struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); |
| 1711 | struct sas_rphy *rphy; | 1711 | struct sas_rphy *rphy; |
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 13e898332e45..ae45bd99baed 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c | |||
| @@ -33,7 +33,6 @@ | |||
| 33 | #include <scsi/scsi_transport.h> | 33 | #include <scsi/scsi_transport.h> |
| 34 | #include <scsi/scsi_transport_srp.h> | 34 | #include <scsi/scsi_transport_srp.h> |
| 35 | #include "scsi_priv.h" | 35 | #include "scsi_priv.h" |
| 36 | #include "scsi_transport_srp_internal.h" | ||
| 37 | 36 | ||
| 38 | struct srp_host_attrs { | 37 | struct srp_host_attrs { |
| 39 | atomic_t next_port_id; | 38 | atomic_t next_port_id; |
| @@ -473,7 +472,8 @@ static void __srp_start_tl_fail_timers(struct srp_rport *rport) | |||
| 473 | if (delay > 0) | 472 | if (delay > 0) |
| 474 | queue_delayed_work(system_long_wq, &rport->reconnect_work, | 473 | queue_delayed_work(system_long_wq, &rport->reconnect_work, |
| 475 | 1UL * delay * HZ); | 474 | 1UL * delay * HZ); |
| 476 | if (srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { | 475 | if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) && |
| 476 | srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { | ||
| 477 | pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev), | 477 | pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev), |
| 478 | rport->state); | 478 | rport->state); |
| 479 | scsi_target_block(&shost->shost_gendev); | 479 | scsi_target_block(&shost->shost_gendev); |
| @@ -746,18 +746,6 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, | |||
| 746 | return ERR_PTR(ret); | 746 | return ERR_PTR(ret); |
| 747 | } | 747 | } |
| 748 | 748 | ||
| 749 | if (shost->active_mode & MODE_TARGET && | ||
| 750 | ids->roles == SRP_RPORT_ROLE_INITIATOR) { | ||
| 751 | ret = srp_tgt_it_nexus_create(shost, (unsigned long)rport, | ||
| 752 | rport->port_id); | ||
| 753 | if (ret) { | ||
| 754 | device_del(&rport->dev); | ||
| 755 | transport_destroy_device(&rport->dev); | ||
| 756 | put_device(&rport->dev); | ||
| 757 | return ERR_PTR(ret); | ||
| 758 | } | ||
| 759 | } | ||
| 760 | |||
| 761 | transport_add_device(&rport->dev); | 749 | transport_add_device(&rport->dev); |
| 762 | transport_configure_device(&rport->dev); | 750 | transport_configure_device(&rport->dev); |
| 763 | 751 | ||
| @@ -774,11 +762,6 @@ EXPORT_SYMBOL_GPL(srp_rport_add); | |||
| 774 | void srp_rport_del(struct srp_rport *rport) | 762 | void srp_rport_del(struct srp_rport *rport) |
| 775 | { | 763 | { |
| 776 | struct device *dev = &rport->dev; | 764 | struct device *dev = &rport->dev; |
| 777 | struct Scsi_Host *shost = dev_to_shost(dev->parent); | ||
| 778 | |||
| 779 | if (shost->active_mode & MODE_TARGET && | ||
| 780 | rport->roles == SRP_RPORT_ROLE_INITIATOR) | ||
| 781 | srp_tgt_it_nexus_destroy(shost, (unsigned long)rport); | ||
| 782 | 765 | ||
| 783 | transport_remove_device(dev); | 766 | transport_remove_device(dev); |
| 784 | device_del(dev); | 767 | device_del(dev); |
diff --git a/drivers/scsi/scsi_transport_srp_internal.h b/drivers/scsi/scsi_transport_srp_internal.h deleted file mode 100644 index 8a79747f9f3d..000000000000 --- a/drivers/scsi/scsi_transport_srp_internal.h +++ /dev/null | |||
| @@ -1,25 +0,0 @@ | |||
| 1 | #include <scsi/scsi_tgt.h> | ||
| 2 | |||
| 3 | #ifdef CONFIG_SCSI_SRP_TGT_ATTRS | ||
| 4 | static inline int srp_tgt_it_nexus_create(struct Scsi_Host *shost, u64 itn_id, | ||
| 5 | char *initiator) | ||
| 6 | { | ||
| 7 | return scsi_tgt_it_nexus_create(shost, itn_id, initiator); | ||
| 8 | } | ||
| 9 | |||
| 10 | static inline int srp_tgt_it_nexus_destroy(struct Scsi_Host *shost, u64 itn_id) | ||
| 11 | { | ||
| 12 | return scsi_tgt_it_nexus_destroy(shost, itn_id); | ||
| 13 | } | ||
| 14 | |||
| 15 | #else | ||
| 16 | static inline int srp_tgt_it_nexus_create(struct Scsi_Host *shost, u64 itn_id, | ||
| 17 | char *initiator) | ||
| 18 | { | ||
| 19 | return 0; | ||
| 20 | } | ||
| 21 | static inline int srp_tgt_it_nexus_destroy(struct Scsi_Host *shost, u64 itn_id) | ||
| 22 | { | ||
| 23 | return 0; | ||
| 24 | } | ||
| 25 | #endif | ||
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 6825eda1114a..2c2041ca4b70 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -134,6 +134,19 @@ static const char *sd_cache_types[] = { | |||
| 134 | "write back, no read (daft)" | 134 | "write back, no read (daft)" |
| 135 | }; | 135 | }; |
| 136 | 136 | ||
| 137 | static void sd_set_flush_flag(struct scsi_disk *sdkp) | ||
| 138 | { | ||
| 139 | unsigned flush = 0; | ||
| 140 | |||
| 141 | if (sdkp->WCE) { | ||
| 142 | flush |= REQ_FLUSH; | ||
| 143 | if (sdkp->DPOFUA) | ||
| 144 | flush |= REQ_FUA; | ||
| 145 | } | ||
| 146 | |||
| 147 | blk_queue_flush(sdkp->disk->queue, flush); | ||
| 148 | } | ||
| 149 | |||
| 137 | static ssize_t | 150 | static ssize_t |
| 138 | cache_type_store(struct device *dev, struct device_attribute *attr, | 151 | cache_type_store(struct device *dev, struct device_attribute *attr, |
| 139 | const char *buf, size_t count) | 152 | const char *buf, size_t count) |
| @@ -177,6 +190,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr, | |||
| 177 | if (sdkp->cache_override) { | 190 | if (sdkp->cache_override) { |
| 178 | sdkp->WCE = wce; | 191 | sdkp->WCE = wce; |
| 179 | sdkp->RCD = rcd; | 192 | sdkp->RCD = rcd; |
| 193 | sd_set_flush_flag(sdkp); | ||
| 180 | return count; | 194 | return count; |
| 181 | } | 195 | } |
| 182 | 196 | ||
| @@ -677,8 +691,10 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) | |||
| 677 | * Will issue either UNMAP or WRITE SAME(16) depending on preference | 691 | * Will issue either UNMAP or WRITE SAME(16) depending on preference |
| 678 | * indicated by target device. | 692 | * indicated by target device. |
| 679 | **/ | 693 | **/ |
| 680 | static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) | 694 | static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd) |
| 681 | { | 695 | { |
| 696 | struct request *rq = cmd->request; | ||
| 697 | struct scsi_device *sdp = cmd->device; | ||
| 682 | struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); | 698 | struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); |
| 683 | sector_t sector = blk_rq_pos(rq); | 699 | sector_t sector = blk_rq_pos(rq); |
| 684 | unsigned int nr_sectors = blk_rq_sectors(rq); | 700 | unsigned int nr_sectors = blk_rq_sectors(rq); |
| @@ -690,9 +706,6 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) | |||
| 690 | 706 | ||
| 691 | sector >>= ilog2(sdp->sector_size) - 9; | 707 | sector >>= ilog2(sdp->sector_size) - 9; |
| 692 | nr_sectors >>= ilog2(sdp->sector_size) - 9; | 708 | nr_sectors >>= ilog2(sdp->sector_size) - 9; |
| 693 | rq->timeout = SD_TIMEOUT; | ||
| 694 | |||
| 695 | memset(rq->cmd, 0, rq->cmd_len); | ||
| 696 | 709 | ||
| 697 | page = alloc_page(GFP_ATOMIC | __GFP_ZERO); | 710 | page = alloc_page(GFP_ATOMIC | __GFP_ZERO); |
| 698 | if (!page) | 711 | if (!page) |
| @@ -702,9 +715,9 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) | |||
| 702 | case SD_LBP_UNMAP: | 715 | case SD_LBP_UNMAP: |
| 703 | buf = page_address(page); | 716 | buf = page_address(page); |
| 704 | 717 | ||
| 705 | rq->cmd_len = 10; | 718 | cmd->cmd_len = 10; |
| 706 | rq->cmd[0] = UNMAP; | 719 | cmd->cmnd[0] = UNMAP; |
| 707 | rq->cmd[8] = 24; | 720 | cmd->cmnd[8] = 24; |
| 708 | 721 | ||
| 709 | put_unaligned_be16(6 + 16, &buf[0]); | 722 | put_unaligned_be16(6 + 16, &buf[0]); |
| 710 | put_unaligned_be16(16, &buf[2]); | 723 | put_unaligned_be16(16, &buf[2]); |
| @@ -715,23 +728,23 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) | |||
| 715 | break; | 728 | break; |
| 716 | 729 | ||
| 717 | case SD_LBP_WS16: | 730 | case SD_LBP_WS16: |
| 718 | rq->cmd_len = 16; | 731 | cmd->cmd_len = 16; |
| 719 | rq->cmd[0] = WRITE_SAME_16; | 732 | cmd->cmnd[0] = WRITE_SAME_16; |
| 720 | rq->cmd[1] = 0x8; /* UNMAP */ | 733 | cmd->cmnd[1] = 0x8; /* UNMAP */ |
| 721 | put_unaligned_be64(sector, &rq->cmd[2]); | 734 | put_unaligned_be64(sector, &cmd->cmnd[2]); |
| 722 | put_unaligned_be32(nr_sectors, &rq->cmd[10]); | 735 | put_unaligned_be32(nr_sectors, &cmd->cmnd[10]); |
| 723 | 736 | ||
| 724 | len = sdkp->device->sector_size; | 737 | len = sdkp->device->sector_size; |
| 725 | break; | 738 | break; |
| 726 | 739 | ||
| 727 | case SD_LBP_WS10: | 740 | case SD_LBP_WS10: |
| 728 | case SD_LBP_ZERO: | 741 | case SD_LBP_ZERO: |
| 729 | rq->cmd_len = 10; | 742 | cmd->cmd_len = 10; |
| 730 | rq->cmd[0] = WRITE_SAME; | 743 | cmd->cmnd[0] = WRITE_SAME; |
| 731 | if (sdkp->provisioning_mode == SD_LBP_WS10) | 744 | if (sdkp->provisioning_mode == SD_LBP_WS10) |
| 732 | rq->cmd[1] = 0x8; /* UNMAP */ | 745 | cmd->cmnd[1] = 0x8; /* UNMAP */ |
| 733 | put_unaligned_be32(sector, &rq->cmd[2]); | 746 | put_unaligned_be32(sector, &cmd->cmnd[2]); |
| 734 | put_unaligned_be16(nr_sectors, &rq->cmd[7]); | 747 | put_unaligned_be16(nr_sectors, &cmd->cmnd[7]); |
| 735 | 748 | ||
| 736 | len = sdkp->device->sector_size; | 749 | len = sdkp->device->sector_size; |
| 737 | break; | 750 | break; |
| @@ -742,8 +755,21 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) | |||
| 742 | } | 755 | } |
| 743 | 756 | ||
| 744 | rq->completion_data = page; | 757 | rq->completion_data = page; |
| 758 | rq->timeout = SD_TIMEOUT; | ||
| 759 | |||
| 760 | cmd->transfersize = len; | ||
| 761 | cmd->allowed = SD_MAX_RETRIES; | ||
| 762 | |||
| 763 | /* | ||
| 764 | * Initially __data_len is set to the amount of data that needs to be | ||
| 765 | * transferred to the target. This amount depends on whether WRITE SAME | ||
| 766 | * or UNMAP is being used. After the scatterlist has been mapped by | ||
| 767 | * scsi_init_io() we set __data_len to the size of the area to be | ||
| 768 | * discarded on disk. This allows us to report completion on the full | ||
| 769 | * amount of blocks described by the request. | ||
| 770 | */ | ||
| 745 | blk_add_request_payload(rq, page, len); | 771 | blk_add_request_payload(rq, page, len); |
| 746 | ret = scsi_setup_blk_pc_cmnd(sdp, rq); | 772 | ret = scsi_init_io(cmd, GFP_ATOMIC); |
| 747 | rq->__data_len = nr_bytes; | 773 | rq->__data_len = nr_bytes; |
| 748 | 774 | ||
| 749 | out: | 775 | out: |
| @@ -785,14 +811,15 @@ out: | |||
| 785 | 811 | ||
| 786 | /** | 812 | /** |
| 787 | * sd_setup_write_same_cmnd - write the same data to multiple blocks | 813 | * sd_setup_write_same_cmnd - write the same data to multiple blocks |
| 788 | * @sdp: scsi device to operate one | 814 | * @cmd: command to prepare |
| 789 | * @rq: Request to prepare | ||
| 790 | * | 815 | * |
| 791 | * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on | 816 | * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on |
| 792 | * preference indicated by target device. | 817 | * preference indicated by target device. |
| 793 | **/ | 818 | **/ |
| 794 | static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq) | 819 | static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) |
| 795 | { | 820 | { |
| 821 | struct request *rq = cmd->request; | ||
| 822 | struct scsi_device *sdp = cmd->device; | ||
| 796 | struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); | 823 | struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); |
| 797 | struct bio *bio = rq->bio; | 824 | struct bio *bio = rq->bio; |
| 798 | sector_t sector = blk_rq_pos(rq); | 825 | sector_t sector = blk_rq_pos(rq); |
| @@ -808,53 +835,56 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq) | |||
| 808 | sector >>= ilog2(sdp->sector_size) - 9; | 835 | sector >>= ilog2(sdp->sector_size) - 9; |
| 809 | nr_sectors >>= ilog2(sdp->sector_size) - 9; | 836 | nr_sectors >>= ilog2(sdp->sector_size) - 9; |
| 810 | 837 | ||
| 811 | rq->__data_len = sdp->sector_size; | ||
| 812 | rq->timeout = SD_WRITE_SAME_TIMEOUT; | 838 | rq->timeout = SD_WRITE_SAME_TIMEOUT; |
| 813 | memset(rq->cmd, 0, rq->cmd_len); | ||
| 814 | 839 | ||
| 815 | if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) { | 840 | if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) { |
| 816 | rq->cmd_len = 16; | 841 | cmd->cmd_len = 16; |
| 817 | rq->cmd[0] = WRITE_SAME_16; | 842 | cmd->cmnd[0] = WRITE_SAME_16; |
| 818 | put_unaligned_be64(sector, &rq->cmd[2]); | 843 | put_unaligned_be64(sector, &cmd->cmnd[2]); |
| 819 | put_unaligned_be32(nr_sectors, &rq->cmd[10]); | 844 | put_unaligned_be32(nr_sectors, &cmd->cmnd[10]); |
| 820 | } else { | 845 | } else { |
| 821 | rq->cmd_len = 10; | 846 | cmd->cmd_len = 10; |
| 822 | rq->cmd[0] = WRITE_SAME; | 847 | cmd->cmnd[0] = WRITE_SAME; |
| 823 | put_unaligned_be32(sector, &rq->cmd[2]); | 848 | put_unaligned_be32(sector, &cmd->cmnd[2]); |
| 824 | put_unaligned_be16(nr_sectors, &rq->cmd[7]); | 849 | put_unaligned_be16(nr_sectors, &cmd->cmnd[7]); |
| 825 | } | 850 | } |
| 826 | 851 | ||
| 827 | ret = scsi_setup_blk_pc_cmnd(sdp, rq); | 852 | cmd->transfersize = sdp->sector_size; |
| 828 | rq->__data_len = nr_bytes; | 853 | cmd->allowed = SD_MAX_RETRIES; |
| 829 | 854 | ||
| 855 | /* | ||
| 856 | * For WRITE_SAME the data transferred in the DATA IN buffer is | ||
| 857 | * different from the amount of data actually written to the target. | ||
| 858 | * | ||
| 859 | * We set up __data_len to the amount of data transferred from the | ||
| 860 | * DATA IN buffer so that blk_rq_map_sg set up the proper S/G list | ||
| 861 | * to transfer a single sector of data first, but then reset it to | ||
| 862 | * the amount of data to be written right after so that the I/O path | ||
| 863 | * knows how much to actually write. | ||
| 864 | */ | ||
| 865 | rq->__data_len = sdp->sector_size; | ||
| 866 | ret = scsi_init_io(cmd, GFP_ATOMIC); | ||
| 867 | rq->__data_len = nr_bytes; | ||
| 830 | return ret; | 868 | return ret; |
| 831 | } | 869 | } |
| 832 | 870 | ||
| 833 | static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) | 871 | static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd) |
| 834 | { | 872 | { |
| 835 | rq->timeout *= SD_FLUSH_TIMEOUT_MULTIPLIER; | 873 | struct request *rq = cmd->request; |
| 836 | rq->retries = SD_MAX_RETRIES; | ||
| 837 | rq->cmd[0] = SYNCHRONIZE_CACHE; | ||
| 838 | rq->cmd_len = 10; | ||
| 839 | |||
| 840 | return scsi_setup_blk_pc_cmnd(sdp, rq); | ||
| 841 | } | ||
| 842 | 874 | ||
| 843 | static void sd_uninit_command(struct scsi_cmnd *SCpnt) | 875 | /* flush requests don't perform I/O, zero the S/G table */ |
| 844 | { | 876 | memset(&cmd->sdb, 0, sizeof(cmd->sdb)); |
| 845 | struct request *rq = SCpnt->request; | ||
| 846 | 877 | ||
| 847 | if (rq->cmd_flags & REQ_DISCARD) | 878 | cmd->cmnd[0] = SYNCHRONIZE_CACHE; |
| 848 | __free_page(rq->completion_data); | 879 | cmd->cmd_len = 10; |
| 880 | cmd->transfersize = 0; | ||
| 881 | cmd->allowed = SD_MAX_RETRIES; | ||
| 849 | 882 | ||
| 850 | if (SCpnt->cmnd != rq->cmd) { | 883 | rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER; |
| 851 | mempool_free(SCpnt->cmnd, sd_cdb_pool); | 884 | return BLKPREP_OK; |
| 852 | SCpnt->cmnd = NULL; | ||
| 853 | SCpnt->cmd_len = 0; | ||
| 854 | } | ||
| 855 | } | 885 | } |
| 856 | 886 | ||
| 857 | static int sd_init_command(struct scsi_cmnd *SCpnt) | 887 | static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) |
| 858 | { | 888 | { |
| 859 | struct request *rq = SCpnt->request; | 889 | struct request *rq = SCpnt->request; |
| 860 | struct scsi_device *sdp = SCpnt->device; | 890 | struct scsi_device *sdp = SCpnt->device; |
| @@ -866,21 +896,7 @@ static int sd_init_command(struct scsi_cmnd *SCpnt) | |||
| 866 | int ret, host_dif; | 896 | int ret, host_dif; |
| 867 | unsigned char protect; | 897 | unsigned char protect; |
| 868 | 898 | ||
| 869 | /* | 899 | ret = scsi_init_io(SCpnt, GFP_ATOMIC); |
| 870 | * Discard request come in as REQ_TYPE_FS but we turn them into | ||
| 871 | * block PC requests to make life easier. | ||
| 872 | */ | ||
| 873 | if (rq->cmd_flags & REQ_DISCARD) { | ||
| 874 | ret = sd_setup_discard_cmnd(sdp, rq); | ||
| 875 | goto out; | ||
| 876 | } else if (rq->cmd_flags & REQ_WRITE_SAME) { | ||
| 877 | ret = sd_setup_write_same_cmnd(sdp, rq); | ||
| 878 | goto out; | ||
| 879 | } else if (rq->cmd_flags & REQ_FLUSH) { | ||
| 880 | ret = scsi_setup_flush_cmnd(sdp, rq); | ||
| 881 | goto out; | ||
| 882 | } | ||
| 883 | ret = scsi_setup_fs_cmnd(sdp, rq); | ||
| 884 | if (ret != BLKPREP_OK) | 900 | if (ret != BLKPREP_OK) |
| 885 | goto out; | 901 | goto out; |
| 886 | SCpnt = rq->special; | 902 | SCpnt = rq->special; |
| @@ -976,18 +992,13 @@ static int sd_init_command(struct scsi_cmnd *SCpnt) | |||
| 976 | } | 992 | } |
| 977 | } | 993 | } |
| 978 | if (rq_data_dir(rq) == WRITE) { | 994 | if (rq_data_dir(rq) == WRITE) { |
| 979 | if (!sdp->writeable) { | ||
| 980 | goto out; | ||
| 981 | } | ||
| 982 | SCpnt->cmnd[0] = WRITE_6; | 995 | SCpnt->cmnd[0] = WRITE_6; |
| 983 | SCpnt->sc_data_direction = DMA_TO_DEVICE; | ||
| 984 | 996 | ||
| 985 | if (blk_integrity_rq(rq)) | 997 | if (blk_integrity_rq(rq)) |
| 986 | sd_dif_prepare(rq, block, sdp->sector_size); | 998 | sd_dif_prepare(rq, block, sdp->sector_size); |
| 987 | 999 | ||
| 988 | } else if (rq_data_dir(rq) == READ) { | 1000 | } else if (rq_data_dir(rq) == READ) { |
| 989 | SCpnt->cmnd[0] = READ_6; | 1001 | SCpnt->cmnd[0] = READ_6; |
| 990 | SCpnt->sc_data_direction = DMA_FROM_DEVICE; | ||
| 991 | } else { | 1002 | } else { |
| 992 | scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags); | 1003 | scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags); |
| 993 | goto out; | 1004 | goto out; |
| @@ -1042,7 +1053,7 @@ static int sd_init_command(struct scsi_cmnd *SCpnt) | |||
| 1042 | SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff; | 1053 | SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff; |
| 1043 | SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff; | 1054 | SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff; |
| 1044 | SCpnt->cmnd[31] = (unsigned char) this_count & 0xff; | 1055 | SCpnt->cmnd[31] = (unsigned char) this_count & 0xff; |
| 1045 | } else if (sdp->use_16_for_rw) { | 1056 | } else if (sdp->use_16_for_rw || (this_count > 0xffff)) { |
| 1046 | SCpnt->cmnd[0] += READ_16 - READ_6; | 1057 | SCpnt->cmnd[0] += READ_16 - READ_6; |
| 1047 | SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); | 1058 | SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); |
| 1048 | SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; | 1059 | SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; |
| @@ -1061,9 +1072,6 @@ static int sd_init_command(struct scsi_cmnd *SCpnt) | |||
| 1061 | } else if ((this_count > 0xff) || (block > 0x1fffff) || | 1072 | } else if ((this_count > 0xff) || (block > 0x1fffff) || |
| 1062 | scsi_device_protection(SCpnt->device) || | 1073 | scsi_device_protection(SCpnt->device) || |
| 1063 | SCpnt->device->use_10_for_rw) { | 1074 | SCpnt->device->use_10_for_rw) { |
| 1064 | if (this_count > 0xffff) | ||
| 1065 | this_count = 0xffff; | ||
| 1066 | |||
| 1067 | SCpnt->cmnd[0] += READ_10 - READ_6; | 1075 | SCpnt->cmnd[0] += READ_10 - READ_6; |
| 1068 | SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); | 1076 | SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); |
| 1069 | SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; | 1077 | SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; |
| @@ -1116,6 +1124,34 @@ static int sd_init_command(struct scsi_cmnd *SCpnt) | |||
| 1116 | return ret; | 1124 | return ret; |
| 1117 | } | 1125 | } |
| 1118 | 1126 | ||
| 1127 | static int sd_init_command(struct scsi_cmnd *cmd) | ||
| 1128 | { | ||
| 1129 | struct request *rq = cmd->request; | ||
| 1130 | |||
| 1131 | if (rq->cmd_flags & REQ_DISCARD) | ||
| 1132 | return sd_setup_discard_cmnd(cmd); | ||
| 1133 | else if (rq->cmd_flags & REQ_WRITE_SAME) | ||
| 1134 | return sd_setup_write_same_cmnd(cmd); | ||
| 1135 | else if (rq->cmd_flags & REQ_FLUSH) | ||
| 1136 | return sd_setup_flush_cmnd(cmd); | ||
| 1137 | else | ||
| 1138 | return sd_setup_read_write_cmnd(cmd); | ||
| 1139 | } | ||
| 1140 | |||
| 1141 | static void sd_uninit_command(struct scsi_cmnd *SCpnt) | ||
| 1142 | { | ||
| 1143 | struct request *rq = SCpnt->request; | ||
| 1144 | |||
| 1145 | if (rq->cmd_flags & REQ_DISCARD) | ||
| 1146 | __free_page(rq->completion_data); | ||
| 1147 | |||
| 1148 | if (SCpnt->cmnd != rq->cmd) { | ||
| 1149 | mempool_free(SCpnt->cmnd, sd_cdb_pool); | ||
| 1150 | SCpnt->cmnd = NULL; | ||
| 1151 | SCpnt->cmd_len = 0; | ||
| 1152 | } | ||
| 1153 | } | ||
| 1154 | |||
| 1119 | /** | 1155 | /** |
| 1120 | * sd_open - open a scsi disk device | 1156 | * sd_open - open a scsi disk device |
| 1121 | * @inode: only i_rdev member may be used | 1157 | * @inode: only i_rdev member may be used |
| @@ -2225,7 +2261,11 @@ got_data: | |||
| 2225 | } | 2261 | } |
| 2226 | } | 2262 | } |
| 2227 | 2263 | ||
| 2228 | sdp->use_16_for_rw = (sdkp->capacity > 0xffffffff); | 2264 | if (sdkp->capacity > 0xffffffff) { |
| 2265 | sdp->use_16_for_rw = 1; | ||
| 2266 | sdkp->max_xfer_blocks = SD_MAX_XFER_BLOCKS; | ||
| 2267 | } else | ||
| 2268 | sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS; | ||
| 2229 | 2269 | ||
| 2230 | /* Rescale capacity to 512-byte units */ | 2270 | /* Rescale capacity to 512-byte units */ |
| 2231 | if (sector_size == 4096) | 2271 | if (sector_size == 4096) |
| @@ -2540,6 +2580,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) | |||
| 2540 | { | 2580 | { |
| 2541 | unsigned int sector_sz = sdkp->device->sector_size; | 2581 | unsigned int sector_sz = sdkp->device->sector_size; |
| 2542 | const int vpd_len = 64; | 2582 | const int vpd_len = 64; |
| 2583 | u32 max_xfer_length; | ||
| 2543 | unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); | 2584 | unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); |
| 2544 | 2585 | ||
| 2545 | if (!buffer || | 2586 | if (!buffer || |
| @@ -2547,6 +2588,10 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) | |||
| 2547 | scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len)) | 2588 | scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len)) |
| 2548 | goto out; | 2589 | goto out; |
| 2549 | 2590 | ||
| 2591 | max_xfer_length = get_unaligned_be32(&buffer[8]); | ||
| 2592 | if (max_xfer_length) | ||
| 2593 | sdkp->max_xfer_blocks = max_xfer_length; | ||
| 2594 | |||
| 2550 | blk_queue_io_min(sdkp->disk->queue, | 2595 | blk_queue_io_min(sdkp->disk->queue, |
| 2551 | get_unaligned_be16(&buffer[6]) * sector_sz); | 2596 | get_unaligned_be16(&buffer[6]) * sector_sz); |
| 2552 | blk_queue_io_opt(sdkp->disk->queue, | 2597 | blk_queue_io_opt(sdkp->disk->queue, |
| @@ -2681,6 +2726,11 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) | |||
| 2681 | 2726 | ||
| 2682 | static int sd_try_extended_inquiry(struct scsi_device *sdp) | 2727 | static int sd_try_extended_inquiry(struct scsi_device *sdp) |
| 2683 | { | 2728 | { |
| 2729 | /* Attempt VPD inquiry if the device blacklist explicitly calls | ||
| 2730 | * for it. | ||
| 2731 | */ | ||
| 2732 | if (sdp->try_vpd_pages) | ||
| 2733 | return 1; | ||
| 2684 | /* | 2734 | /* |
| 2685 | * Although VPD inquiries can go to SCSI-2 type devices, | 2735 | * Although VPD inquiries can go to SCSI-2 type devices, |
| 2686 | * some USB ones crash on receiving them, and the pages | 2736 | * some USB ones crash on receiving them, and the pages |
| @@ -2701,7 +2751,7 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
| 2701 | struct scsi_disk *sdkp = scsi_disk(disk); | 2751 | struct scsi_disk *sdkp = scsi_disk(disk); |
| 2702 | struct scsi_device *sdp = sdkp->device; | 2752 | struct scsi_device *sdp = sdkp->device; |
| 2703 | unsigned char *buffer; | 2753 | unsigned char *buffer; |
| 2704 | unsigned flush = 0; | 2754 | unsigned int max_xfer; |
| 2705 | 2755 | ||
| 2706 | SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, | 2756 | SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, |
| 2707 | "sd_revalidate_disk\n")); | 2757 | "sd_revalidate_disk\n")); |
| @@ -2747,14 +2797,12 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
| 2747 | * We now have all cache related info, determine how we deal | 2797 | * We now have all cache related info, determine how we deal |
| 2748 | * with flush requests. | 2798 | * with flush requests. |
| 2749 | */ | 2799 | */ |
| 2750 | if (sdkp->WCE) { | 2800 | sd_set_flush_flag(sdkp); |
| 2751 | flush |= REQ_FLUSH; | ||
| 2752 | if (sdkp->DPOFUA) | ||
| 2753 | flush |= REQ_FUA; | ||
| 2754 | } | ||
| 2755 | |||
| 2756 | blk_queue_flush(sdkp->disk->queue, flush); | ||
| 2757 | 2801 | ||
| 2802 | max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), | ||
| 2803 | sdkp->max_xfer_blocks); | ||
| 2804 | max_xfer <<= ilog2(sdp->sector_size) - 9; | ||
| 2805 | blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer); | ||
| 2758 | set_capacity(disk, sdkp->capacity); | 2806 | set_capacity(disk, sdkp->capacity); |
| 2759 | sd_config_write_same(sdkp); | 2807 | sd_config_write_same(sdkp); |
| 2760 | kfree(buffer); | 2808 | kfree(buffer); |
| @@ -3208,12 +3256,14 @@ static int __init init_sd(void) | |||
| 3208 | 0, 0, NULL); | 3256 | 0, 0, NULL); |
| 3209 | if (!sd_cdb_cache) { | 3257 | if (!sd_cdb_cache) { |
| 3210 | printk(KERN_ERR "sd: can't init extended cdb cache\n"); | 3258 | printk(KERN_ERR "sd: can't init extended cdb cache\n"); |
| 3259 | err = -ENOMEM; | ||
| 3211 | goto err_out_class; | 3260 | goto err_out_class; |
| 3212 | } | 3261 | } |
| 3213 | 3262 | ||
| 3214 | sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache); | 3263 | sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache); |
| 3215 | if (!sd_cdb_pool) { | 3264 | if (!sd_cdb_pool) { |
| 3216 | printk(KERN_ERR "sd: can't init extended cdb pool\n"); | 3265 | printk(KERN_ERR "sd: can't init extended cdb pool\n"); |
| 3266 | err = -ENOMEM; | ||
| 3217 | goto err_out_cache; | 3267 | goto err_out_cache; |
| 3218 | } | 3268 | } |
| 3219 | 3269 | ||
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 620871efbf0a..4c3ab8377fd3 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h | |||
| @@ -44,6 +44,8 @@ enum { | |||
| 44 | }; | 44 | }; |
| 45 | 45 | ||
| 46 | enum { | 46 | enum { |
| 47 | SD_DEF_XFER_BLOCKS = 0xffff, | ||
| 48 | SD_MAX_XFER_BLOCKS = 0xffffffff, | ||
| 47 | SD_MAX_WS10_BLOCKS = 0xffff, | 49 | SD_MAX_WS10_BLOCKS = 0xffff, |
| 48 | SD_MAX_WS16_BLOCKS = 0x7fffff, | 50 | SD_MAX_WS16_BLOCKS = 0x7fffff, |
| 49 | }; | 51 | }; |
| @@ -64,6 +66,7 @@ struct scsi_disk { | |||
| 64 | struct gendisk *disk; | 66 | struct gendisk *disk; |
| 65 | atomic_t openers; | 67 | atomic_t openers; |
| 66 | sector_t capacity; /* size in 512-byte sectors */ | 68 | sector_t capacity; /* size in 512-byte sectors */ |
| 69 | u32 max_xfer_blocks; | ||
| 67 | u32 max_ws_blocks; | 70 | u32 max_ws_blocks; |
| 68 | u32 max_unmap_blocks; | 71 | u32 max_unmap_blocks; |
| 69 | u32 unmap_granularity; | 72 | u32 unmap_granularity; |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 53268aaba559..01cf88888797 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
| @@ -7,9 +7,7 @@ | |||
| 7 | * Original driver (sg.c): | 7 | * Original driver (sg.c): |
| 8 | * Copyright (C) 1992 Lawrence Foard | 8 | * Copyright (C) 1992 Lawrence Foard |
| 9 | * Version 2 and 3 extensions to driver: | 9 | * Version 2 and 3 extensions to driver: |
| 10 | * Copyright (C) 1998 - 2005 Douglas Gilbert | 10 | * Copyright (C) 1998 - 2014 Douglas Gilbert |
| 11 | * | ||
| 12 | * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support | ||
| 13 | * | 11 | * |
| 14 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
| 15 | * it under the terms of the GNU General Public License as published by | 13 | * it under the terms of the GNU General Public License as published by |
| @@ -18,11 +16,11 @@ | |||
| 18 | * | 16 | * |
| 19 | */ | 17 | */ |
| 20 | 18 | ||
| 21 | static int sg_version_num = 30534; /* 2 digits for each component */ | 19 | static int sg_version_num = 30536; /* 2 digits for each component */ |
| 22 | #define SG_VERSION_STR "3.5.34" | 20 | #define SG_VERSION_STR "3.5.36" |
| 23 | 21 | ||
| 24 | /* | 22 | /* |
| 25 | * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: | 23 | * D. P. Gilbert (dgilbert@interlog.com), notes: |
| 26 | * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First | 24 | * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First |
| 27 | * the kernel/module needs to be built with CONFIG_SCSI_LOGGING | 25 | * the kernel/module needs to be built with CONFIG_SCSI_LOGGING |
| 28 | * (otherwise the macros compile to empty statements). | 26 | * (otherwise the macros compile to empty statements). |
| @@ -51,6 +49,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */ | |||
| 51 | #include <linux/delay.h> | 49 | #include <linux/delay.h> |
| 52 | #include <linux/blktrace_api.h> | 50 | #include <linux/blktrace_api.h> |
| 53 | #include <linux/mutex.h> | 51 | #include <linux/mutex.h> |
| 52 | #include <linux/atomic.h> | ||
| 54 | #include <linux/ratelimit.h> | 53 | #include <linux/ratelimit.h> |
| 55 | 54 | ||
| 56 | #include "scsi.h" | 55 | #include "scsi.h" |
| @@ -64,7 +63,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */ | |||
| 64 | 63 | ||
| 65 | #ifdef CONFIG_SCSI_PROC_FS | 64 | #ifdef CONFIG_SCSI_PROC_FS |
| 66 | #include <linux/proc_fs.h> | 65 | #include <linux/proc_fs.h> |
| 67 | static char *sg_version_date = "20061027"; | 66 | static char *sg_version_date = "20140603"; |
| 68 | 67 | ||
| 69 | static int sg_proc_init(void); | 68 | static int sg_proc_init(void); |
| 70 | static void sg_proc_cleanup(void); | 69 | static void sg_proc_cleanup(void); |
| @@ -74,6 +73,12 @@ static void sg_proc_cleanup(void); | |||
| 74 | 73 | ||
| 75 | #define SG_MAX_DEVS 32768 | 74 | #define SG_MAX_DEVS 32768 |
| 76 | 75 | ||
| 76 | /* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type | ||
| 77 | * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater | ||
| 78 | * than 16 bytes are "variable length" whose length is a multiple of 4 | ||
| 79 | */ | ||
| 80 | #define SG_MAX_CDB_SIZE 252 | ||
| 81 | |||
| 77 | /* | 82 | /* |
| 78 | * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d) | 83 | * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d) |
| 79 | * Then when using 32 bit integers x * m may overflow during the calculation. | 84 | * Then when using 32 bit integers x * m may overflow during the calculation. |
| @@ -102,18 +107,16 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ; | |||
| 102 | 107 | ||
| 103 | #define SG_SECTOR_SZ 512 | 108 | #define SG_SECTOR_SZ 512 |
| 104 | 109 | ||
| 105 | static int sg_add(struct device *, struct class_interface *); | 110 | static int sg_add_device(struct device *, struct class_interface *); |
| 106 | static void sg_remove(struct device *, struct class_interface *); | 111 | static void sg_remove_device(struct device *, struct class_interface *); |
| 107 | |||
| 108 | static DEFINE_SPINLOCK(sg_open_exclusive_lock); | ||
| 109 | 112 | ||
| 110 | static DEFINE_IDR(sg_index_idr); | 113 | static DEFINE_IDR(sg_index_idr); |
| 111 | static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock | 114 | static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock |
| 112 | file descriptor list for device */ | 115 | file descriptor list for device */ |
| 113 | 116 | ||
| 114 | static struct class_interface sg_interface = { | 117 | static struct class_interface sg_interface = { |
| 115 | .add_dev = sg_add, | 118 | .add_dev = sg_add_device, |
| 116 | .remove_dev = sg_remove, | 119 | .remove_dev = sg_remove_device, |
| 117 | }; | 120 | }; |
| 118 | 121 | ||
| 119 | typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */ | 122 | typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */ |
| @@ -146,8 +149,7 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ | |||
| 146 | } Sg_request; | 149 | } Sg_request; |
| 147 | 150 | ||
| 148 | typedef struct sg_fd { /* holds the state of a file descriptor */ | 151 | typedef struct sg_fd { /* holds the state of a file descriptor */ |
| 149 | /* sfd_siblings is protected by sg_index_lock */ | 152 | struct list_head sfd_siblings; /* protected by device's sfd_lock */ |
| 150 | struct list_head sfd_siblings; | ||
| 151 | struct sg_device *parentdp; /* owning device */ | 153 | struct sg_device *parentdp; /* owning device */ |
| 152 | wait_queue_head_t read_wait; /* queue read until command done */ | 154 | wait_queue_head_t read_wait; /* queue read until command done */ |
| 153 | rwlock_t rq_list_lock; /* protect access to list in req_arr */ | 155 | rwlock_t rq_list_lock; /* protect access to list in req_arr */ |
| @@ -161,7 +163,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ | |||
| 161 | char low_dma; /* as in parent but possibly overridden to 1 */ | 163 | char low_dma; /* as in parent but possibly overridden to 1 */ |
| 162 | char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ | 164 | char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ |
| 163 | char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ | 165 | char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ |
| 164 | char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */ | 166 | unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */ |
| 165 | char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ | 167 | char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ |
| 166 | char mmap_called; /* 0 -> mmap() never called on this fd */ | 168 | char mmap_called; /* 0 -> mmap() never called on this fd */ |
| 167 | struct kref f_ref; | 169 | struct kref f_ref; |
| @@ -170,14 +172,15 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ | |||
| 170 | 172 | ||
| 171 | typedef struct sg_device { /* holds the state of each scsi generic device */ | 173 | typedef struct sg_device { /* holds the state of each scsi generic device */ |
| 172 | struct scsi_device *device; | 174 | struct scsi_device *device; |
| 173 | wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */ | 175 | wait_queue_head_t open_wait; /* queue open() when O_EXCL present */ |
| 176 | struct mutex open_rel_lock; /* held when in open() or release() */ | ||
| 174 | int sg_tablesize; /* adapter's max scatter-gather table size */ | 177 | int sg_tablesize; /* adapter's max scatter-gather table size */ |
| 175 | u32 index; /* device index number */ | 178 | u32 index; /* device index number */ |
| 176 | /* sfds is protected by sg_index_lock */ | ||
| 177 | struct list_head sfds; | 179 | struct list_head sfds; |
| 178 | volatile char detached; /* 0->attached, 1->detached pending removal */ | 180 | rwlock_t sfd_lock; /* protect access to sfd list */ |
| 179 | /* exclude protected by sg_open_exclusive_lock */ | 181 | atomic_t detaching; /* 0->device usable, 1->device detaching */ |
| 180 | char exclude; /* opened for exclusive access */ | 182 | bool exclude; /* 1->open(O_EXCL) succeeded and is active */ |
| 183 | int open_cnt; /* count of opens (perhaps < num(sfds) ) */ | ||
| 181 | char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ | 184 | char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ |
| 182 | struct gendisk *disk; | 185 | struct gendisk *disk; |
| 183 | struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */ | 186 | struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */ |
| @@ -197,24 +200,28 @@ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, | |||
| 197 | static int sg_common_write(Sg_fd * sfp, Sg_request * srp, | 200 | static int sg_common_write(Sg_fd * sfp, Sg_request * srp, |
| 198 | unsigned char *cmnd, int timeout, int blocking); | 201 | unsigned char *cmnd, int timeout, int blocking); |
| 199 | static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); | 202 | static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); |
| 200 | static void sg_remove_scat(Sg_scatter_hold * schp); | 203 | static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp); |
| 201 | static void sg_build_reserve(Sg_fd * sfp, int req_size); | 204 | static void sg_build_reserve(Sg_fd * sfp, int req_size); |
| 202 | static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); | 205 | static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); |
| 203 | static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); | 206 | static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); |
| 204 | static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev); | 207 | static Sg_fd *sg_add_sfp(Sg_device * sdp); |
| 205 | static void sg_remove_sfp(struct kref *); | 208 | static void sg_remove_sfp(struct kref *); |
| 206 | static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); | 209 | static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); |
| 207 | static Sg_request *sg_add_request(Sg_fd * sfp); | 210 | static Sg_request *sg_add_request(Sg_fd * sfp); |
| 208 | static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); | 211 | static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); |
| 209 | static int sg_res_in_use(Sg_fd * sfp); | 212 | static int sg_res_in_use(Sg_fd * sfp); |
| 210 | static Sg_device *sg_get_dev(int dev); | 213 | static Sg_device *sg_get_dev(int dev); |
| 211 | static void sg_put_dev(Sg_device *sdp); | 214 | static void sg_device_destroy(struct kref *kref); |
| 212 | 215 | ||
| 213 | #define SZ_SG_HEADER sizeof(struct sg_header) | 216 | #define SZ_SG_HEADER sizeof(struct sg_header) |
| 214 | #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t) | 217 | #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t) |
| 215 | #define SZ_SG_IOVEC sizeof(sg_iovec_t) | 218 | #define SZ_SG_IOVEC sizeof(sg_iovec_t) |
| 216 | #define SZ_SG_REQ_INFO sizeof(sg_req_info_t) | 219 | #define SZ_SG_REQ_INFO sizeof(sg_req_info_t) |
| 217 | 220 | ||
| 221 | #define sg_printk(prefix, sdp, fmt, a...) \ | ||
| 222 | sdev_printk(prefix, (sdp)->device, "[%s] " fmt, \ | ||
| 223 | (sdp)->disk->disk_name, ##a) | ||
| 224 | |||
| 218 | static int sg_allow_access(struct file *filp, unsigned char *cmd) | 225 | static int sg_allow_access(struct file *filp, unsigned char *cmd) |
| 219 | { | 226 | { |
| 220 | struct sg_fd *sfp = filp->private_data; | 227 | struct sg_fd *sfp = filp->private_data; |
| @@ -225,38 +232,43 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd) | |||
| 225 | return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); | 232 | return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); |
| 226 | } | 233 | } |
| 227 | 234 | ||
| 228 | static int get_exclude(Sg_device *sdp) | 235 | static int |
| 229 | { | 236 | open_wait(Sg_device *sdp, int flags) |
| 230 | unsigned long flags; | ||
| 231 | int ret; | ||
| 232 | |||
| 233 | spin_lock_irqsave(&sg_open_exclusive_lock, flags); | ||
| 234 | ret = sdp->exclude; | ||
| 235 | spin_unlock_irqrestore(&sg_open_exclusive_lock, flags); | ||
| 236 | return ret; | ||
| 237 | } | ||
| 238 | |||
| 239 | static int set_exclude(Sg_device *sdp, char val) | ||
| 240 | { | 237 | { |
| 241 | unsigned long flags; | 238 | int retval = 0; |
| 242 | |||
| 243 | spin_lock_irqsave(&sg_open_exclusive_lock, flags); | ||
| 244 | sdp->exclude = val; | ||
| 245 | spin_unlock_irqrestore(&sg_open_exclusive_lock, flags); | ||
| 246 | return val; | ||
| 247 | } | ||
| 248 | 239 | ||
| 249 | static int sfds_list_empty(Sg_device *sdp) | 240 | if (flags & O_EXCL) { |
| 250 | { | 241 | while (sdp->open_cnt > 0) { |
| 251 | unsigned long flags; | 242 | mutex_unlock(&sdp->open_rel_lock); |
| 252 | int ret; | 243 | retval = wait_event_interruptible(sdp->open_wait, |
| 244 | (atomic_read(&sdp->detaching) || | ||
| 245 | !sdp->open_cnt)); | ||
| 246 | mutex_lock(&sdp->open_rel_lock); | ||
| 247 | |||
| 248 | if (retval) /* -ERESTARTSYS */ | ||
| 249 | return retval; | ||
| 250 | if (atomic_read(&sdp->detaching)) | ||
| 251 | return -ENODEV; | ||
| 252 | } | ||
| 253 | } else { | ||
| 254 | while (sdp->exclude) { | ||
| 255 | mutex_unlock(&sdp->open_rel_lock); | ||
| 256 | retval = wait_event_interruptible(sdp->open_wait, | ||
| 257 | (atomic_read(&sdp->detaching) || | ||
| 258 | !sdp->exclude)); | ||
| 259 | mutex_lock(&sdp->open_rel_lock); | ||
| 260 | |||
| 261 | if (retval) /* -ERESTARTSYS */ | ||
| 262 | return retval; | ||
| 263 | if (atomic_read(&sdp->detaching)) | ||
| 264 | return -ENODEV; | ||
| 265 | } | ||
| 266 | } | ||
| 253 | 267 | ||
| 254 | read_lock_irqsave(&sg_index_lock, flags); | 268 | return retval; |
| 255 | ret = list_empty(&sdp->sfds); | ||
| 256 | read_unlock_irqrestore(&sg_index_lock, flags); | ||
| 257 | return ret; | ||
| 258 | } | 269 | } |
| 259 | 270 | ||
| 271 | /* Returns 0 on success, else a negated errno value */ | ||
| 260 | static int | 272 | static int |
| 261 | sg_open(struct inode *inode, struct file *filp) | 273 | sg_open(struct inode *inode, struct file *filp) |
| 262 | { | 274 | { |
| @@ -265,17 +277,17 @@ sg_open(struct inode *inode, struct file *filp) | |||
| 265 | struct request_queue *q; | 277 | struct request_queue *q; |
| 266 | Sg_device *sdp; | 278 | Sg_device *sdp; |
| 267 | Sg_fd *sfp; | 279 | Sg_fd *sfp; |
| 268 | int res; | ||
| 269 | int retval; | 280 | int retval; |
| 270 | 281 | ||
| 271 | nonseekable_open(inode, filp); | 282 | nonseekable_open(inode, filp); |
| 272 | SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags)); | 283 | if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) |
| 284 | return -EPERM; /* Can't lock it with read only access */ | ||
| 273 | sdp = sg_get_dev(dev); | 285 | sdp = sg_get_dev(dev); |
| 274 | if (IS_ERR(sdp)) { | 286 | if (IS_ERR(sdp)) |
| 275 | retval = PTR_ERR(sdp); | 287 | return PTR_ERR(sdp); |
| 276 | sdp = NULL; | 288 | |
| 277 | goto sg_put; | 289 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, |
| 278 | } | 290 | "sg_open: flags=0x%x\n", flags)); |
| 279 | 291 | ||
| 280 | /* This driver's module count bumped by fops_get in <linux/fs.h> */ | 292 | /* This driver's module count bumped by fops_get in <linux/fs.h> */ |
| 281 | /* Prevent the device driver from vanishing while we sleep */ | 293 | /* Prevent the device driver from vanishing while we sleep */ |
| @@ -287,6 +299,9 @@ sg_open(struct inode *inode, struct file *filp) | |||
| 287 | if (retval) | 299 | if (retval) |
| 288 | goto sdp_put; | 300 | goto sdp_put; |
| 289 | 301 | ||
| 302 | /* scsi_block_when_processing_errors() may block so bypass | ||
| 303 | * check if O_NONBLOCK. Permits SCSI commands to be issued | ||
| 304 | * during error recovery. Tread carefully. */ | ||
| 290 | if (!((flags & O_NONBLOCK) || | 305 | if (!((flags & O_NONBLOCK) || |
| 291 | scsi_block_when_processing_errors(sdp->device))) { | 306 | scsi_block_when_processing_errors(sdp->device))) { |
| 292 | retval = -ENXIO; | 307 | retval = -ENXIO; |
| @@ -294,65 +309,65 @@ sg_open(struct inode *inode, struct file *filp) | |||
| 294 | goto error_out; | 309 | goto error_out; |
| 295 | } | 310 | } |
| 296 | 311 | ||
| 297 | if (flags & O_EXCL) { | 312 | mutex_lock(&sdp->open_rel_lock); |
| 298 | if (O_RDONLY == (flags & O_ACCMODE)) { | 313 | if (flags & O_NONBLOCK) { |
| 299 | retval = -EPERM; /* Can't lock it with read only access */ | 314 | if (flags & O_EXCL) { |
| 300 | goto error_out; | 315 | if (sdp->open_cnt > 0) { |
| 301 | } | 316 | retval = -EBUSY; |
| 302 | if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) { | 317 | goto error_mutex_locked; |
| 303 | retval = -EBUSY; | 318 | } |
| 304 | goto error_out; | 319 | } else { |
| 305 | } | 320 | if (sdp->exclude) { |
| 306 | res = wait_event_interruptible(sdp->o_excl_wait, | 321 | retval = -EBUSY; |
| 307 | ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1))); | 322 | goto error_mutex_locked; |
| 308 | if (res) { | 323 | } |
| 309 | retval = res; /* -ERESTARTSYS because signal hit process */ | ||
| 310 | goto error_out; | ||
| 311 | } | ||
| 312 | } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */ | ||
| 313 | if (flags & O_NONBLOCK) { | ||
| 314 | retval = -EBUSY; | ||
| 315 | goto error_out; | ||
| 316 | } | ||
| 317 | res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp)); | ||
| 318 | if (res) { | ||
| 319 | retval = res; /* -ERESTARTSYS because signal hit process */ | ||
| 320 | goto error_out; | ||
| 321 | } | 324 | } |
| 325 | } else { | ||
| 326 | retval = open_wait(sdp, flags); | ||
| 327 | if (retval) /* -ERESTARTSYS or -ENODEV */ | ||
| 328 | goto error_mutex_locked; | ||
| 322 | } | 329 | } |
| 323 | if (sdp->detached) { | 330 | |
| 324 | retval = -ENODEV; | 331 | /* N.B. at this point we are holding the open_rel_lock */ |
| 325 | goto error_out; | 332 | if (flags & O_EXCL) |
| 326 | } | 333 | sdp->exclude = true; |
| 327 | if (sfds_list_empty(sdp)) { /* no existing opens on this device */ | 334 | |
| 335 | if (sdp->open_cnt < 1) { /* no existing opens */ | ||
| 328 | sdp->sgdebug = 0; | 336 | sdp->sgdebug = 0; |
| 329 | q = sdp->device->request_queue; | 337 | q = sdp->device->request_queue; |
| 330 | sdp->sg_tablesize = queue_max_segments(q); | 338 | sdp->sg_tablesize = queue_max_segments(q); |
| 331 | } | 339 | } |
| 332 | if ((sfp = sg_add_sfp(sdp, dev))) | 340 | sfp = sg_add_sfp(sdp); |
| 333 | filp->private_data = sfp; | 341 | if (IS_ERR(sfp)) { |
| 334 | else { | 342 | retval = PTR_ERR(sfp); |
| 335 | if (flags & O_EXCL) { | 343 | goto out_undo; |
| 336 | set_exclude(sdp, 0); /* undo if error */ | ||
| 337 | wake_up_interruptible(&sdp->o_excl_wait); | ||
| 338 | } | ||
| 339 | retval = -ENOMEM; | ||
| 340 | goto error_out; | ||
| 341 | } | 344 | } |
| 345 | |||
| 346 | filp->private_data = sfp; | ||
| 347 | sdp->open_cnt++; | ||
| 348 | mutex_unlock(&sdp->open_rel_lock); | ||
| 349 | |||
| 342 | retval = 0; | 350 | retval = 0; |
| 343 | error_out: | ||
| 344 | if (retval) { | ||
| 345 | scsi_autopm_put_device(sdp->device); | ||
| 346 | sdp_put: | ||
| 347 | scsi_device_put(sdp->device); | ||
| 348 | } | ||
| 349 | sg_put: | 351 | sg_put: |
| 350 | if (sdp) | 352 | kref_put(&sdp->d_ref, sg_device_destroy); |
| 351 | sg_put_dev(sdp); | ||
| 352 | return retval; | 353 | return retval; |
| 354 | |||
| 355 | out_undo: | ||
| 356 | if (flags & O_EXCL) { | ||
| 357 | sdp->exclude = false; /* undo if error */ | ||
| 358 | wake_up_interruptible(&sdp->open_wait); | ||
| 359 | } | ||
| 360 | error_mutex_locked: | ||
| 361 | mutex_unlock(&sdp->open_rel_lock); | ||
| 362 | error_out: | ||
| 363 | scsi_autopm_put_device(sdp->device); | ||
| 364 | sdp_put: | ||
| 365 | scsi_device_put(sdp->device); | ||
| 366 | goto sg_put; | ||
| 353 | } | 367 | } |
| 354 | 368 | ||
| 355 | /* Following function was formerly called 'sg_close' */ | 369 | /* Release resources associated with a successful sg_open() |
| 370 | * Returns 0 on success, else a negated errno value */ | ||
| 356 | static int | 371 | static int |
| 357 | sg_release(struct inode *inode, struct file *filp) | 372 | sg_release(struct inode *inode, struct file *filp) |
| 358 | { | 373 | { |
| @@ -361,13 +376,22 @@ sg_release(struct inode *inode, struct file *filp) | |||
| 361 | 376 | ||
| 362 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) | 377 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) |
| 363 | return -ENXIO; | 378 | return -ENXIO; |
| 364 | SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); | 379 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n")); |
| 365 | |||
| 366 | set_exclude(sdp, 0); | ||
| 367 | wake_up_interruptible(&sdp->o_excl_wait); | ||
| 368 | 380 | ||
| 381 | mutex_lock(&sdp->open_rel_lock); | ||
| 369 | scsi_autopm_put_device(sdp->device); | 382 | scsi_autopm_put_device(sdp->device); |
| 370 | kref_put(&sfp->f_ref, sg_remove_sfp); | 383 | kref_put(&sfp->f_ref, sg_remove_sfp); |
| 384 | sdp->open_cnt--; | ||
| 385 | |||
| 386 | /* possibly many open()s waiting on exlude clearing, start many; | ||
| 387 | * only open(O_EXCL)s wait on 0==open_cnt so only start one */ | ||
| 388 | if (sdp->exclude) { | ||
| 389 | sdp->exclude = false; | ||
| 390 | wake_up_interruptible_all(&sdp->open_wait); | ||
| 391 | } else if (0 == sdp->open_cnt) { | ||
| 392 | wake_up_interruptible(&sdp->open_wait); | ||
| 393 | } | ||
| 394 | mutex_unlock(&sdp->open_rel_lock); | ||
| 371 | return 0; | 395 | return 0; |
| 372 | } | 396 | } |
| 373 | 397 | ||
| @@ -384,8 +408,8 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) | |||
| 384 | 408 | ||
| 385 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) | 409 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) |
| 386 | return -ENXIO; | 410 | return -ENXIO; |
| 387 | SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n", | 411 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, |
| 388 | sdp->disk->disk_name, (int) count)); | 412 | "sg_read: count=%d\n", (int) count)); |
| 389 | 413 | ||
| 390 | if (!access_ok(VERIFY_WRITE, buf, count)) | 414 | if (!access_ok(VERIFY_WRITE, buf, count)) |
| 391 | return -EFAULT; | 415 | return -EFAULT; |
| @@ -419,7 +443,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) | |||
| 419 | } | 443 | } |
| 420 | srp = sg_get_rq_mark(sfp, req_pack_id); | 444 | srp = sg_get_rq_mark(sfp, req_pack_id); |
| 421 | if (!srp) { /* now wait on packet to arrive */ | 445 | if (!srp) { /* now wait on packet to arrive */ |
| 422 | if (sdp->detached) { | 446 | if (atomic_read(&sdp->detaching)) { |
| 423 | retval = -ENODEV; | 447 | retval = -ENODEV; |
| 424 | goto free_old_hdr; | 448 | goto free_old_hdr; |
| 425 | } | 449 | } |
| @@ -428,9 +452,9 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) | |||
| 428 | goto free_old_hdr; | 452 | goto free_old_hdr; |
| 429 | } | 453 | } |
| 430 | retval = wait_event_interruptible(sfp->read_wait, | 454 | retval = wait_event_interruptible(sfp->read_wait, |
| 431 | (sdp->detached || | 455 | (atomic_read(&sdp->detaching) || |
| 432 | (srp = sg_get_rq_mark(sfp, req_pack_id)))); | 456 | (srp = sg_get_rq_mark(sfp, req_pack_id)))); |
| 433 | if (sdp->detached) { | 457 | if (atomic_read(&sdp->detaching)) { |
| 434 | retval = -ENODEV; | 458 | retval = -ENODEV; |
| 435 | goto free_old_hdr; | 459 | goto free_old_hdr; |
| 436 | } | 460 | } |
| @@ -566,13 +590,13 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) | |||
| 566 | Sg_request *srp; | 590 | Sg_request *srp; |
| 567 | struct sg_header old_hdr; | 591 | struct sg_header old_hdr; |
| 568 | sg_io_hdr_t *hp; | 592 | sg_io_hdr_t *hp; |
| 569 | unsigned char cmnd[MAX_COMMAND_SIZE]; | 593 | unsigned char cmnd[SG_MAX_CDB_SIZE]; |
| 570 | 594 | ||
| 571 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) | 595 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) |
| 572 | return -ENXIO; | 596 | return -ENXIO; |
| 573 | SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n", | 597 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, |
| 574 | sdp->disk->disk_name, (int) count)); | 598 | "sg_write: count=%d\n", (int) count)); |
| 575 | if (sdp->detached) | 599 | if (atomic_read(&sdp->detaching)) |
| 576 | return -ENODEV; | 600 | return -ENODEV; |
| 577 | if (!((filp->f_flags & O_NONBLOCK) || | 601 | if (!((filp->f_flags & O_NONBLOCK) || |
| 578 | scsi_block_when_processing_errors(sdp->device))) | 602 | scsi_block_when_processing_errors(sdp->device))) |
| @@ -592,18 +616,13 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) | |||
| 592 | return -EIO; /* The minimum scsi command length is 6 bytes. */ | 616 | return -EIO; /* The minimum scsi command length is 6 bytes. */ |
| 593 | 617 | ||
| 594 | if (!(srp = sg_add_request(sfp))) { | 618 | if (!(srp = sg_add_request(sfp))) { |
| 595 | SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n")); | 619 | SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp, |
| 620 | "sg_write: queue full\n")); | ||
| 596 | return -EDOM; | 621 | return -EDOM; |
| 597 | } | 622 | } |
| 598 | buf += SZ_SG_HEADER; | 623 | buf += SZ_SG_HEADER; |
| 599 | __get_user(opcode, buf); | 624 | __get_user(opcode, buf); |
| 600 | if (sfp->next_cmd_len > 0) { | 625 | if (sfp->next_cmd_len > 0) { |
| 601 | if (sfp->next_cmd_len > MAX_COMMAND_SIZE) { | ||
| 602 | SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n")); | ||
| 603 | sfp->next_cmd_len = 0; | ||
| 604 | sg_remove_request(sfp, srp); | ||
| 605 | return -EIO; | ||
| 606 | } | ||
| 607 | cmd_size = sfp->next_cmd_len; | 626 | cmd_size = sfp->next_cmd_len; |
| 608 | sfp->next_cmd_len = 0; /* reset so only this write() effected */ | 627 | sfp->next_cmd_len = 0; /* reset so only this write() effected */ |
| 609 | } else { | 628 | } else { |
| @@ -611,7 +630,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) | |||
| 611 | if ((opcode >= 0xc0) && old_hdr.twelve_byte) | 630 | if ((opcode >= 0xc0) && old_hdr.twelve_byte) |
| 612 | cmd_size = 12; | 631 | cmd_size = 12; |
| 613 | } | 632 | } |
| 614 | SCSI_LOG_TIMEOUT(4, printk( | 633 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, |
| 615 | "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); | 634 | "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); |
| 616 | /* Determine buffer size. */ | 635 | /* Determine buffer size. */ |
| 617 | input_size = count - cmd_size; | 636 | input_size = count - cmd_size; |
| @@ -675,7 +694,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, | |||
| 675 | int k; | 694 | int k; |
| 676 | Sg_request *srp; | 695 | Sg_request *srp; |
| 677 | sg_io_hdr_t *hp; | 696 | sg_io_hdr_t *hp; |
| 678 | unsigned char cmnd[MAX_COMMAND_SIZE]; | 697 | unsigned char cmnd[SG_MAX_CDB_SIZE]; |
| 679 | int timeout; | 698 | int timeout; |
| 680 | unsigned long ul_timeout; | 699 | unsigned long ul_timeout; |
| 681 | 700 | ||
| @@ -686,7 +705,8 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, | |||
| 686 | 705 | ||
| 687 | sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ | 706 | sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ |
| 688 | if (!(srp = sg_add_request(sfp))) { | 707 | if (!(srp = sg_add_request(sfp))) { |
| 689 | SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n")); | 708 | SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, |
| 709 | "sg_new_write: queue full\n")); | ||
| 690 | return -EDOM; | 710 | return -EDOM; |
| 691 | } | 711 | } |
| 692 | srp->sg_io_owned = sg_io_owned; | 712 | srp->sg_io_owned = sg_io_owned; |
| @@ -743,7 +763,7 @@ static int | |||
| 743 | sg_common_write(Sg_fd * sfp, Sg_request * srp, | 763 | sg_common_write(Sg_fd * sfp, Sg_request * srp, |
| 744 | unsigned char *cmnd, int timeout, int blocking) | 764 | unsigned char *cmnd, int timeout, int blocking) |
| 745 | { | 765 | { |
| 746 | int k, data_dir; | 766 | int k, data_dir, at_head; |
| 747 | Sg_device *sdp = sfp->parentdp; | 767 | Sg_device *sdp = sfp->parentdp; |
| 748 | sg_io_hdr_t *hp = &srp->header; | 768 | sg_io_hdr_t *hp = &srp->header; |
| 749 | 769 | ||
| @@ -755,16 +775,18 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, | |||
| 755 | hp->host_status = 0; | 775 | hp->host_status = 0; |
| 756 | hp->driver_status = 0; | 776 | hp->driver_status = 0; |
| 757 | hp->resid = 0; | 777 | hp->resid = 0; |
| 758 | SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", | 778 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, |
| 759 | (int) cmnd[0], (int) hp->cmd_len)); | 779 | "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", |
| 780 | (int) cmnd[0], (int) hp->cmd_len)); | ||
| 760 | 781 | ||
| 761 | k = sg_start_req(srp, cmnd); | 782 | k = sg_start_req(srp, cmnd); |
| 762 | if (k) { | 783 | if (k) { |
| 763 | SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k)); | 784 | SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, |
| 785 | "sg_common_write: start_req err=%d\n", k)); | ||
| 764 | sg_finish_rem_req(srp); | 786 | sg_finish_rem_req(srp); |
| 765 | return k; /* probably out of space --> ENOMEM */ | 787 | return k; /* probably out of space --> ENOMEM */ |
| 766 | } | 788 | } |
| 767 | if (sdp->detached) { | 789 | if (atomic_read(&sdp->detaching)) { |
| 768 | if (srp->bio) | 790 | if (srp->bio) |
| 769 | blk_end_request_all(srp->rq, -EIO); | 791 | blk_end_request_all(srp->rq, -EIO); |
| 770 | sg_finish_rem_req(srp); | 792 | sg_finish_rem_req(srp); |
| @@ -787,11 +809,16 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, | |||
| 787 | break; | 809 | break; |
| 788 | } | 810 | } |
| 789 | hp->duration = jiffies_to_msecs(jiffies); | 811 | hp->duration = jiffies_to_msecs(jiffies); |
| 812 | if (hp->interface_id != '\0' && /* v3 (or later) interface */ | ||
| 813 | (SG_FLAG_Q_AT_TAIL & hp->flags)) | ||
| 814 | at_head = 0; | ||
| 815 | else | ||
| 816 | at_head = 1; | ||
| 790 | 817 | ||
| 791 | srp->rq->timeout = timeout; | 818 | srp->rq->timeout = timeout; |
| 792 | kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */ | 819 | kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */ |
| 793 | blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk, | 820 | blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk, |
| 794 | srp->rq, 1, sg_rq_end_io); | 821 | srp->rq, at_head, sg_rq_end_io); |
| 795 | return 0; | 822 | return 0; |
| 796 | } | 823 | } |
| 797 | 824 | ||
| @@ -806,6 +833,15 @@ static int srp_done(Sg_fd *sfp, Sg_request *srp) | |||
| 806 | return ret; | 833 | return ret; |
| 807 | } | 834 | } |
| 808 | 835 | ||
| 836 | static int max_sectors_bytes(struct request_queue *q) | ||
| 837 | { | ||
| 838 | unsigned int max_sectors = queue_max_sectors(q); | ||
| 839 | |||
| 840 | max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9); | ||
| 841 | |||
| 842 | return max_sectors << 9; | ||
| 843 | } | ||
| 844 | |||
| 809 | static long | 845 | static long |
| 810 | sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) | 846 | sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) |
| 811 | { | 847 | { |
| @@ -820,13 +856,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) | |||
| 820 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) | 856 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) |
| 821 | return -ENXIO; | 857 | return -ENXIO; |
| 822 | 858 | ||
| 823 | SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n", | 859 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, |
| 824 | sdp->disk->disk_name, (int) cmd_in)); | 860 | "sg_ioctl: cmd=0x%x\n", (int) cmd_in)); |
| 825 | read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); | 861 | read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); |
| 826 | 862 | ||
| 827 | switch (cmd_in) { | 863 | switch (cmd_in) { |
| 828 | case SG_IO: | 864 | case SG_IO: |
| 829 | if (sdp->detached) | 865 | if (atomic_read(&sdp->detaching)) |
| 830 | return -ENODEV; | 866 | return -ENODEV; |
| 831 | if (!scsi_block_when_processing_errors(sdp->device)) | 867 | if (!scsi_block_when_processing_errors(sdp->device)) |
| 832 | return -ENXIO; | 868 | return -ENXIO; |
| @@ -837,8 +873,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) | |||
| 837 | if (result < 0) | 873 | if (result < 0) |
| 838 | return result; | 874 | return result; |
| 839 | result = wait_event_interruptible(sfp->read_wait, | 875 | result = wait_event_interruptible(sfp->read_wait, |
| 840 | (srp_done(sfp, srp) || sdp->detached)); | 876 | (srp_done(sfp, srp) || atomic_read(&sdp->detaching))); |
| 841 | if (sdp->detached) | 877 | if (atomic_read(&sdp->detaching)) |
| 842 | return -ENODEV; | 878 | return -ENODEV; |
| 843 | write_lock_irq(&sfp->rq_list_lock); | 879 | write_lock_irq(&sfp->rq_list_lock); |
| 844 | if (srp->done) { | 880 | if (srp->done) { |
| @@ -873,11 +909,11 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) | |||
| 873 | sfp->low_dma = 1; | 909 | sfp->low_dma = 1; |
| 874 | if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) { | 910 | if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) { |
| 875 | val = (int) sfp->reserve.bufflen; | 911 | val = (int) sfp->reserve.bufflen; |
| 876 | sg_remove_scat(&sfp->reserve); | 912 | sg_remove_scat(sfp, &sfp->reserve); |
| 877 | sg_build_reserve(sfp, val); | 913 | sg_build_reserve(sfp, val); |
| 878 | } | 914 | } |
| 879 | } else { | 915 | } else { |
| 880 | if (sdp->detached) | 916 | if (atomic_read(&sdp->detaching)) |
| 881 | return -ENODEV; | 917 | return -ENODEV; |
| 882 | sfp->low_dma = sdp->device->host->unchecked_isa_dma; | 918 | sfp->low_dma = sdp->device->host->unchecked_isa_dma; |
| 883 | } | 919 | } |
| @@ -890,7 +926,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) | |||
| 890 | else { | 926 | else { |
| 891 | sg_scsi_id_t __user *sg_idp = p; | 927 | sg_scsi_id_t __user *sg_idp = p; |
| 892 | 928 | ||
| 893 | if (sdp->detached) | 929 | if (atomic_read(&sdp->detaching)) |
| 894 | return -ENODEV; | 930 | return -ENODEV; |
| 895 | __put_user((int) sdp->device->host->host_no, | 931 | __put_user((int) sdp->device->host->host_no, |
| 896 | &sg_idp->host_no); | 932 | &sg_idp->host_no); |
| @@ -945,17 +981,17 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) | |||
| 945 | if (val < 0) | 981 | if (val < 0) |
| 946 | return -EINVAL; | 982 | return -EINVAL; |
| 947 | val = min_t(int, val, | 983 | val = min_t(int, val, |
| 948 | queue_max_sectors(sdp->device->request_queue) * 512); | 984 | max_sectors_bytes(sdp->device->request_queue)); |
| 949 | if (val != sfp->reserve.bufflen) { | 985 | if (val != sfp->reserve.bufflen) { |
| 950 | if (sg_res_in_use(sfp) || sfp->mmap_called) | 986 | if (sg_res_in_use(sfp) || sfp->mmap_called) |
| 951 | return -EBUSY; | 987 | return -EBUSY; |
| 952 | sg_remove_scat(&sfp->reserve); | 988 | sg_remove_scat(sfp, &sfp->reserve); |
| 953 | sg_build_reserve(sfp, val); | 989 | sg_build_reserve(sfp, val); |
| 954 | } | 990 | } |
| 955 | return 0; | 991 | return 0; |
| 956 | case SG_GET_RESERVED_SIZE: | 992 | case SG_GET_RESERVED_SIZE: |
| 957 | val = min_t(int, sfp->reserve.bufflen, | 993 | val = min_t(int, sfp->reserve.bufflen, |
| 958 | queue_max_sectors(sdp->device->request_queue) * 512); | 994 | max_sectors_bytes(sdp->device->request_queue)); |
| 959 | return put_user(val, ip); | 995 | return put_user(val, ip); |
| 960 | case SG_SET_COMMAND_Q: | 996 | case SG_SET_COMMAND_Q: |
| 961 | result = get_user(val, ip); | 997 | result = get_user(val, ip); |
| @@ -1032,11 +1068,11 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) | |||
| 1032 | return result; | 1068 | return result; |
| 1033 | } | 1069 | } |
| 1034 | case SG_EMULATED_HOST: | 1070 | case SG_EMULATED_HOST: |
| 1035 | if (sdp->detached) | 1071 | if (atomic_read(&sdp->detaching)) |
| 1036 | return -ENODEV; | 1072 | return -ENODEV; |
| 1037 | return put_user(sdp->device->host->hostt->emulated, ip); | 1073 | return put_user(sdp->device->host->hostt->emulated, ip); |
| 1038 | case SG_SCSI_RESET: | 1074 | case SG_SCSI_RESET: |
| 1039 | if (sdp->detached) | 1075 | if (atomic_read(&sdp->detaching)) |
| 1040 | return -ENODEV; | 1076 | return -ENODEV; |
| 1041 | if (filp->f_flags & O_NONBLOCK) { | 1077 | if (filp->f_flags & O_NONBLOCK) { |
| 1042 | if (scsi_host_in_recovery(sdp->device->host)) | 1078 | if (scsi_host_in_recovery(sdp->device->host)) |
| @@ -1069,7 +1105,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) | |||
| 1069 | return (scsi_reset_provider(sdp->device, val) == | 1105 | return (scsi_reset_provider(sdp->device, val) == |
| 1070 | SUCCESS) ? 0 : -EIO; | 1106 | SUCCESS) ? 0 : -EIO; |
| 1071 | case SCSI_IOCTL_SEND_COMMAND: | 1107 | case SCSI_IOCTL_SEND_COMMAND: |
| 1072 | if (sdp->detached) | 1108 | if (atomic_read(&sdp->detaching)) |
| 1073 | return -ENODEV; | 1109 | return -ENODEV; |
| 1074 | if (read_only) { | 1110 | if (read_only) { |
| 1075 | unsigned char opcode = WRITE_6; | 1111 | unsigned char opcode = WRITE_6; |
| @@ -1091,11 +1127,11 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) | |||
| 1091 | case SCSI_IOCTL_GET_BUS_NUMBER: | 1127 | case SCSI_IOCTL_GET_BUS_NUMBER: |
| 1092 | case SCSI_IOCTL_PROBE_HOST: | 1128 | case SCSI_IOCTL_PROBE_HOST: |
| 1093 | case SG_GET_TRANSFORM: | 1129 | case SG_GET_TRANSFORM: |
| 1094 | if (sdp->detached) | 1130 | if (atomic_read(&sdp->detaching)) |
| 1095 | return -ENODEV; | 1131 | return -ENODEV; |
| 1096 | return scsi_ioctl(sdp->device, cmd_in, p); | 1132 | return scsi_ioctl(sdp->device, cmd_in, p); |
| 1097 | case BLKSECTGET: | 1133 | case BLKSECTGET: |
| 1098 | return put_user(queue_max_sectors(sdp->device->request_queue) * 512, | 1134 | return put_user(max_sectors_bytes(sdp->device->request_queue), |
| 1099 | ip); | 1135 | ip); |
| 1100 | case BLKTRACESETUP: | 1136 | case BLKTRACESETUP: |
| 1101 | return blk_trace_setup(sdp->device->request_queue, | 1137 | return blk_trace_setup(sdp->device->request_queue, |
| @@ -1165,15 +1201,15 @@ sg_poll(struct file *filp, poll_table * wait) | |||
| 1165 | } | 1201 | } |
| 1166 | read_unlock_irqrestore(&sfp->rq_list_lock, iflags); | 1202 | read_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
| 1167 | 1203 | ||
| 1168 | if (sdp->detached) | 1204 | if (atomic_read(&sdp->detaching)) |
| 1169 | res |= POLLHUP; | 1205 | res |= POLLHUP; |
| 1170 | else if (!sfp->cmd_q) { | 1206 | else if (!sfp->cmd_q) { |
| 1171 | if (0 == count) | 1207 | if (0 == count) |
| 1172 | res |= POLLOUT | POLLWRNORM; | 1208 | res |= POLLOUT | POLLWRNORM; |
| 1173 | } else if (count < SG_MAX_QUEUE) | 1209 | } else if (count < SG_MAX_QUEUE) |
| 1174 | res |= POLLOUT | POLLWRNORM; | 1210 | res |= POLLOUT | POLLWRNORM; |
| 1175 | SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n", | 1211 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, |
| 1176 | sdp->disk->disk_name, (int) res)); | 1212 | "sg_poll: res=0x%x\n", (int) res)); |
| 1177 | return res; | 1213 | return res; |
| 1178 | } | 1214 | } |
| 1179 | 1215 | ||
| @@ -1185,8 +1221,8 @@ sg_fasync(int fd, struct file *filp, int mode) | |||
| 1185 | 1221 | ||
| 1186 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) | 1222 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) |
| 1187 | return -ENXIO; | 1223 | return -ENXIO; |
| 1188 | SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n", | 1224 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, |
| 1189 | sdp->disk->disk_name, mode)); | 1225 | "sg_fasync: mode=%d\n", mode)); |
| 1190 | 1226 | ||
| 1191 | return fasync_helper(fd, filp, mode, &sfp->async_qp); | 1227 | return fasync_helper(fd, filp, mode, &sfp->async_qp); |
| 1192 | } | 1228 | } |
| @@ -1205,8 +1241,9 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 1205 | offset = vmf->pgoff << PAGE_SHIFT; | 1241 | offset = vmf->pgoff << PAGE_SHIFT; |
| 1206 | if (offset >= rsv_schp->bufflen) | 1242 | if (offset >= rsv_schp->bufflen) |
| 1207 | return VM_FAULT_SIGBUS; | 1243 | return VM_FAULT_SIGBUS; |
| 1208 | SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n", | 1244 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp, |
| 1209 | offset, rsv_schp->k_use_sg)); | 1245 | "sg_vma_fault: offset=%lu, scatg=%d\n", |
| 1246 | offset, rsv_schp->k_use_sg)); | ||
| 1210 | sa = vma->vm_start; | 1247 | sa = vma->vm_start; |
| 1211 | length = 1 << (PAGE_SHIFT + rsv_schp->page_order); | 1248 | length = 1 << (PAGE_SHIFT + rsv_schp->page_order); |
| 1212 | for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { | 1249 | for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { |
| @@ -1241,8 +1278,9 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) | |||
| 1241 | if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) | 1278 | if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) |
| 1242 | return -ENXIO; | 1279 | return -ENXIO; |
| 1243 | req_sz = vma->vm_end - vma->vm_start; | 1280 | req_sz = vma->vm_end - vma->vm_start; |
| 1244 | SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n", | 1281 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp, |
| 1245 | (void *) vma->vm_start, (int) req_sz)); | 1282 | "sg_mmap starting, vm_start=%p, len=%d\n", |
| 1283 | (void *) vma->vm_start, (int) req_sz)); | ||
| 1246 | if (vma->vm_pgoff) | 1284 | if (vma->vm_pgoff) |
| 1247 | return -EINVAL; /* want no offset */ | 1285 | return -EINVAL; /* want no offset */ |
| 1248 | rsv_schp = &sfp->reserve; | 1286 | rsv_schp = &sfp->reserve; |
| @@ -1264,7 +1302,8 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) | |||
| 1264 | return 0; | 1302 | return 0; |
| 1265 | } | 1303 | } |
| 1266 | 1304 | ||
| 1267 | static void sg_rq_end_io_usercontext(struct work_struct *work) | 1305 | static void |
| 1306 | sg_rq_end_io_usercontext(struct work_struct *work) | ||
| 1268 | { | 1307 | { |
| 1269 | struct sg_request *srp = container_of(work, struct sg_request, ew.work); | 1308 | struct sg_request *srp = container_of(work, struct sg_request, ew.work); |
| 1270 | struct sg_fd *sfp = srp->parentfp; | 1309 | struct sg_fd *sfp = srp->parentfp; |
| @@ -1277,7 +1316,8 @@ static void sg_rq_end_io_usercontext(struct work_struct *work) | |||
| 1277 | * This function is a "bottom half" handler that is called by the mid | 1316 | * This function is a "bottom half" handler that is called by the mid |
| 1278 | * level when a command is completed (or has failed). | 1317 | * level when a command is completed (or has failed). |
| 1279 | */ | 1318 | */ |
| 1280 | static void sg_rq_end_io(struct request *rq, int uptodate) | 1319 | static void |
| 1320 | sg_rq_end_io(struct request *rq, int uptodate) | ||
| 1281 | { | 1321 | { |
| 1282 | struct sg_request *srp = rq->end_io_data; | 1322 | struct sg_request *srp = rq->end_io_data; |
| 1283 | Sg_device *sdp; | 1323 | Sg_device *sdp; |
| @@ -1295,15 +1335,16 @@ static void sg_rq_end_io(struct request *rq, int uptodate) | |||
| 1295 | return; | 1335 | return; |
| 1296 | 1336 | ||
| 1297 | sdp = sfp->parentdp; | 1337 | sdp = sfp->parentdp; |
| 1298 | if (unlikely(sdp->detached)) | 1338 | if (unlikely(atomic_read(&sdp->detaching))) |
| 1299 | printk(KERN_INFO "sg_rq_end_io: device detached\n"); | 1339 | pr_info("%s: device detaching\n", __func__); |
| 1300 | 1340 | ||
| 1301 | sense = rq->sense; | 1341 | sense = rq->sense; |
| 1302 | result = rq->errors; | 1342 | result = rq->errors; |
| 1303 | resid = rq->resid_len; | 1343 | resid = rq->resid_len; |
| 1304 | 1344 | ||
| 1305 | SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", | 1345 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, |
| 1306 | sdp->disk->disk_name, srp->header.pack_id, result)); | 1346 | "sg_cmd_done: pack_id=%d, res=0x%x\n", |
| 1347 | srp->header.pack_id, result)); | ||
| 1307 | srp->header.resid = resid; | 1348 | srp->header.resid = resid; |
| 1308 | ms = jiffies_to_msecs(jiffies); | 1349 | ms = jiffies_to_msecs(jiffies); |
| 1309 | srp->header.duration = (ms > srp->header.duration) ? | 1350 | srp->header.duration = (ms > srp->header.duration) ? |
| @@ -1319,7 +1360,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate) | |||
| 1319 | if ((sdp->sgdebug > 0) && | 1360 | if ((sdp->sgdebug > 0) && |
| 1320 | ((CHECK_CONDITION == srp->header.masked_status) || | 1361 | ((CHECK_CONDITION == srp->header.masked_status) || |
| 1321 | (COMMAND_TERMINATED == srp->header.masked_status))) | 1362 | (COMMAND_TERMINATED == srp->header.masked_status))) |
| 1322 | __scsi_print_sense("sg_cmd_done", sense, | 1363 | __scsi_print_sense(__func__, sense, |
| 1323 | SCSI_SENSE_BUFFERSIZE); | 1364 | SCSI_SENSE_BUFFERSIZE); |
| 1324 | 1365 | ||
| 1325 | /* Following if statement is a patch supplied by Eric Youngdale */ | 1366 | /* Following if statement is a patch supplied by Eric Youngdale */ |
| @@ -1378,7 +1419,8 @@ static struct class *sg_sysfs_class; | |||
| 1378 | 1419 | ||
| 1379 | static int sg_sysfs_valid = 0; | 1420 | static int sg_sysfs_valid = 0; |
| 1380 | 1421 | ||
| 1381 | static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) | 1422 | static Sg_device * |
| 1423 | sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) | ||
| 1382 | { | 1424 | { |
| 1383 | struct request_queue *q = scsidp->request_queue; | 1425 | struct request_queue *q = scsidp->request_queue; |
| 1384 | Sg_device *sdp; | 1426 | Sg_device *sdp; |
| @@ -1388,7 +1430,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) | |||
| 1388 | 1430 | ||
| 1389 | sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL); | 1431 | sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL); |
| 1390 | if (!sdp) { | 1432 | if (!sdp) { |
| 1391 | printk(KERN_WARNING "kmalloc Sg_device failure\n"); | 1433 | sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device " |
| 1434 | "failure\n", __func__); | ||
| 1392 | return ERR_PTR(-ENOMEM); | 1435 | return ERR_PTR(-ENOMEM); |
| 1393 | } | 1436 | } |
| 1394 | 1437 | ||
| @@ -1403,20 +1446,25 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) | |||
| 1403 | scsidp->type, SG_MAX_DEVS - 1); | 1446 | scsidp->type, SG_MAX_DEVS - 1); |
| 1404 | error = -ENODEV; | 1447 | error = -ENODEV; |
| 1405 | } else { | 1448 | } else { |
| 1406 | printk(KERN_WARNING | 1449 | sdev_printk(KERN_WARNING, scsidp, "%s: idr " |
| 1407 | "idr allocation Sg_device failure: %d\n", error); | 1450 | "allocation Sg_device failure: %d\n", |
| 1451 | __func__, error); | ||
| 1408 | } | 1452 | } |
| 1409 | goto out_unlock; | 1453 | goto out_unlock; |
| 1410 | } | 1454 | } |
| 1411 | k = error; | 1455 | k = error; |
| 1412 | 1456 | ||
| 1413 | SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k)); | 1457 | SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp, |
| 1458 | "sg_alloc: dev=%d \n", k)); | ||
| 1414 | sprintf(disk->disk_name, "sg%d", k); | 1459 | sprintf(disk->disk_name, "sg%d", k); |
| 1415 | disk->first_minor = k; | 1460 | disk->first_minor = k; |
| 1416 | sdp->disk = disk; | 1461 | sdp->disk = disk; |
| 1417 | sdp->device = scsidp; | 1462 | sdp->device = scsidp; |
| 1463 | mutex_init(&sdp->open_rel_lock); | ||
| 1418 | INIT_LIST_HEAD(&sdp->sfds); | 1464 | INIT_LIST_HEAD(&sdp->sfds); |
| 1419 | init_waitqueue_head(&sdp->o_excl_wait); | 1465 | init_waitqueue_head(&sdp->open_wait); |
| 1466 | atomic_set(&sdp->detaching, 0); | ||
| 1467 | rwlock_init(&sdp->sfd_lock); | ||
| 1420 | sdp->sg_tablesize = queue_max_segments(q); | 1468 | sdp->sg_tablesize = queue_max_segments(q); |
| 1421 | sdp->index = k; | 1469 | sdp->index = k; |
| 1422 | kref_init(&sdp->d_ref); | 1470 | kref_init(&sdp->d_ref); |
| @@ -1434,7 +1482,7 @@ out_unlock: | |||
| 1434 | } | 1482 | } |
| 1435 | 1483 | ||
| 1436 | static int | 1484 | static int |
| 1437 | sg_add(struct device *cl_dev, struct class_interface *cl_intf) | 1485 | sg_add_device(struct device *cl_dev, struct class_interface *cl_intf) |
| 1438 | { | 1486 | { |
| 1439 | struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); | 1487 | struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); |
| 1440 | struct gendisk *disk; | 1488 | struct gendisk *disk; |
| @@ -1445,7 +1493,7 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf) | |||
| 1445 | 1493 | ||
| 1446 | disk = alloc_disk(1); | 1494 | disk = alloc_disk(1); |
| 1447 | if (!disk) { | 1495 | if (!disk) { |
| 1448 | printk(KERN_WARNING "alloc_disk failed\n"); | 1496 | pr_warn("%s: alloc_disk failed\n", __func__); |
| 1449 | return -ENOMEM; | 1497 | return -ENOMEM; |
| 1450 | } | 1498 | } |
| 1451 | disk->major = SCSI_GENERIC_MAJOR; | 1499 | disk->major = SCSI_GENERIC_MAJOR; |
| @@ -1453,7 +1501,7 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf) | |||
| 1453 | error = -ENOMEM; | 1501 | error = -ENOMEM; |
| 1454 | cdev = cdev_alloc(); | 1502 | cdev = cdev_alloc(); |
| 1455 | if (!cdev) { | 1503 | if (!cdev) { |
| 1456 | printk(KERN_WARNING "cdev_alloc failed\n"); | 1504 | pr_warn("%s: cdev_alloc failed\n", __func__); |
| 1457 | goto out; | 1505 | goto out; |
| 1458 | } | 1506 | } |
| 1459 | cdev->owner = THIS_MODULE; | 1507 | cdev->owner = THIS_MODULE; |
| @@ -1461,7 +1509,7 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf) | |||
| 1461 | 1509 | ||
| 1462 | sdp = sg_alloc(disk, scsidp); | 1510 | sdp = sg_alloc(disk, scsidp); |
| 1463 | if (IS_ERR(sdp)) { | 1511 | if (IS_ERR(sdp)) { |
| 1464 | printk(KERN_WARNING "sg_alloc failed\n"); | 1512 | pr_warn("%s: sg_alloc failed\n", __func__); |
| 1465 | error = PTR_ERR(sdp); | 1513 | error = PTR_ERR(sdp); |
| 1466 | goto out; | 1514 | goto out; |
| 1467 | } | 1515 | } |
| @@ -1479,22 +1527,20 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf) | |||
| 1479 | sdp->index), | 1527 | sdp->index), |
| 1480 | sdp, "%s", disk->disk_name); | 1528 | sdp, "%s", disk->disk_name); |
| 1481 | if (IS_ERR(sg_class_member)) { | 1529 | if (IS_ERR(sg_class_member)) { |
| 1482 | printk(KERN_ERR "sg_add: " | 1530 | pr_err("%s: device_create failed\n", __func__); |
| 1483 | "device_create failed\n"); | ||
| 1484 | error = PTR_ERR(sg_class_member); | 1531 | error = PTR_ERR(sg_class_member); |
| 1485 | goto cdev_add_err; | 1532 | goto cdev_add_err; |
| 1486 | } | 1533 | } |
| 1487 | error = sysfs_create_link(&scsidp->sdev_gendev.kobj, | 1534 | error = sysfs_create_link(&scsidp->sdev_gendev.kobj, |
| 1488 | &sg_class_member->kobj, "generic"); | 1535 | &sg_class_member->kobj, "generic"); |
| 1489 | if (error) | 1536 | if (error) |
| 1490 | printk(KERN_ERR "sg_add: unable to make symlink " | 1537 | pr_err("%s: unable to make symlink 'generic' back " |
| 1491 | "'generic' back to sg%d\n", sdp->index); | 1538 | "to sg%d\n", __func__, sdp->index); |
| 1492 | } else | 1539 | } else |
| 1493 | printk(KERN_WARNING "sg_add: sg_sys Invalid\n"); | 1540 | pr_warn("%s: sg_sys Invalid\n", __func__); |
| 1494 | 1541 | ||
| 1495 | sdev_printk(KERN_NOTICE, scsidp, | 1542 | sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d " |
| 1496 | "Attached scsi generic sg%d type %d\n", sdp->index, | 1543 | "type %d\n", sdp->index, scsidp->type); |
| 1497 | scsidp->type); | ||
| 1498 | 1544 | ||
| 1499 | dev_set_drvdata(cl_dev, sdp); | 1545 | dev_set_drvdata(cl_dev, sdp); |
| 1500 | 1546 | ||
| @@ -1513,7 +1559,8 @@ out: | |||
| 1513 | return error; | 1559 | return error; |
| 1514 | } | 1560 | } |
| 1515 | 1561 | ||
| 1516 | static void sg_device_destroy(struct kref *kref) | 1562 | static void |
| 1563 | sg_device_destroy(struct kref *kref) | ||
| 1517 | { | 1564 | { |
| 1518 | struct sg_device *sdp = container_of(kref, struct sg_device, d_ref); | 1565 | struct sg_device *sdp = container_of(kref, struct sg_device, d_ref); |
| 1519 | unsigned long flags; | 1566 | unsigned long flags; |
| @@ -1528,40 +1575,45 @@ static void sg_device_destroy(struct kref *kref) | |||
| 1528 | write_unlock_irqrestore(&sg_index_lock, flags); | 1575 | write_unlock_irqrestore(&sg_index_lock, flags); |
| 1529 | 1576 | ||
| 1530 | SCSI_LOG_TIMEOUT(3, | 1577 | SCSI_LOG_TIMEOUT(3, |
| 1531 | printk("sg_device_destroy: %s\n", | 1578 | sg_printk(KERN_INFO, sdp, "sg_device_destroy\n")); |
| 1532 | sdp->disk->disk_name)); | ||
| 1533 | 1579 | ||
| 1534 | put_disk(sdp->disk); | 1580 | put_disk(sdp->disk); |
| 1535 | kfree(sdp); | 1581 | kfree(sdp); |
| 1536 | } | 1582 | } |
| 1537 | 1583 | ||
| 1538 | static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf) | 1584 | static void |
| 1585 | sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf) | ||
| 1539 | { | 1586 | { |
| 1540 | struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); | 1587 | struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); |
| 1541 | Sg_device *sdp = dev_get_drvdata(cl_dev); | 1588 | Sg_device *sdp = dev_get_drvdata(cl_dev); |
| 1542 | unsigned long iflags; | 1589 | unsigned long iflags; |
| 1543 | Sg_fd *sfp; | 1590 | Sg_fd *sfp; |
| 1591 | int val; | ||
| 1544 | 1592 | ||
| 1545 | if (!sdp || sdp->detached) | 1593 | if (!sdp) |
| 1546 | return; | 1594 | return; |
| 1595 | /* want sdp->detaching non-zero as soon as possible */ | ||
| 1596 | val = atomic_inc_return(&sdp->detaching); | ||
| 1597 | if (val > 1) | ||
| 1598 | return; /* only want to do following once per device */ | ||
| 1547 | 1599 | ||
| 1548 | SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name)); | 1600 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, |
| 1601 | "%s\n", __func__)); | ||
| 1549 | 1602 | ||
| 1550 | /* Need a write lock to set sdp->detached. */ | 1603 | read_lock_irqsave(&sdp->sfd_lock, iflags); |
| 1551 | write_lock_irqsave(&sg_index_lock, iflags); | ||
| 1552 | sdp->detached = 1; | ||
| 1553 | list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { | 1604 | list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { |
| 1554 | wake_up_interruptible(&sfp->read_wait); | 1605 | wake_up_interruptible_all(&sfp->read_wait); |
| 1555 | kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); | 1606 | kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); |
| 1556 | } | 1607 | } |
| 1557 | write_unlock_irqrestore(&sg_index_lock, iflags); | 1608 | wake_up_interruptible_all(&sdp->open_wait); |
| 1609 | read_unlock_irqrestore(&sdp->sfd_lock, iflags); | ||
| 1558 | 1610 | ||
| 1559 | sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); | 1611 | sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); |
| 1560 | device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index)); | 1612 | device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index)); |
| 1561 | cdev_del(sdp->cdev); | 1613 | cdev_del(sdp->cdev); |
| 1562 | sdp->cdev = NULL; | 1614 | sdp->cdev = NULL; |
| 1563 | 1615 | ||
| 1564 | sg_put_dev(sdp); | 1616 | kref_put(&sdp->d_ref, sg_device_destroy); |
| 1565 | } | 1617 | } |
| 1566 | 1618 | ||
| 1567 | module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR); | 1619 | module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR); |
| @@ -1631,7 +1683,8 @@ exit_sg(void) | |||
| 1631 | idr_destroy(&sg_index_idr); | 1683 | idr_destroy(&sg_index_idr); |
| 1632 | } | 1684 | } |
| 1633 | 1685 | ||
| 1634 | static int sg_start_req(Sg_request *srp, unsigned char *cmd) | 1686 | static int |
| 1687 | sg_start_req(Sg_request *srp, unsigned char *cmd) | ||
| 1635 | { | 1688 | { |
| 1636 | int res; | 1689 | int res; |
| 1637 | struct request *rq; | 1690 | struct request *rq; |
| @@ -1645,15 +1698,28 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd) | |||
| 1645 | struct request_queue *q = sfp->parentdp->device->request_queue; | 1698 | struct request_queue *q = sfp->parentdp->device->request_queue; |
| 1646 | struct rq_map_data *md, map_data; | 1699 | struct rq_map_data *md, map_data; |
| 1647 | int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ; | 1700 | int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ; |
| 1701 | unsigned char *long_cmdp = NULL; | ||
| 1648 | 1702 | ||
| 1649 | SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n", | 1703 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, |
| 1650 | dxfer_len)); | 1704 | "sg_start_req: dxfer_len=%d\n", |
| 1705 | dxfer_len)); | ||
| 1706 | |||
| 1707 | if (hp->cmd_len > BLK_MAX_CDB) { | ||
| 1708 | long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL); | ||
| 1709 | if (!long_cmdp) | ||
| 1710 | return -ENOMEM; | ||
| 1711 | } | ||
| 1651 | 1712 | ||
| 1652 | rq = blk_get_request(q, rw, GFP_ATOMIC); | 1713 | rq = blk_get_request(q, rw, GFP_ATOMIC); |
| 1653 | if (!rq) | 1714 | if (!rq) { |
| 1715 | kfree(long_cmdp); | ||
| 1654 | return -ENOMEM; | 1716 | return -ENOMEM; |
| 1717 | } | ||
| 1655 | 1718 | ||
| 1656 | blk_rq_set_block_pc(rq); | 1719 | blk_rq_set_block_pc(rq); |
| 1720 | |||
| 1721 | if (hp->cmd_len > BLK_MAX_CDB) | ||
| 1722 | rq->cmd = long_cmdp; | ||
| 1657 | memcpy(rq->cmd, cmd, hp->cmd_len); | 1723 | memcpy(rq->cmd, cmd, hp->cmd_len); |
| 1658 | rq->cmd_len = hp->cmd_len; | 1724 | rq->cmd_len = hp->cmd_len; |
| 1659 | 1725 | ||
| @@ -1726,25 +1792,30 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd) | |||
| 1726 | return res; | 1792 | return res; |
| 1727 | } | 1793 | } |
| 1728 | 1794 | ||
| 1729 | static int sg_finish_rem_req(Sg_request * srp) | 1795 | static int |
| 1796 | sg_finish_rem_req(Sg_request *srp) | ||
| 1730 | { | 1797 | { |
| 1731 | int ret = 0; | 1798 | int ret = 0; |
| 1732 | 1799 | ||
| 1733 | Sg_fd *sfp = srp->parentfp; | 1800 | Sg_fd *sfp = srp->parentfp; |
| 1734 | Sg_scatter_hold *req_schp = &srp->data; | 1801 | Sg_scatter_hold *req_schp = &srp->data; |
| 1735 | 1802 | ||
| 1736 | SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used)); | 1803 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, |
| 1804 | "sg_finish_rem_req: res_used=%d\n", | ||
| 1805 | (int) srp->res_used)); | ||
| 1737 | if (srp->rq) { | 1806 | if (srp->rq) { |
| 1738 | if (srp->bio) | 1807 | if (srp->bio) |
| 1739 | ret = blk_rq_unmap_user(srp->bio); | 1808 | ret = blk_rq_unmap_user(srp->bio); |
| 1740 | 1809 | ||
| 1810 | if (srp->rq->cmd != srp->rq->__cmd) | ||
| 1811 | kfree(srp->rq->cmd); | ||
| 1741 | blk_put_request(srp->rq); | 1812 | blk_put_request(srp->rq); |
| 1742 | } | 1813 | } |
| 1743 | 1814 | ||
| 1744 | if (srp->res_used) | 1815 | if (srp->res_used) |
| 1745 | sg_unlink_reserve(sfp, srp); | 1816 | sg_unlink_reserve(sfp, srp); |
| 1746 | else | 1817 | else |
| 1747 | sg_remove_scat(req_schp); | 1818 | sg_remove_scat(sfp, req_schp); |
| 1748 | 1819 | ||
| 1749 | sg_remove_request(sfp, srp); | 1820 | sg_remove_request(sfp, srp); |
| 1750 | 1821 | ||
| @@ -1778,8 +1849,9 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) | |||
| 1778 | ++blk_size; /* don't know why */ | 1849 | ++blk_size; /* don't know why */ |
| 1779 | /* round request up to next highest SG_SECTOR_SZ byte boundary */ | 1850 | /* round request up to next highest SG_SECTOR_SZ byte boundary */ |
| 1780 | blk_size = ALIGN(blk_size, SG_SECTOR_SZ); | 1851 | blk_size = ALIGN(blk_size, SG_SECTOR_SZ); |
| 1781 | SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n", | 1852 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, |
| 1782 | buff_size, blk_size)); | 1853 | "sg_build_indirect: buff_size=%d, blk_size=%d\n", |
| 1854 | buff_size, blk_size)); | ||
| 1783 | 1855 | ||
| 1784 | /* N.B. ret_sz carried into this block ... */ | 1856 | /* N.B. ret_sz carried into this block ... */ |
| 1785 | mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); | 1857 | mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); |
| @@ -1822,14 +1894,16 @@ retry: | |||
| 1822 | } | 1894 | } |
| 1823 | } | 1895 | } |
| 1824 | 1896 | ||
| 1825 | SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, " | 1897 | SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, |
| 1826 | "ret_sz=%d\n", k, num, ret_sz)); | 1898 | "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n", |
| 1899 | k, num, ret_sz)); | ||
| 1827 | } /* end of for loop */ | 1900 | } /* end of for loop */ |
| 1828 | 1901 | ||
| 1829 | schp->page_order = order; | 1902 | schp->page_order = order; |
| 1830 | schp->k_use_sg = k; | 1903 | schp->k_use_sg = k; |
| 1831 | SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, " | 1904 | SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, |
| 1832 | "rem_sz=%d\n", k, rem_sz)); | 1905 | "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", |
| 1906 | k, rem_sz)); | ||
| 1833 | 1907 | ||
| 1834 | schp->bufflen = blk_size; | 1908 | schp->bufflen = blk_size; |
| 1835 | if (rem_sz > 0) /* must have failed */ | 1909 | if (rem_sz > 0) /* must have failed */ |
| @@ -1846,17 +1920,19 @@ out: | |||
| 1846 | } | 1920 | } |
| 1847 | 1921 | ||
| 1848 | static void | 1922 | static void |
| 1849 | sg_remove_scat(Sg_scatter_hold * schp) | 1923 | sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp) |
| 1850 | { | 1924 | { |
| 1851 | SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); | 1925 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, |
| 1926 | "sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); | ||
| 1852 | if (schp->pages && schp->sglist_len > 0) { | 1927 | if (schp->pages && schp->sglist_len > 0) { |
| 1853 | if (!schp->dio_in_use) { | 1928 | if (!schp->dio_in_use) { |
| 1854 | int k; | 1929 | int k; |
| 1855 | 1930 | ||
| 1856 | for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { | 1931 | for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { |
| 1857 | SCSI_LOG_TIMEOUT(5, printk( | 1932 | SCSI_LOG_TIMEOUT(5, |
| 1858 | "sg_remove_scat: k=%d, pg=0x%p\n", | 1933 | sg_printk(KERN_INFO, sfp->parentdp, |
| 1859 | k, schp->pages[k])); | 1934 | "sg_remove_scat: k=%d, pg=0x%p\n", |
| 1935 | k, schp->pages[k])); | ||
| 1860 | __free_pages(schp->pages[k], schp->page_order); | 1936 | __free_pages(schp->pages[k], schp->page_order); |
| 1861 | } | 1937 | } |
| 1862 | 1938 | ||
| @@ -1872,8 +1948,9 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) | |||
| 1872 | Sg_scatter_hold *schp = &srp->data; | 1948 | Sg_scatter_hold *schp = &srp->data; |
| 1873 | int k, num; | 1949 | int k, num; |
| 1874 | 1950 | ||
| 1875 | SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n", | 1951 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp, |
| 1876 | num_read_xfer)); | 1952 | "sg_read_oxfer: num_read_xfer=%d\n", |
| 1953 | num_read_xfer)); | ||
| 1877 | if ((!outp) || (num_read_xfer <= 0)) | 1954 | if ((!outp) || (num_read_xfer <= 0)) |
| 1878 | return 0; | 1955 | return 0; |
| 1879 | 1956 | ||
| @@ -1903,14 +1980,15 @@ sg_build_reserve(Sg_fd * sfp, int req_size) | |||
| 1903 | { | 1980 | { |
| 1904 | Sg_scatter_hold *schp = &sfp->reserve; | 1981 | Sg_scatter_hold *schp = &sfp->reserve; |
| 1905 | 1982 | ||
| 1906 | SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size)); | 1983 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, |
| 1984 | "sg_build_reserve: req_size=%d\n", req_size)); | ||
| 1907 | do { | 1985 | do { |
| 1908 | if (req_size < PAGE_SIZE) | 1986 | if (req_size < PAGE_SIZE) |
| 1909 | req_size = PAGE_SIZE; | 1987 | req_size = PAGE_SIZE; |
| 1910 | if (0 == sg_build_indirect(schp, sfp, req_size)) | 1988 | if (0 == sg_build_indirect(schp, sfp, req_size)) |
| 1911 | return; | 1989 | return; |
| 1912 | else | 1990 | else |
| 1913 | sg_remove_scat(schp); | 1991 | sg_remove_scat(sfp, schp); |
| 1914 | req_size >>= 1; /* divide by 2 */ | 1992 | req_size >>= 1; /* divide by 2 */ |
| 1915 | } while (req_size > (PAGE_SIZE / 2)); | 1993 | } while (req_size > (PAGE_SIZE / 2)); |
| 1916 | } | 1994 | } |
| @@ -1923,7 +2001,8 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) | |||
| 1923 | int k, num, rem; | 2001 | int k, num, rem; |
| 1924 | 2002 | ||
| 1925 | srp->res_used = 1; | 2003 | srp->res_used = 1; |
| 1926 | SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); | 2004 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, |
| 2005 | "sg_link_reserve: size=%d\n", size)); | ||
| 1927 | rem = size; | 2006 | rem = size; |
| 1928 | 2007 | ||
| 1929 | num = 1 << (PAGE_SHIFT + rsv_schp->page_order); | 2008 | num = 1 << (PAGE_SHIFT + rsv_schp->page_order); |
| @@ -1941,7 +2020,8 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) | |||
| 1941 | } | 2020 | } |
| 1942 | 2021 | ||
| 1943 | if (k >= rsv_schp->k_use_sg) | 2022 | if (k >= rsv_schp->k_use_sg) |
| 1944 | SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n")); | 2023 | SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, |
| 2024 | "sg_link_reserve: BAD size\n")); | ||
| 1945 | } | 2025 | } |
| 1946 | 2026 | ||
| 1947 | static void | 2027 | static void |
| @@ -1949,8 +2029,9 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) | |||
| 1949 | { | 2029 | { |
| 1950 | Sg_scatter_hold *req_schp = &srp->data; | 2030 | Sg_scatter_hold *req_schp = &srp->data; |
| 1951 | 2031 | ||
| 1952 | SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n", | 2032 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp, |
| 1953 | (int) req_schp->k_use_sg)); | 2033 | "sg_unlink_reserve: req->k_use_sg=%d\n", |
| 2034 | (int) req_schp->k_use_sg)); | ||
| 1954 | req_schp->k_use_sg = 0; | 2035 | req_schp->k_use_sg = 0; |
| 1955 | req_schp->bufflen = 0; | 2036 | req_schp->bufflen = 0; |
| 1956 | req_schp->pages = NULL; | 2037 | req_schp->pages = NULL; |
| @@ -2055,7 +2136,7 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp) | |||
| 2055 | } | 2136 | } |
| 2056 | 2137 | ||
| 2057 | static Sg_fd * | 2138 | static Sg_fd * |
| 2058 | sg_add_sfp(Sg_device * sdp, int dev) | 2139 | sg_add_sfp(Sg_device * sdp) |
| 2059 | { | 2140 | { |
| 2060 | Sg_fd *sfp; | 2141 | Sg_fd *sfp; |
| 2061 | unsigned long iflags; | 2142 | unsigned long iflags; |
| @@ -2063,7 +2144,7 @@ sg_add_sfp(Sg_device * sdp, int dev) | |||
| 2063 | 2144 | ||
| 2064 | sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); | 2145 | sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); |
| 2065 | if (!sfp) | 2146 | if (!sfp) |
| 2066 | return NULL; | 2147 | return ERR_PTR(-ENOMEM); |
| 2067 | 2148 | ||
| 2068 | init_waitqueue_head(&sfp->read_wait); | 2149 | init_waitqueue_head(&sfp->read_wait); |
| 2069 | rwlock_init(&sfp->rq_list_lock); | 2150 | rwlock_init(&sfp->rq_list_lock); |
| @@ -2077,25 +2158,33 @@ sg_add_sfp(Sg_device * sdp, int dev) | |||
| 2077 | sfp->cmd_q = SG_DEF_COMMAND_Q; | 2158 | sfp->cmd_q = SG_DEF_COMMAND_Q; |
| 2078 | sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; | 2159 | sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; |
| 2079 | sfp->parentdp = sdp; | 2160 | sfp->parentdp = sdp; |
| 2080 | write_lock_irqsave(&sg_index_lock, iflags); | 2161 | write_lock_irqsave(&sdp->sfd_lock, iflags); |
| 2162 | if (atomic_read(&sdp->detaching)) { | ||
| 2163 | write_unlock_irqrestore(&sdp->sfd_lock, iflags); | ||
| 2164 | return ERR_PTR(-ENODEV); | ||
| 2165 | } | ||
| 2081 | list_add_tail(&sfp->sfd_siblings, &sdp->sfds); | 2166 | list_add_tail(&sfp->sfd_siblings, &sdp->sfds); |
| 2082 | write_unlock_irqrestore(&sg_index_lock, iflags); | 2167 | write_unlock_irqrestore(&sdp->sfd_lock, iflags); |
| 2083 | SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp)); | 2168 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, |
| 2169 | "sg_add_sfp: sfp=0x%p\n", sfp)); | ||
| 2084 | if (unlikely(sg_big_buff != def_reserved_size)) | 2170 | if (unlikely(sg_big_buff != def_reserved_size)) |
| 2085 | sg_big_buff = def_reserved_size; | 2171 | sg_big_buff = def_reserved_size; |
| 2086 | 2172 | ||
| 2087 | bufflen = min_t(int, sg_big_buff, | 2173 | bufflen = min_t(int, sg_big_buff, |
| 2088 | queue_max_sectors(sdp->device->request_queue) * 512); | 2174 | max_sectors_bytes(sdp->device->request_queue)); |
| 2089 | sg_build_reserve(sfp, bufflen); | 2175 | sg_build_reserve(sfp, bufflen); |
| 2090 | SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n", | 2176 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, |
| 2091 | sfp->reserve.bufflen, sfp->reserve.k_use_sg)); | 2177 | "sg_add_sfp: bufflen=%d, k_use_sg=%d\n", |
| 2178 | sfp->reserve.bufflen, | ||
| 2179 | sfp->reserve.k_use_sg)); | ||
| 2092 | 2180 | ||
| 2093 | kref_get(&sdp->d_ref); | 2181 | kref_get(&sdp->d_ref); |
| 2094 | __module_get(THIS_MODULE); | 2182 | __module_get(THIS_MODULE); |
| 2095 | return sfp; | 2183 | return sfp; |
| 2096 | } | 2184 | } |
| 2097 | 2185 | ||
| 2098 | static void sg_remove_sfp_usercontext(struct work_struct *work) | 2186 | static void |
| 2187 | sg_remove_sfp_usercontext(struct work_struct *work) | ||
| 2099 | { | 2188 | { |
| 2100 | struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); | 2189 | struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); |
| 2101 | struct sg_device *sdp = sfp->parentdp; | 2190 | struct sg_device *sdp = sfp->parentdp; |
| @@ -2105,34 +2194,32 @@ static void sg_remove_sfp_usercontext(struct work_struct *work) | |||
| 2105 | sg_finish_rem_req(sfp->headrp); | 2194 | sg_finish_rem_req(sfp->headrp); |
| 2106 | 2195 | ||
| 2107 | if (sfp->reserve.bufflen > 0) { | 2196 | if (sfp->reserve.bufflen > 0) { |
| 2108 | SCSI_LOG_TIMEOUT(6, | 2197 | SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, |
| 2109 | printk("sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", | 2198 | "sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", |
| 2110 | (int) sfp->reserve.bufflen, | 2199 | (int) sfp->reserve.bufflen, |
| 2111 | (int) sfp->reserve.k_use_sg)); | 2200 | (int) sfp->reserve.k_use_sg)); |
| 2112 | sg_remove_scat(&sfp->reserve); | 2201 | sg_remove_scat(sfp, &sfp->reserve); |
| 2113 | } | 2202 | } |
| 2114 | 2203 | ||
| 2115 | SCSI_LOG_TIMEOUT(6, | 2204 | SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, |
| 2116 | printk("sg_remove_sfp: %s, sfp=0x%p\n", | 2205 | "sg_remove_sfp: sfp=0x%p\n", sfp)); |
| 2117 | sdp->disk->disk_name, | ||
| 2118 | sfp)); | ||
| 2119 | kfree(sfp); | 2206 | kfree(sfp); |
| 2120 | 2207 | ||
| 2121 | scsi_device_put(sdp->device); | 2208 | scsi_device_put(sdp->device); |
| 2122 | sg_put_dev(sdp); | 2209 | kref_put(&sdp->d_ref, sg_device_destroy); |
| 2123 | module_put(THIS_MODULE); | 2210 | module_put(THIS_MODULE); |
| 2124 | } | 2211 | } |
| 2125 | 2212 | ||
| 2126 | static void sg_remove_sfp(struct kref *kref) | 2213 | static void |
| 2214 | sg_remove_sfp(struct kref *kref) | ||
| 2127 | { | 2215 | { |
| 2128 | struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref); | 2216 | struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref); |
| 2129 | struct sg_device *sdp = sfp->parentdp; | 2217 | struct sg_device *sdp = sfp->parentdp; |
| 2130 | unsigned long iflags; | 2218 | unsigned long iflags; |
| 2131 | 2219 | ||
| 2132 | write_lock_irqsave(&sg_index_lock, iflags); | 2220 | write_lock_irqsave(&sdp->sfd_lock, iflags); |
| 2133 | list_del(&sfp->sfd_siblings); | 2221 | list_del(&sfp->sfd_siblings); |
| 2134 | write_unlock_irqrestore(&sg_index_lock, iflags); | 2222 | write_unlock_irqrestore(&sdp->sfd_lock, iflags); |
| 2135 | wake_up_interruptible(&sdp->o_excl_wait); | ||
| 2136 | 2223 | ||
| 2137 | INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); | 2224 | INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); |
| 2138 | schedule_work(&sfp->ew.work); | 2225 | schedule_work(&sfp->ew.work); |
| @@ -2183,7 +2270,8 @@ static Sg_device *sg_lookup_dev(int dev) | |||
| 2183 | return idr_find(&sg_index_idr, dev); | 2270 | return idr_find(&sg_index_idr, dev); |
| 2184 | } | 2271 | } |
| 2185 | 2272 | ||
| 2186 | static Sg_device *sg_get_dev(int dev) | 2273 | static Sg_device * |
| 2274 | sg_get_dev(int dev) | ||
| 2187 | { | 2275 | { |
| 2188 | struct sg_device *sdp; | 2276 | struct sg_device *sdp; |
| 2189 | unsigned long flags; | 2277 | unsigned long flags; |
| @@ -2192,8 +2280,8 @@ static Sg_device *sg_get_dev(int dev) | |||
| 2192 | sdp = sg_lookup_dev(dev); | 2280 | sdp = sg_lookup_dev(dev); |
| 2193 | if (!sdp) | 2281 | if (!sdp) |
| 2194 | sdp = ERR_PTR(-ENXIO); | 2282 | sdp = ERR_PTR(-ENXIO); |
| 2195 | else if (sdp->detached) { | 2283 | else if (atomic_read(&sdp->detaching)) { |
| 2196 | /* If sdp->detached, then the refcount may already be 0, in | 2284 | /* If sdp->detaching, then the refcount may already be 0, in |
| 2197 | * which case it would be a bug to do kref_get(). | 2285 | * which case it would be a bug to do kref_get(). |
| 2198 | */ | 2286 | */ |
| 2199 | sdp = ERR_PTR(-ENODEV); | 2287 | sdp = ERR_PTR(-ENODEV); |
| @@ -2204,11 +2292,6 @@ static Sg_device *sg_get_dev(int dev) | |||
| 2204 | return sdp; | 2292 | return sdp; |
| 2205 | } | 2293 | } |
| 2206 | 2294 | ||
| 2207 | static void sg_put_dev(struct sg_device *sdp) | ||
| 2208 | { | ||
| 2209 | kref_put(&sdp->d_ref, sg_device_destroy); | ||
| 2210 | } | ||
| 2211 | |||
| 2212 | #ifdef CONFIG_SCSI_PROC_FS | 2295 | #ifdef CONFIG_SCSI_PROC_FS |
| 2213 | 2296 | ||
| 2214 | static struct proc_dir_entry *sg_proc_sgp = NULL; | 2297 | static struct proc_dir_entry *sg_proc_sgp = NULL; |
| @@ -2425,8 +2508,7 @@ static int sg_proc_single_open_version(struct inode *inode, struct file *file) | |||
| 2425 | 2508 | ||
| 2426 | static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v) | 2509 | static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v) |
| 2427 | { | 2510 | { |
| 2428 | seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t" | 2511 | seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n"); |
| 2429 | "online\n"); | ||
| 2430 | return 0; | 2512 | return 0; |
| 2431 | } | 2513 | } |
| 2432 | 2514 | ||
| @@ -2482,16 +2564,19 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v) | |||
| 2482 | 2564 | ||
| 2483 | read_lock_irqsave(&sg_index_lock, iflags); | 2565 | read_lock_irqsave(&sg_index_lock, iflags); |
| 2484 | sdp = it ? sg_lookup_dev(it->index) : NULL; | 2566 | sdp = it ? sg_lookup_dev(it->index) : NULL; |
| 2485 | if (sdp && (scsidp = sdp->device) && (!sdp->detached)) | 2567 | if ((NULL == sdp) || (NULL == sdp->device) || |
| 2486 | seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n", | 2568 | (atomic_read(&sdp->detaching))) |
| 2569 | seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n"); | ||
| 2570 | else { | ||
| 2571 | scsidp = sdp->device; | ||
| 2572 | seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n", | ||
| 2487 | scsidp->host->host_no, scsidp->channel, | 2573 | scsidp->host->host_no, scsidp->channel, |
| 2488 | scsidp->id, scsidp->lun, (int) scsidp->type, | 2574 | scsidp->id, scsidp->lun, (int) scsidp->type, |
| 2489 | 1, | 2575 | 1, |
| 2490 | (int) scsidp->queue_depth, | 2576 | (int) scsidp->queue_depth, |
| 2491 | (int) scsidp->device_busy, | 2577 | (int) atomic_read(&scsidp->device_busy), |
| 2492 | (int) scsi_device_online(scsidp)); | 2578 | (int) scsi_device_online(scsidp)); |
| 2493 | else | 2579 | } |
| 2494 | seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n"); | ||
| 2495 | read_unlock_irqrestore(&sg_index_lock, iflags); | 2580 | read_unlock_irqrestore(&sg_index_lock, iflags); |
| 2496 | return 0; | 2581 | return 0; |
| 2497 | } | 2582 | } |
| @@ -2510,11 +2595,12 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v) | |||
| 2510 | 2595 | ||
| 2511 | read_lock_irqsave(&sg_index_lock, iflags); | 2596 | read_lock_irqsave(&sg_index_lock, iflags); |
| 2512 | sdp = it ? sg_lookup_dev(it->index) : NULL; | 2597 | sdp = it ? sg_lookup_dev(it->index) : NULL; |
| 2513 | if (sdp && (scsidp = sdp->device) && (!sdp->detached)) | 2598 | scsidp = sdp ? sdp->device : NULL; |
| 2599 | if (sdp && scsidp && (!atomic_read(&sdp->detaching))) | ||
| 2514 | seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n", | 2600 | seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n", |
| 2515 | scsidp->vendor, scsidp->model, scsidp->rev); | 2601 | scsidp->vendor, scsidp->model, scsidp->rev); |
| 2516 | else | 2602 | else |
| 2517 | seq_printf(s, "<no active device>\n"); | 2603 | seq_puts(s, "<no active device>\n"); |
| 2518 | read_unlock_irqrestore(&sg_index_lock, iflags); | 2604 | read_unlock_irqrestore(&sg_index_lock, iflags); |
| 2519 | return 0; | 2605 | return 0; |
| 2520 | } | 2606 | } |
| @@ -2559,12 +2645,12 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) | |||
| 2559 | else | 2645 | else |
| 2560 | cp = " "; | 2646 | cp = " "; |
| 2561 | } | 2647 | } |
| 2562 | seq_printf(s, cp); | 2648 | seq_puts(s, cp); |
| 2563 | blen = srp->data.bufflen; | 2649 | blen = srp->data.bufflen; |
| 2564 | usg = srp->data.k_use_sg; | 2650 | usg = srp->data.k_use_sg; |
| 2565 | seq_printf(s, srp->done ? | 2651 | seq_puts(s, srp->done ? |
| 2566 | ((1 == srp->done) ? "rcv:" : "fin:") | 2652 | ((1 == srp->done) ? "rcv:" : "fin:") |
| 2567 | : "act:"); | 2653 | : "act:"); |
| 2568 | seq_printf(s, " id=%d blen=%d", | 2654 | seq_printf(s, " id=%d blen=%d", |
| 2569 | srp->header.pack_id, blen); | 2655 | srp->header.pack_id, blen); |
| 2570 | if (srp->done) | 2656 | if (srp->done) |
| @@ -2580,7 +2666,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) | |||
| 2580 | (int) srp->data.cmd_opcode); | 2666 | (int) srp->data.cmd_opcode); |
| 2581 | } | 2667 | } |
| 2582 | if (0 == m) | 2668 | if (0 == m) |
| 2583 | seq_printf(s, " No requests active\n"); | 2669 | seq_puts(s, " No requests active\n"); |
| 2584 | read_unlock(&fp->rq_list_lock); | 2670 | read_unlock(&fp->rq_list_lock); |
| 2585 | } | 2671 | } |
| 2586 | } | 2672 | } |
| @@ -2596,31 +2682,34 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v) | |||
| 2596 | Sg_device *sdp; | 2682 | Sg_device *sdp; |
| 2597 | unsigned long iflags; | 2683 | unsigned long iflags; |
| 2598 | 2684 | ||
| 2599 | if (it && (0 == it->index)) { | 2685 | if (it && (0 == it->index)) |
| 2600 | seq_printf(s, "max_active_device=%d(origin 1)\n", | 2686 | seq_printf(s, "max_active_device=%d def_reserved_size=%d\n", |
| 2601 | (int)it->max); | 2687 | (int)it->max, sg_big_buff); |
| 2602 | seq_printf(s, " def_reserved_size=%d\n", sg_big_buff); | ||
| 2603 | } | ||
| 2604 | 2688 | ||
| 2605 | read_lock_irqsave(&sg_index_lock, iflags); | 2689 | read_lock_irqsave(&sg_index_lock, iflags); |
| 2606 | sdp = it ? sg_lookup_dev(it->index) : NULL; | 2690 | sdp = it ? sg_lookup_dev(it->index) : NULL; |
| 2607 | if (sdp && !list_empty(&sdp->sfds)) { | 2691 | if (NULL == sdp) |
| 2608 | struct scsi_device *scsidp = sdp->device; | 2692 | goto skip; |
| 2609 | 2693 | read_lock(&sdp->sfd_lock); | |
| 2694 | if (!list_empty(&sdp->sfds)) { | ||
| 2610 | seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); | 2695 | seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); |
| 2611 | if (sdp->detached) | 2696 | if (atomic_read(&sdp->detaching)) |
| 2612 | seq_printf(s, "detached pending close "); | 2697 | seq_puts(s, "detaching pending close "); |
| 2613 | else | 2698 | else if (sdp->device) { |
| 2614 | seq_printf | 2699 | struct scsi_device *scsidp = sdp->device; |
| 2615 | (s, "scsi%d chan=%d id=%d lun=%d em=%d", | 2700 | |
| 2616 | scsidp->host->host_no, | 2701 | seq_printf(s, "%d:%d:%d:%llu em=%d", |
| 2617 | scsidp->channel, scsidp->id, | 2702 | scsidp->host->host_no, |
| 2618 | scsidp->lun, | 2703 | scsidp->channel, scsidp->id, |
| 2619 | scsidp->host->hostt->emulated); | 2704 | scsidp->lun, |
| 2620 | seq_printf(s, " sg_tablesize=%d excl=%d\n", | 2705 | scsidp->host->hostt->emulated); |
| 2621 | sdp->sg_tablesize, get_exclude(sdp)); | 2706 | } |
| 2707 | seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n", | ||
| 2708 | sdp->sg_tablesize, sdp->exclude, sdp->open_cnt); | ||
| 2622 | sg_proc_debug_helper(s, sdp); | 2709 | sg_proc_debug_helper(s, sdp); |
| 2623 | } | 2710 | } |
| 2711 | read_unlock(&sdp->sfd_lock); | ||
| 2712 | skip: | ||
| 2624 | read_unlock_irqrestore(&sg_index_lock, iflags); | 2713 | read_unlock_irqrestore(&sg_index_lock, iflags); |
| 2625 | return 0; | 2714 | return 0; |
| 2626 | } | 2715 | } |
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 93cbd36c990b..7eeb93627beb 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c | |||
| @@ -292,8 +292,8 @@ do_tur: | |||
| 292 | if (!cd->tur_changed) { | 292 | if (!cd->tur_changed) { |
| 293 | if (cd->get_event_changed) { | 293 | if (cd->get_event_changed) { |
| 294 | if (cd->tur_mismatch++ > 8) { | 294 | if (cd->tur_mismatch++ > 8) { |
| 295 | sdev_printk(KERN_WARNING, cd->device, | 295 | sr_printk(KERN_WARNING, cd, |
| 296 | "GET_EVENT and TUR disagree continuously, suppress GET_EVENT events\n"); | 296 | "GET_EVENT and TUR disagree continuously, suppress GET_EVENT events\n"); |
| 297 | cd->ignore_get_event = true; | 297 | cd->ignore_get_event = true; |
| 298 | } | 298 | } |
| 299 | } else { | 299 | } else { |
| @@ -322,7 +322,7 @@ static int sr_done(struct scsi_cmnd *SCpnt) | |||
| 322 | struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk); | 322 | struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk); |
| 323 | 323 | ||
| 324 | #ifdef DEBUG | 324 | #ifdef DEBUG |
| 325 | printk("sr.c done: %x\n", result); | 325 | scmd_printk(KERN_INFO, SCpnt, "done: %x\n", result); |
| 326 | #endif | 326 | #endif |
| 327 | 327 | ||
| 328 | /* | 328 | /* |
| @@ -385,10 +385,9 @@ static int sr_init_command(struct scsi_cmnd *SCpnt) | |||
| 385 | int block = 0, this_count, s_size; | 385 | int block = 0, this_count, s_size; |
| 386 | struct scsi_cd *cd; | 386 | struct scsi_cd *cd; |
| 387 | struct request *rq = SCpnt->request; | 387 | struct request *rq = SCpnt->request; |
| 388 | struct scsi_device *sdp = SCpnt->device; | ||
| 389 | int ret; | 388 | int ret; |
| 390 | 389 | ||
| 391 | ret = scsi_setup_fs_cmnd(sdp, rq); | 390 | ret = scsi_init_io(SCpnt, GFP_ATOMIC); |
| 392 | if (ret != BLKPREP_OK) | 391 | if (ret != BLKPREP_OK) |
| 393 | goto out; | 392 | goto out; |
| 394 | SCpnt = rq->special; | 393 | SCpnt = rq->special; |
| @@ -398,13 +397,14 @@ static int sr_init_command(struct scsi_cmnd *SCpnt) | |||
| 398 | * is used for a killable error condition */ | 397 | * is used for a killable error condition */ |
| 399 | ret = BLKPREP_KILL; | 398 | ret = BLKPREP_KILL; |
| 400 | 399 | ||
| 401 | SCSI_LOG_HLQUEUE(1, printk("Doing sr request, dev = %s, block = %d\n", | 400 | SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt, |
| 402 | cd->disk->disk_name, block)); | 401 | "Doing sr request, block = %d\n", block)); |
| 403 | 402 | ||
| 404 | if (!cd->device || !scsi_device_online(cd->device)) { | 403 | if (!cd->device || !scsi_device_online(cd->device)) { |
| 405 | SCSI_LOG_HLQUEUE(2, printk("Finishing %u sectors\n", | 404 | SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, |
| 406 | blk_rq_sectors(rq))); | 405 | "Finishing %u sectors\n", blk_rq_sectors(rq))); |
| 407 | SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); | 406 | SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, |
| 407 | "Retry with 0x%p\n", SCpnt)); | ||
| 408 | goto out; | 408 | goto out; |
| 409 | } | 409 | } |
| 410 | 410 | ||
| @@ -425,7 +425,8 @@ static int sr_init_command(struct scsi_cmnd *SCpnt) | |||
| 425 | if (!in_interrupt()) | 425 | if (!in_interrupt()) |
| 426 | sr_set_blocklength(cd, 2048); | 426 | sr_set_blocklength(cd, 2048); |
| 427 | else | 427 | else |
| 428 | printk("sr: can't switch blocksize: in interrupt\n"); | 428 | scmd_printk(KERN_INFO, SCpnt, |
| 429 | "can't switch blocksize: in interrupt\n"); | ||
| 429 | } | 430 | } |
| 430 | 431 | ||
| 431 | if (s_size != 512 && s_size != 1024 && s_size != 2048) { | 432 | if (s_size != 512 && s_size != 1024 && s_size != 2048) { |
| @@ -434,14 +435,12 @@ static int sr_init_command(struct scsi_cmnd *SCpnt) | |||
| 434 | } | 435 | } |
| 435 | 436 | ||
| 436 | if (rq_data_dir(rq) == WRITE) { | 437 | if (rq_data_dir(rq) == WRITE) { |
| 437 | if (!cd->device->writeable) | 438 | if (!cd->writeable) |
| 438 | goto out; | 439 | goto out; |
| 439 | SCpnt->cmnd[0] = WRITE_10; | 440 | SCpnt->cmnd[0] = WRITE_10; |
| 440 | SCpnt->sc_data_direction = DMA_TO_DEVICE; | 441 | cd->cdi.media_written = 1; |
| 441 | cd->cdi.media_written = 1; | ||
| 442 | } else if (rq_data_dir(rq) == READ) { | 442 | } else if (rq_data_dir(rq) == READ) { |
| 443 | SCpnt->cmnd[0] = READ_10; | 443 | SCpnt->cmnd[0] = READ_10; |
| 444 | SCpnt->sc_data_direction = DMA_FROM_DEVICE; | ||
| 445 | } else { | 444 | } else { |
| 446 | blk_dump_rq_flags(rq, "Unknown sr command"); | 445 | blk_dump_rq_flags(rq, "Unknown sr command"); |
| 447 | goto out; | 446 | goto out; |
| @@ -475,11 +474,11 @@ static int sr_init_command(struct scsi_cmnd *SCpnt) | |||
| 475 | this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9); | 474 | this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9); |
| 476 | 475 | ||
| 477 | 476 | ||
| 478 | SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%u 512 byte blocks.\n", | 477 | SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, |
| 479 | cd->cdi.name, | 478 | "%s %d/%u 512 byte blocks.\n", |
| 480 | (rq_data_dir(rq) == WRITE) ? | 479 | (rq_data_dir(rq) == WRITE) ? |
| 481 | "writing" : "reading", | 480 | "writing" : "reading", |
| 482 | this_count, blk_rq_sectors(rq))); | 481 | this_count, blk_rq_sectors(rq))); |
| 483 | 482 | ||
| 484 | SCpnt->cmnd[1] = 0; | 483 | SCpnt->cmnd[1] = 0; |
| 485 | block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9); | 484 | block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9); |
| @@ -810,8 +809,8 @@ static void get_sectorsize(struct scsi_cd *cd) | |||
| 810 | case 512: | 809 | case 512: |
| 811 | break; | 810 | break; |
| 812 | default: | 811 | default: |
| 813 | printk("%s: unsupported sector size %d.\n", | 812 | sr_printk(KERN_INFO, cd, |
| 814 | cd->cdi.name, sector_size); | 813 | "unsupported sector size %d.", sector_size); |
| 815 | cd->capacity = 0; | 814 | cd->capacity = 0; |
| 816 | } | 815 | } |
| 817 | 816 | ||
| @@ -853,7 +852,7 @@ static void get_capabilities(struct scsi_cd *cd) | |||
| 853 | /* allocate transfer buffer */ | 852 | /* allocate transfer buffer */ |
| 854 | buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); | 853 | buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); |
| 855 | if (!buffer) { | 854 | if (!buffer) { |
| 856 | printk(KERN_ERR "sr: out of memory.\n"); | 855 | sr_printk(KERN_ERR, cd, "out of memory.\n"); |
| 857 | return; | 856 | return; |
| 858 | } | 857 | } |
| 859 | 858 | ||
| @@ -872,7 +871,7 @@ static void get_capabilities(struct scsi_cd *cd) | |||
| 872 | CDC_SELECT_DISC | CDC_SELECT_SPEED | | 871 | CDC_SELECT_DISC | CDC_SELECT_SPEED | |
| 873 | CDC_MRW | CDC_MRW_W | CDC_RAM); | 872 | CDC_MRW | CDC_MRW_W | CDC_RAM); |
| 874 | kfree(buffer); | 873 | kfree(buffer); |
| 875 | printk("%s: scsi-1 drive\n", cd->cdi.name); | 874 | sr_printk(KERN_INFO, cd, "scsi-1 drive"); |
| 876 | return; | 875 | return; |
| 877 | } | 876 | } |
| 878 | 877 | ||
| @@ -881,22 +880,23 @@ static void get_capabilities(struct scsi_cd *cd) | |||
| 881 | cd->readcd_known = 1; | 880 | cd->readcd_known = 1; |
| 882 | cd->readcd_cdda = buffer[n + 5] & 0x01; | 881 | cd->readcd_cdda = buffer[n + 5] & 0x01; |
| 883 | /* print some capability bits */ | 882 | /* print some capability bits */ |
| 884 | printk("%s: scsi3-mmc drive: %dx/%dx %s%s%s%s%s%s\n", cd->cdi.name, | 883 | sr_printk(KERN_INFO, cd, |
| 885 | ((buffer[n + 14] << 8) + buffer[n + 15]) / 176, | 884 | "scsi3-mmc drive: %dx/%dx %s%s%s%s%s%s\n", |
| 886 | cd->cdi.speed, | 885 | ((buffer[n + 14] << 8) + buffer[n + 15]) / 176, |
| 887 | buffer[n + 3] & 0x01 ? "writer " : "", /* CD Writer */ | 886 | cd->cdi.speed, |
| 888 | buffer[n + 3] & 0x20 ? "dvd-ram " : "", | 887 | buffer[n + 3] & 0x01 ? "writer " : "", /* CD Writer */ |
| 889 | buffer[n + 2] & 0x02 ? "cd/rw " : "", /* can read rewriteable */ | 888 | buffer[n + 3] & 0x20 ? "dvd-ram " : "", |
| 890 | buffer[n + 4] & 0x20 ? "xa/form2 " : "", /* can read xa/from2 */ | 889 | buffer[n + 2] & 0x02 ? "cd/rw " : "", /* can read rewriteable */ |
| 891 | buffer[n + 5] & 0x01 ? "cdda " : "", /* can read audio data */ | 890 | buffer[n + 4] & 0x20 ? "xa/form2 " : "", /* can read xa/from2 */ |
| 892 | loadmech[buffer[n + 6] >> 5]); | 891 | buffer[n + 5] & 0x01 ? "cdda " : "", /* can read audio data */ |
| 892 | loadmech[buffer[n + 6] >> 5]); | ||
| 893 | if ((buffer[n + 6] >> 5) == 0) | 893 | if ((buffer[n + 6] >> 5) == 0) |
| 894 | /* caddy drives can't close tray... */ | 894 | /* caddy drives can't close tray... */ |
| 895 | cd->cdi.mask |= CDC_CLOSE_TRAY; | 895 | cd->cdi.mask |= CDC_CLOSE_TRAY; |
| 896 | if ((buffer[n + 2] & 0x8) == 0) | 896 | if ((buffer[n + 2] & 0x8) == 0) |
| 897 | /* not a DVD drive */ | 897 | /* not a DVD drive */ |
| 898 | cd->cdi.mask |= CDC_DVD; | 898 | cd->cdi.mask |= CDC_DVD; |
| 899 | if ((buffer[n + 3] & 0x20) == 0) | 899 | if ((buffer[n + 3] & 0x20) == 0) |
| 900 | /* can't write DVD-RAM media */ | 900 | /* can't write DVD-RAM media */ |
| 901 | cd->cdi.mask |= CDC_DVD_RAM; | 901 | cd->cdi.mask |= CDC_DVD_RAM; |
| 902 | if ((buffer[n + 3] & 0x10) == 0) | 902 | if ((buffer[n + 3] & 0x10) == 0) |
| @@ -927,7 +927,7 @@ static void get_capabilities(struct scsi_cd *cd) | |||
| 927 | */ | 927 | */ |
| 928 | if ((cd->cdi.mask & (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) != | 928 | if ((cd->cdi.mask & (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) != |
| 929 | (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) { | 929 | (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) { |
| 930 | cd->device->writeable = 1; | 930 | cd->writeable = 1; |
| 931 | } | 931 | } |
| 932 | 932 | ||
| 933 | kfree(buffer); | 933 | kfree(buffer); |
| @@ -935,7 +935,7 @@ static void get_capabilities(struct scsi_cd *cd) | |||
| 935 | 935 | ||
| 936 | /* | 936 | /* |
| 937 | * sr_packet() is the entry point for the generic commands generated | 937 | * sr_packet() is the entry point for the generic commands generated |
| 938 | * by the Uniform CD-ROM layer. | 938 | * by the Uniform CD-ROM layer. |
| 939 | */ | 939 | */ |
| 940 | static int sr_packet(struct cdrom_device_info *cdi, | 940 | static int sr_packet(struct cdrom_device_info *cdi, |
| 941 | struct packet_command *cgc) | 941 | struct packet_command *cgc) |
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h index 37c8f6b17510..1d1f6f416c59 100644 --- a/drivers/scsi/sr.h +++ b/drivers/scsi/sr.h | |||
| @@ -36,6 +36,7 @@ typedef struct scsi_cd { | |||
| 36 | struct scsi_device *device; | 36 | struct scsi_device *device; |
| 37 | unsigned int vendor; /* vendor code, see sr_vendor.c */ | 37 | unsigned int vendor; /* vendor code, see sr_vendor.c */ |
| 38 | unsigned long ms_offset; /* for reading multisession-CD's */ | 38 | unsigned long ms_offset; /* for reading multisession-CD's */ |
| 39 | unsigned writeable : 1; | ||
| 39 | unsigned use:1; /* is this device still supportable */ | 40 | unsigned use:1; /* is this device still supportable */ |
| 40 | unsigned xa_flag:1; /* CD has XA sectors ? */ | 41 | unsigned xa_flag:1; /* CD has XA sectors ? */ |
| 41 | unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */ | 42 | unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */ |
| @@ -55,6 +56,10 @@ typedef struct scsi_cd { | |||
| 55 | struct gendisk *disk; | 56 | struct gendisk *disk; |
| 56 | } Scsi_CD; | 57 | } Scsi_CD; |
| 57 | 58 | ||
| 59 | #define sr_printk(prefix, cd, fmt, a...) \ | ||
| 60 | sdev_printk(prefix, (cd)->device, "[%s] " fmt, \ | ||
| 61 | (cd)->cdi.name, ##a) | ||
| 62 | |||
| 58 | int sr_do_ioctl(Scsi_CD *, struct packet_command *); | 63 | int sr_do_ioctl(Scsi_CD *, struct packet_command *); |
| 59 | 64 | ||
| 60 | int sr_lock_door(struct cdrom_device_info *, int); | 65 | int sr_lock_door(struct cdrom_device_info *, int); |
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index a3911c39ea50..6389fcff12ec 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c | |||
| @@ -36,7 +36,6 @@ module_param(xa_test, int, S_IRUGO | S_IWUSR); | |||
| 36 | * the status of the unchecked_isa_dma flag in the host structure */ | 36 | * the status of the unchecked_isa_dma flag in the host structure */ |
| 37 | #define SR_GFP_DMA(cd) (((cd)->device->host->unchecked_isa_dma) ? GFP_DMA : 0) | 37 | #define SR_GFP_DMA(cd) (((cd)->device->host->unchecked_isa_dma) ? GFP_DMA : 0) |
| 38 | 38 | ||
| 39 | |||
| 40 | static int sr_read_tochdr(struct cdrom_device_info *cdi, | 39 | static int sr_read_tochdr(struct cdrom_device_info *cdi, |
| 41 | struct cdrom_tochdr *tochdr) | 40 | struct cdrom_tochdr *tochdr) |
| 42 | { | 41 | { |
| @@ -219,7 +218,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) | |||
| 219 | case UNIT_ATTENTION: | 218 | case UNIT_ATTENTION: |
| 220 | SDev->changed = 1; | 219 | SDev->changed = 1; |
| 221 | if (!cgc->quiet) | 220 | if (!cgc->quiet) |
| 222 | printk(KERN_INFO "%s: disc change detected.\n", cd->cdi.name); | 221 | sr_printk(KERN_INFO, cd, |
| 222 | "disc change detected.\n"); | ||
| 223 | if (retries++ < 10) | 223 | if (retries++ < 10) |
| 224 | goto retry; | 224 | goto retry; |
| 225 | err = -ENOMEDIUM; | 225 | err = -ENOMEDIUM; |
| @@ -229,7 +229,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) | |||
| 229 | sshdr.ascq == 0x01) { | 229 | sshdr.ascq == 0x01) { |
| 230 | /* sense: Logical unit is in process of becoming ready */ | 230 | /* sense: Logical unit is in process of becoming ready */ |
| 231 | if (!cgc->quiet) | 231 | if (!cgc->quiet) |
| 232 | printk(KERN_INFO "%s: CDROM not ready yet.\n", cd->cdi.name); | 232 | sr_printk(KERN_INFO, cd, |
| 233 | "CDROM not ready yet.\n"); | ||
| 233 | if (retries++ < 10) { | 234 | if (retries++ < 10) { |
| 234 | /* sleep 2 sec and try again */ | 235 | /* sleep 2 sec and try again */ |
| 235 | ssleep(2); | 236 | ssleep(2); |
| @@ -241,7 +242,9 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) | |||
| 241 | } | 242 | } |
| 242 | } | 243 | } |
| 243 | if (!cgc->quiet) | 244 | if (!cgc->quiet) |
| 244 | printk(KERN_INFO "%s: CDROM not ready. Make sure there is a disc in the drive.\n", cd->cdi.name); | 245 | sr_printk(KERN_INFO, cd, |
| 246 | "CDROM not ready. Make sure there " | ||
| 247 | "is a disc in the drive.\n"); | ||
| 245 | #ifdef DEBUG | 248 | #ifdef DEBUG |
| 246 | scsi_print_sense_hdr("sr", &sshdr); | 249 | scsi_print_sense_hdr("sr", &sshdr); |
| 247 | #endif | 250 | #endif |
| @@ -259,7 +262,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) | |||
| 259 | #endif | 262 | #endif |
| 260 | break; | 263 | break; |
| 261 | default: | 264 | default: |
| 262 | printk(KERN_ERR "%s: CDROM (ioctl) error, command: ", cd->cdi.name); | 265 | sr_printk(KERN_ERR, cd, |
| 266 | "CDROM (ioctl) error, command: "); | ||
| 263 | __scsi_print_command(cgc->cmd); | 267 | __scsi_print_command(cgc->cmd); |
| 264 | scsi_print_sense_hdr("sr", &sshdr); | 268 | scsi_print_sense_hdr("sr", &sshdr); |
| 265 | err = -EIO; | 269 | err = -EIO; |
| @@ -491,8 +495,8 @@ static int sr_read_cd(Scsi_CD *cd, unsigned char *dest, int lba, int format, int | |||
| 491 | struct packet_command cgc; | 495 | struct packet_command cgc; |
| 492 | 496 | ||
| 493 | #ifdef DEBUG | 497 | #ifdef DEBUG |
| 494 | printk("%s: sr_read_cd lba=%d format=%d blksize=%d\n", | 498 | sr_printk(KERN_INFO, cd, "sr_read_cd lba=%d format=%d blksize=%d\n", |
| 495 | cd->cdi.name, lba, format, blksize); | 499 | lba, format, blksize); |
| 496 | #endif | 500 | #endif |
| 497 | 501 | ||
| 498 | memset(&cgc, 0, sizeof(struct packet_command)); | 502 | memset(&cgc, 0, sizeof(struct packet_command)); |
| @@ -539,7 +543,8 @@ static int sr_read_sector(Scsi_CD *cd, int lba, int blksize, unsigned char *dest | |||
| 539 | if (-EDRIVE_CANT_DO_THIS != rc) | 543 | if (-EDRIVE_CANT_DO_THIS != rc) |
| 540 | return rc; | 544 | return rc; |
| 541 | cd->readcd_known = 0; | 545 | cd->readcd_known = 0; |
| 542 | printk("CDROM does'nt support READ CD (0xbe) command\n"); | 546 | sr_printk(KERN_INFO, cd, |
| 547 | "CDROM does'nt support READ CD (0xbe) command\n"); | ||
| 543 | /* fall & retry the other way */ | 548 | /* fall & retry the other way */ |
| 544 | } | 549 | } |
| 545 | /* ... if this fails, we switch the blocksize using MODE SELECT */ | 550 | /* ... if this fails, we switch the blocksize using MODE SELECT */ |
| @@ -548,7 +553,8 @@ static int sr_read_sector(Scsi_CD *cd, int lba, int blksize, unsigned char *dest | |||
| 548 | return rc; | 553 | return rc; |
| 549 | } | 554 | } |
| 550 | #ifdef DEBUG | 555 | #ifdef DEBUG |
| 551 | printk("%s: sr_read_sector lba=%d blksize=%d\n", cd->cdi.name, lba, blksize); | 556 | sr_printk(KERN_INFO, cd, "sr_read_sector lba=%d blksize=%d\n", |
| 557 | lba, blksize); | ||
| 552 | #endif | 558 | #endif |
| 553 | 559 | ||
| 554 | memset(&cgc, 0, sizeof(struct packet_command)); | 560 | memset(&cgc, 0, sizeof(struct packet_command)); |
| @@ -592,7 +598,7 @@ int sr_is_xa(Scsi_CD *cd) | |||
| 592 | } | 598 | } |
| 593 | kfree(raw_sector); | 599 | kfree(raw_sector); |
| 594 | #ifdef DEBUG | 600 | #ifdef DEBUG |
| 595 | printk("%s: sr_is_xa: %d\n", cd->cdi.name, is_xa); | 601 | sr_printk(KERN_INFO, cd, "sr_is_xa: %d\n", is_xa); |
| 596 | #endif | 602 | #endif |
| 597 | return is_xa; | 603 | return is_xa; |
| 598 | } | 604 | } |
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c index 92cc2efb25d7..11a238cb2222 100644 --- a/drivers/scsi/sr_vendor.c +++ b/drivers/scsi/sr_vendor.c | |||
| @@ -123,7 +123,7 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength) | |||
| 123 | return -ENOMEM; | 123 | return -ENOMEM; |
| 124 | 124 | ||
| 125 | #ifdef DEBUG | 125 | #ifdef DEBUG |
| 126 | printk("%s: MODE SELECT 0x%x/%d\n", cd->cdi.name, density, blocklength); | 126 | sr_printk(KERN_INFO, cd, "MODE SELECT 0x%x/%d\n", density, blocklength); |
| 127 | #endif | 127 | #endif |
| 128 | memset(&cgc, 0, sizeof(struct packet_command)); | 128 | memset(&cgc, 0, sizeof(struct packet_command)); |
| 129 | cgc.cmd[0] = MODE_SELECT; | 129 | cgc.cmd[0] = MODE_SELECT; |
| @@ -144,8 +144,9 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength) | |||
| 144 | } | 144 | } |
| 145 | #ifdef DEBUG | 145 | #ifdef DEBUG |
| 146 | else | 146 | else |
| 147 | printk("%s: switching blocklength to %d bytes failed\n", | 147 | sr_printk(KERN_INFO, cd, |
| 148 | cd->cdi.name, blocklength); | 148 | "switching blocklength to %d bytes failed\n", |
| 149 | blocklength); | ||
| 149 | #endif | 150 | #endif |
| 150 | kfree(buffer); | 151 | kfree(buffer); |
| 151 | return rc; | 152 | return rc; |
| @@ -190,8 +191,8 @@ int sr_cd_check(struct cdrom_device_info *cdi) | |||
| 190 | if (rc != 0) | 191 | if (rc != 0) |
| 191 | break; | 192 | break; |
| 192 | if ((buffer[0] << 8) + buffer[1] < 0x0a) { | 193 | if ((buffer[0] << 8) + buffer[1] < 0x0a) { |
| 193 | printk(KERN_INFO "%s: Hmm, seems the drive " | 194 | sr_printk(KERN_INFO, cd, "Hmm, seems the drive " |
| 194 | "doesn't support multisession CD's\n", cd->cdi.name); | 195 | "doesn't support multisession CD's\n"); |
| 195 | no_multi = 1; | 196 | no_multi = 1; |
| 196 | break; | 197 | break; |
| 197 | } | 198 | } |
| @@ -218,9 +219,9 @@ int sr_cd_check(struct cdrom_device_info *cdi) | |||
| 218 | if (rc != 0) | 219 | if (rc != 0) |
| 219 | break; | 220 | break; |
| 220 | if (buffer[14] != 0 && buffer[14] != 0xb0) { | 221 | if (buffer[14] != 0 && buffer[14] != 0xb0) { |
| 221 | printk(KERN_INFO "%s: Hmm, seems the cdrom " | 222 | sr_printk(KERN_INFO, cd, "Hmm, seems the cdrom " |
| 222 | "doesn't support multisession CD's\n", | 223 | "doesn't support multisession CD's\n"); |
| 223 | cd->cdi.name); | 224 | |
| 224 | no_multi = 1; | 225 | no_multi = 1; |
| 225 | break; | 226 | break; |
| 226 | } | 227 | } |
| @@ -245,9 +246,8 @@ int sr_cd_check(struct cdrom_device_info *cdi) | |||
| 245 | cgc.timeout = VENDOR_TIMEOUT; | 246 | cgc.timeout = VENDOR_TIMEOUT; |
| 246 | rc = sr_do_ioctl(cd, &cgc); | 247 | rc = sr_do_ioctl(cd, &cgc); |
| 247 | if (rc == -EINVAL) { | 248 | if (rc == -EINVAL) { |
| 248 | printk(KERN_INFO "%s: Hmm, seems the drive " | 249 | sr_printk(KERN_INFO, cd, "Hmm, seems the drive " |
| 249 | "doesn't support multisession CD's\n", | 250 | "doesn't support multisession CD's\n"); |
| 250 | cd->cdi.name); | ||
| 251 | no_multi = 1; | 251 | no_multi = 1; |
| 252 | break; | 252 | break; |
| 253 | } | 253 | } |
| @@ -277,8 +277,8 @@ int sr_cd_check(struct cdrom_device_info *cdi) | |||
| 277 | break; | 277 | break; |
| 278 | } | 278 | } |
| 279 | if ((rc = buffer[2]) == 0) { | 279 | if ((rc = buffer[2]) == 0) { |
| 280 | printk(KERN_WARNING | 280 | sr_printk(KERN_WARNING, cd, |
| 281 | "%s: No finished session\n", cd->cdi.name); | 281 | "No finished session\n"); |
| 282 | break; | 282 | break; |
| 283 | } | 283 | } |
| 284 | cgc.cmd[0] = READ_TOC; /* Read TOC */ | 284 | cgc.cmd[0] = READ_TOC; /* Read TOC */ |
| @@ -301,9 +301,9 @@ int sr_cd_check(struct cdrom_device_info *cdi) | |||
| 301 | 301 | ||
| 302 | default: | 302 | default: |
| 303 | /* should not happen */ | 303 | /* should not happen */ |
| 304 | printk(KERN_WARNING | 304 | sr_printk(KERN_WARNING, cd, |
| 305 | "%s: unknown vendor code (%i), not initialized ?\n", | 305 | "unknown vendor code (%i), not initialized ?\n", |
| 306 | cd->cdi.name, cd->vendor); | 306 | cd->vendor); |
| 307 | sector = 0; | 307 | sector = 0; |
| 308 | no_multi = 1; | 308 | no_multi = 1; |
| 309 | break; | 309 | break; |
| @@ -321,8 +321,8 @@ int sr_cd_check(struct cdrom_device_info *cdi) | |||
| 321 | 321 | ||
| 322 | #ifdef DEBUG | 322 | #ifdef DEBUG |
| 323 | if (sector) | 323 | if (sector) |
| 324 | printk(KERN_DEBUG "%s: multisession offset=%lu\n", | 324 | sr_printk(KERN_DEBUG, cd, "multisession offset=%lu\n", |
| 325 | cd->cdi.name, sector); | 325 | sector); |
| 326 | #endif | 326 | #endif |
| 327 | kfree(buffer); | 327 | kfree(buffer); |
| 328 | return rc; | 328 | return rc; |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 14eb4b256a03..aff9689de0f7 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
| @@ -58,11 +58,11 @@ static const char *verstr = "20101219"; | |||
| 58 | is defined and non-zero. */ | 58 | is defined and non-zero. */ |
| 59 | #define DEBUG 0 | 59 | #define DEBUG 0 |
| 60 | 60 | ||
| 61 | #define ST_DEB_MSG KERN_NOTICE | ||
| 61 | #if DEBUG | 62 | #if DEBUG |
| 62 | /* The message level for the debug messages is currently set to KERN_NOTICE | 63 | /* The message level for the debug messages is currently set to KERN_NOTICE |
| 63 | so that people can easily see the messages. Later when the debugging messages | 64 | so that people can easily see the messages. Later when the debugging messages |
| 64 | in the drivers are more widely classified, this may be changed to KERN_DEBUG. */ | 65 | in the drivers are more widely classified, this may be changed to KERN_DEBUG. */ |
| 65 | #define ST_DEB_MSG KERN_NOTICE | ||
| 66 | #define DEB(a) a | 66 | #define DEB(a) a |
| 67 | #define DEBC(a) if (debugging) { a ; } | 67 | #define DEBC(a) if (debugging) { a ; } |
| 68 | #else | 68 | #else |
| @@ -305,6 +305,15 @@ static inline char *tape_name(struct scsi_tape *tape) | |||
| 305 | return tape->disk->disk_name; | 305 | return tape->disk->disk_name; |
| 306 | } | 306 | } |
| 307 | 307 | ||
| 308 | #define st_printk(prefix, t, fmt, a...) \ | ||
| 309 | sdev_printk(prefix, (t)->device, "%s: " fmt, \ | ||
| 310 | tape_name(t), ##a) | ||
| 311 | #ifdef DEBUG | ||
| 312 | #define DEBC_printk(t, fmt, a...) \ | ||
| 313 | if (debugging) { st_printk(ST_DEB_MSG, t, fmt, ##a ); } | ||
| 314 | #else | ||
| 315 | #define DEBC_printk(t, fmt, a...) | ||
| 316 | #endif | ||
| 308 | 317 | ||
| 309 | static void st_analyze_sense(struct st_request *SRpnt, struct st_cmdstatus *s) | 318 | static void st_analyze_sense(struct st_request *SRpnt, struct st_cmdstatus *s) |
| 310 | { | 319 | { |
| @@ -358,21 +367,20 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt) | |||
| 358 | else | 367 | else |
| 359 | scode = 0; | 368 | scode = 0; |
| 360 | 369 | ||
| 361 | DEB( | 370 | DEB( |
| 362 | if (debugging) { | 371 | if (debugging) { |
| 363 | printk(ST_DEB_MSG "%s: Error: %x, cmd: %x %x %x %x %x %x\n", | 372 | st_printk(ST_DEB_MSG, STp, |
| 364 | name, result, | 373 | "Error: %x, cmd: %x %x %x %x %x %x\n", result, |
| 365 | SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], | 374 | SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], |
| 366 | SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); | 375 | SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); |
| 367 | if (cmdstatp->have_sense) | 376 | if (cmdstatp->have_sense) |
| 368 | __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE); | 377 | __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE); |
| 369 | } ) /* end DEB */ | 378 | } ) /* end DEB */ |
| 370 | if (!debugging) { /* Abnormal conditions for tape */ | 379 | if (!debugging) { /* Abnormal conditions for tape */ |
| 371 | if (!cmdstatp->have_sense) | 380 | if (!cmdstatp->have_sense) |
| 372 | printk(KERN_WARNING | 381 | st_printk(KERN_WARNING, STp, |
| 373 | "%s: Error %x (driver bt 0x%x, host bt 0x%x).\n", | 382 | "Error %x (driver bt 0x%x, host bt 0x%x).\n", |
| 374 | name, result, driver_byte(result), | 383 | result, driver_byte(result), host_byte(result)); |
| 375 | host_byte(result)); | ||
| 376 | else if (cmdstatp->have_sense && | 384 | else if (cmdstatp->have_sense && |
| 377 | scode != NO_SENSE && | 385 | scode != NO_SENSE && |
| 378 | scode != RECOVERED_ERROR && | 386 | scode != RECOVERED_ERROR && |
| @@ -411,7 +419,7 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt) | |||
| 411 | STp->recover_count++; | 419 | STp->recover_count++; |
| 412 | STp->recover_reg++; | 420 | STp->recover_reg++; |
| 413 | 421 | ||
| 414 | DEB( | 422 | DEB( |
| 415 | if (debugging) { | 423 | if (debugging) { |
| 416 | if (SRpnt->cmd[0] == READ_6) | 424 | if (SRpnt->cmd[0] == READ_6) |
| 417 | stp = "read"; | 425 | stp = "read"; |
| @@ -419,8 +427,9 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt) | |||
| 419 | stp = "write"; | 427 | stp = "write"; |
| 420 | else | 428 | else |
| 421 | stp = "ioctl"; | 429 | stp = "ioctl"; |
| 422 | printk(ST_DEB_MSG "%s: Recovered %s error (%d).\n", name, stp, | 430 | st_printk(ST_DEB_MSG, STp, |
| 423 | STp->recover_count); | 431 | "Recovered %s error (%d).\n", |
| 432 | stp, STp->recover_count); | ||
| 424 | } ) /* end DEB */ | 433 | } ) /* end DEB */ |
| 425 | 434 | ||
| 426 | if (cmdstatp->flags == 0) | 435 | if (cmdstatp->flags == 0) |
| @@ -437,8 +446,8 @@ static struct st_request *st_allocate_request(struct scsi_tape *stp) | |||
| 437 | if (streq) | 446 | if (streq) |
| 438 | streq->stp = stp; | 447 | streq->stp = stp; |
| 439 | else { | 448 | else { |
| 440 | DEBC(printk(KERN_ERR "%s: Can't get SCSI request.\n", | 449 | st_printk(KERN_ERR, stp, |
| 441 | tape_name(stp));); | 450 | "Can't get SCSI request.\n"); |
| 442 | if (signal_pending(current)) | 451 | if (signal_pending(current)) |
| 443 | stp->buffer->syscall_result = -EINTR; | 452 | stp->buffer->syscall_result = -EINTR; |
| 444 | else | 453 | else |
| @@ -525,8 +534,8 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd | |||
| 525 | 534 | ||
| 526 | /* if async, make sure there's no command outstanding */ | 535 | /* if async, make sure there's no command outstanding */ |
| 527 | if (!do_wait && ((STp->buffer)->last_SRpnt)) { | 536 | if (!do_wait && ((STp->buffer)->last_SRpnt)) { |
| 528 | printk(KERN_ERR "%s: Async command already active.\n", | 537 | st_printk(KERN_ERR, STp, |
| 529 | tape_name(STp)); | 538 | "Async command already active.\n"); |
| 530 | if (signal_pending(current)) | 539 | if (signal_pending(current)) |
| 531 | (STp->buffer)->syscall_result = (-EINTR); | 540 | (STp->buffer)->syscall_result = (-EINTR); |
| 532 | else | 541 | else |
| @@ -597,12 +606,12 @@ static int write_behind_check(struct scsi_tape * STp) | |||
| 597 | if (!STbuffer->writing) | 606 | if (!STbuffer->writing) |
| 598 | return 0; | 607 | return 0; |
| 599 | 608 | ||
| 600 | DEB( | 609 | DEB( |
| 601 | if (STp->write_pending) | 610 | if (STp->write_pending) |
| 602 | STp->nbr_waits++; | 611 | STp->nbr_waits++; |
| 603 | else | 612 | else |
| 604 | STp->nbr_finished++; | 613 | STp->nbr_finished++; |
| 605 | ) /* end DEB */ | 614 | ) /* end DEB */ |
| 606 | 615 | ||
| 607 | wait_for_completion(&(STp->wait)); | 616 | wait_for_completion(&(STp->wait)); |
| 608 | SRpnt = STbuffer->last_SRpnt; | 617 | SRpnt = STbuffer->last_SRpnt; |
| @@ -639,8 +648,9 @@ static int write_behind_check(struct scsi_tape * STp) | |||
| 639 | STbuffer->writing = 0; | 648 | STbuffer->writing = 0; |
| 640 | 649 | ||
| 641 | DEB(if (debugging && retval) | 650 | DEB(if (debugging && retval) |
| 642 | printk(ST_DEB_MSG "%s: Async write error %x, return value %d.\n", | 651 | st_printk(ST_DEB_MSG, STp, |
| 643 | tape_name(STp), STbuffer->cmdstat.midlevel_result, retval);) /* end DEB */ | 652 | "Async write error %x, return value %d.\n", |
| 653 | STbuffer->cmdstat.midlevel_result, retval);) /* end DEB */ | ||
| 644 | 654 | ||
| 645 | return retval; | 655 | return retval; |
| 646 | } | 656 | } |
| @@ -662,8 +672,8 @@ static int cross_eof(struct scsi_tape * STp, int forward) | |||
| 662 | cmd[2] = cmd[3] = cmd[4] = 0xff; /* -1 filemarks */ | 672 | cmd[2] = cmd[3] = cmd[4] = 0xff; /* -1 filemarks */ |
| 663 | cmd[5] = 0; | 673 | cmd[5] = 0; |
| 664 | 674 | ||
| 665 | DEBC(printk(ST_DEB_MSG "%s: Stepping over filemark %s.\n", | 675 | DEBC_printk(STp, "Stepping over filemark %s.\n", |
| 666 | tape_name(STp), forward ? "forward" : "backward")); | 676 | forward ? "forward" : "backward"); |
| 667 | 677 | ||
| 668 | SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, | 678 | SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, |
| 669 | STp->device->request_queue->rq_timeout, | 679 | STp->device->request_queue->rq_timeout, |
| @@ -675,8 +685,9 @@ static int cross_eof(struct scsi_tape * STp, int forward) | |||
| 675 | SRpnt = NULL; | 685 | SRpnt = NULL; |
| 676 | 686 | ||
| 677 | if ((STp->buffer)->cmdstat.midlevel_result != 0) | 687 | if ((STp->buffer)->cmdstat.midlevel_result != 0) |
| 678 | printk(KERN_ERR "%s: Stepping over filemark %s failed.\n", | 688 | st_printk(KERN_ERR, STp, |
| 679 | tape_name(STp), forward ? "forward" : "backward"); | 689 | "Stepping over filemark %s failed.\n", |
| 690 | forward ? "forward" : "backward"); | ||
| 680 | 691 | ||
| 681 | return (STp->buffer)->syscall_result; | 692 | return (STp->buffer)->syscall_result; |
| 682 | } | 693 | } |
| @@ -699,8 +710,7 @@ static int st_flush_write_buffer(struct scsi_tape * STp) | |||
| 699 | if (STp->dirty == 1) { | 710 | if (STp->dirty == 1) { |
| 700 | 711 | ||
| 701 | transfer = STp->buffer->buffer_bytes; | 712 | transfer = STp->buffer->buffer_bytes; |
| 702 | DEBC(printk(ST_DEB_MSG "%s: Flushing %d bytes.\n", | 713 | DEBC_printk(STp, "Flushing %d bytes.\n", transfer); |
| 703 | tape_name(STp), transfer)); | ||
| 704 | 714 | ||
| 705 | memset(cmd, 0, MAX_COMMAND_SIZE); | 715 | memset(cmd, 0, MAX_COMMAND_SIZE); |
| 706 | cmd[0] = WRITE_6; | 716 | cmd[0] = WRITE_6; |
| @@ -732,8 +742,7 @@ static int st_flush_write_buffer(struct scsi_tape * STp) | |||
| 732 | STps->drv_block += blks; | 742 | STps->drv_block += blks; |
| 733 | result = (-ENOSPC); | 743 | result = (-ENOSPC); |
| 734 | } else { | 744 | } else { |
| 735 | printk(KERN_ERR "%s: Error on flush.\n", | 745 | st_printk(KERN_ERR, STp, "Error on flush.\n"); |
| 736 | tape_name(STp)); | ||
| 737 | STps->drv_block = (-1); | 746 | STps->drv_block = (-1); |
| 738 | result = (-EIO); | 747 | result = (-EIO); |
| 739 | } | 748 | } |
| @@ -811,7 +820,6 @@ static int set_mode_densblk(struct scsi_tape * STp, struct st_modedef * STm) | |||
| 811 | { | 820 | { |
| 812 | int set_it = 0; | 821 | int set_it = 0; |
| 813 | unsigned long arg; | 822 | unsigned long arg; |
| 814 | char *name = tape_name(STp); | ||
| 815 | 823 | ||
| 816 | if (!STp->density_changed && | 824 | if (!STp->density_changed && |
| 817 | STm->default_density >= 0 && | 825 | STm->default_density >= 0 && |
| @@ -830,9 +838,10 @@ static int set_mode_densblk(struct scsi_tape * STp, struct st_modedef * STm) | |||
| 830 | arg |= STp->block_size; | 838 | arg |= STp->block_size; |
| 831 | if (set_it && | 839 | if (set_it && |
| 832 | st_int_ioctl(STp, SET_DENS_AND_BLK, arg)) { | 840 | st_int_ioctl(STp, SET_DENS_AND_BLK, arg)) { |
| 833 | printk(KERN_WARNING | 841 | st_printk(KERN_WARNING, STp, |
| 834 | "%s: Can't set default block size to %d bytes and density %x.\n", | 842 | "Can't set default block size to %d bytes " |
| 835 | name, STm->default_blksize, STm->default_density); | 843 | "and density %x.\n", |
| 844 | STm->default_blksize, STm->default_density); | ||
| 836 | if (modes_defined) | 845 | if (modes_defined) |
| 837 | return (-EINVAL); | 846 | return (-EINVAL); |
| 838 | } | 847 | } |
| @@ -844,12 +853,9 @@ static int set_mode_densblk(struct scsi_tape * STp, struct st_modedef * STm) | |||
| 844 | static int do_door_lock(struct scsi_tape * STp, int do_lock) | 853 | static int do_door_lock(struct scsi_tape * STp, int do_lock) |
| 845 | { | 854 | { |
| 846 | int retval, cmd; | 855 | int retval, cmd; |
| 847 | DEB(char *name = tape_name(STp);) | ||
| 848 | |||
| 849 | 856 | ||
| 850 | cmd = do_lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK; | 857 | cmd = do_lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK; |
| 851 | DEBC(printk(ST_DEB_MSG "%s: %socking drive door.\n", name, | 858 | DEBC_printk(STp, "%socking drive door.\n", do_lock ? "L" : "Unl"); |
| 852 | do_lock ? "L" : "Unl")); | ||
| 853 | retval = scsi_ioctl(STp->device, cmd, NULL); | 859 | retval = scsi_ioctl(STp->device, cmd, NULL); |
| 854 | if (!retval) { | 860 | if (!retval) { |
| 855 | STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED; | 861 | STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED; |
| @@ -976,15 +982,14 @@ static int check_tape(struct scsi_tape *STp, struct file *filp) | |||
| 976 | struct st_request *SRpnt = NULL; | 982 | struct st_request *SRpnt = NULL; |
| 977 | struct st_modedef *STm; | 983 | struct st_modedef *STm; |
| 978 | struct st_partstat *STps; | 984 | struct st_partstat *STps; |
| 979 | char *name = tape_name(STp); | ||
| 980 | struct inode *inode = file_inode(filp); | 985 | struct inode *inode = file_inode(filp); |
| 981 | int mode = TAPE_MODE(inode); | 986 | int mode = TAPE_MODE(inode); |
| 982 | 987 | ||
| 983 | STp->ready = ST_READY; | 988 | STp->ready = ST_READY; |
| 984 | 989 | ||
| 985 | if (mode != STp->current_mode) { | 990 | if (mode != STp->current_mode) { |
| 986 | DEBC(printk(ST_DEB_MSG "%s: Mode change from %d to %d.\n", | 991 | DEBC_printk(STp, "Mode change from %d to %d.\n", |
| 987 | name, STp->current_mode, mode)); | 992 | STp->current_mode, mode); |
| 988 | new_session = 1; | 993 | new_session = 1; |
| 989 | STp->current_mode = mode; | 994 | STp->current_mode = mode; |
| 990 | } | 995 | } |
| @@ -1055,13 +1060,12 @@ static int check_tape(struct scsi_tape *STp, struct file *filp) | |||
| 1055 | STp->min_block = ((STp->buffer)->b_data[4] << 8) | | 1060 | STp->min_block = ((STp->buffer)->b_data[4] << 8) | |
| 1056 | (STp->buffer)->b_data[5]; | 1061 | (STp->buffer)->b_data[5]; |
| 1057 | if ( DEB( debugging || ) !STp->inited) | 1062 | if ( DEB( debugging || ) !STp->inited) |
| 1058 | printk(KERN_INFO | 1063 | st_printk(KERN_INFO, STp, |
| 1059 | "%s: Block limits %d - %d bytes.\n", name, | 1064 | "Block limits %d - %d bytes.\n", |
| 1060 | STp->min_block, STp->max_block); | 1065 | STp->min_block, STp->max_block); |
| 1061 | } else { | 1066 | } else { |
| 1062 | STp->min_block = STp->max_block = (-1); | 1067 | STp->min_block = STp->max_block = (-1); |
| 1063 | DEBC(printk(ST_DEB_MSG "%s: Can't read block limits.\n", | 1068 | DEBC_printk(STp, "Can't read block limits.\n"); |
| 1064 | name)); | ||
| 1065 | } | 1069 | } |
| 1066 | } | 1070 | } |
| 1067 | 1071 | ||
| @@ -1078,56 +1082,58 @@ static int check_tape(struct scsi_tape *STp, struct file *filp) | |||
| 1078 | } | 1082 | } |
| 1079 | 1083 | ||
| 1080 | if ((STp->buffer)->syscall_result != 0) { | 1084 | if ((STp->buffer)->syscall_result != 0) { |
| 1081 | DEBC(printk(ST_DEB_MSG "%s: No Mode Sense.\n", name)); | 1085 | DEBC_printk(STp, "No Mode Sense.\n"); |
| 1082 | STp->block_size = ST_DEFAULT_BLOCK; /* Educated guess (?) */ | 1086 | STp->block_size = ST_DEFAULT_BLOCK; /* Educated guess (?) */ |
| 1083 | (STp->buffer)->syscall_result = 0; /* Prevent error propagation */ | 1087 | (STp->buffer)->syscall_result = 0; /* Prevent error propagation */ |
| 1084 | STp->drv_write_prot = 0; | 1088 | STp->drv_write_prot = 0; |
| 1085 | } else { | 1089 | } else { |
| 1086 | DEBC(printk(ST_DEB_MSG | 1090 | DEBC_printk(STp,"Mode sense. Length %d, " |
| 1087 | "%s: Mode sense. Length %d, medium %x, WBS %x, BLL %d\n", | 1091 | "medium %x, WBS %x, BLL %d\n", |
| 1088 | name, | 1092 | (STp->buffer)->b_data[0], |
| 1089 | (STp->buffer)->b_data[0], (STp->buffer)->b_data[1], | 1093 | (STp->buffer)->b_data[1], |
| 1090 | (STp->buffer)->b_data[2], (STp->buffer)->b_data[3])); | 1094 | (STp->buffer)->b_data[2], |
| 1095 | (STp->buffer)->b_data[3]); | ||
| 1091 | 1096 | ||
| 1092 | if ((STp->buffer)->b_data[3] >= 8) { | 1097 | if ((STp->buffer)->b_data[3] >= 8) { |
| 1093 | STp->drv_buffer = ((STp->buffer)->b_data[2] >> 4) & 7; | 1098 | STp->drv_buffer = ((STp->buffer)->b_data[2] >> 4) & 7; |
| 1094 | STp->density = (STp->buffer)->b_data[4]; | 1099 | STp->density = (STp->buffer)->b_data[4]; |
| 1095 | STp->block_size = (STp->buffer)->b_data[9] * 65536 + | 1100 | STp->block_size = (STp->buffer)->b_data[9] * 65536 + |
| 1096 | (STp->buffer)->b_data[10] * 256 + (STp->buffer)->b_data[11]; | 1101 | (STp->buffer)->b_data[10] * 256 + (STp->buffer)->b_data[11]; |
| 1097 | DEBC(printk(ST_DEB_MSG | 1102 | DEBC_printk(STp, "Density %x, tape length: %x, " |
| 1098 | "%s: Density %x, tape length: %x, drv buffer: %d\n", | 1103 | "drv buffer: %d\n", |
| 1099 | name, STp->density, (STp->buffer)->b_data[5] * 65536 + | 1104 | STp->density, |
| 1100 | (STp->buffer)->b_data[6] * 256 + (STp->buffer)->b_data[7], | 1105 | (STp->buffer)->b_data[5] * 65536 + |
| 1101 | STp->drv_buffer)); | 1106 | (STp->buffer)->b_data[6] * 256 + |
| 1107 | (STp->buffer)->b_data[7], | ||
| 1108 | STp->drv_buffer); | ||
| 1102 | } | 1109 | } |
| 1103 | STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0; | 1110 | STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0; |
| 1104 | if (!STp->drv_buffer && STp->immediate_filemark) { | 1111 | if (!STp->drv_buffer && STp->immediate_filemark) { |
| 1105 | printk(KERN_WARNING | 1112 | st_printk(KERN_WARNING, STp, |
| 1106 | "%s: non-buffered tape: disabling writing immediate filemarks\n", | 1113 | "non-buffered tape: disabling " |
| 1107 | name); | 1114 | "writing immediate filemarks\n"); |
| 1108 | STp->immediate_filemark = 0; | 1115 | STp->immediate_filemark = 0; |
| 1109 | } | 1116 | } |
| 1110 | } | 1117 | } |
| 1111 | st_release_request(SRpnt); | 1118 | st_release_request(SRpnt); |
| 1112 | SRpnt = NULL; | 1119 | SRpnt = NULL; |
| 1113 | STp->inited = 1; | 1120 | STp->inited = 1; |
| 1114 | 1121 | ||
| 1115 | if (STp->block_size > 0) | 1122 | if (STp->block_size > 0) |
| 1116 | (STp->buffer)->buffer_blocks = | 1123 | (STp->buffer)->buffer_blocks = |
| 1117 | (STp->buffer)->buffer_size / STp->block_size; | 1124 | (STp->buffer)->buffer_size / STp->block_size; |
| 1118 | else | 1125 | else |
| 1119 | (STp->buffer)->buffer_blocks = 1; | 1126 | (STp->buffer)->buffer_blocks = 1; |
| 1120 | (STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0; | 1127 | (STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0; |
| 1121 | 1128 | ||
| 1122 | DEBC(printk(ST_DEB_MSG | 1129 | DEBC_printk(STp, "Block size: %d, buffer size: %d (%d blocks).\n", |
| 1123 | "%s: Block size: %d, buffer size: %d (%d blocks).\n", name, | 1130 | STp->block_size, (STp->buffer)->buffer_size, |
| 1124 | STp->block_size, (STp->buffer)->buffer_size, | 1131 | (STp->buffer)->buffer_blocks); |
| 1125 | (STp->buffer)->buffer_blocks)); | ||
| 1126 | 1132 | ||
| 1127 | if (STp->drv_write_prot) { | 1133 | if (STp->drv_write_prot) { |
| 1128 | STp->write_prot = 1; | 1134 | STp->write_prot = 1; |
| 1129 | 1135 | ||
| 1130 | DEBC(printk(ST_DEB_MSG "%s: Write protected\n", name)); | 1136 | DEBC_printk(STp, "Write protected\n"); |
| 1131 | 1137 | ||
| 1132 | if (do_wait && | 1138 | if (do_wait && |
| 1133 | ((st_flags & O_ACCMODE) == O_WRONLY || | 1139 | ((st_flags & O_ACCMODE) == O_WRONLY || |
| @@ -1141,8 +1147,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp) | |||
| 1141 | /* This code is reached when the device is opened for the first time | 1147 | /* This code is reached when the device is opened for the first time |
| 1142 | after the driver has been initialized with tape in the drive and the | 1148 | after the driver has been initialized with tape in the drive and the |
| 1143 | partition support has been enabled. */ | 1149 | partition support has been enabled. */ |
| 1144 | DEBC(printk(ST_DEB_MSG | 1150 | DEBC_printk(STp, "Updating partition number in status.\n"); |
| 1145 | "%s: Updating partition number in status.\n", name)); | ||
| 1146 | if ((STp->partition = find_partition(STp)) < 0) { | 1151 | if ((STp->partition = find_partition(STp)) < 0) { |
| 1147 | retval = STp->partition; | 1152 | retval = STp->partition; |
| 1148 | goto err_out; | 1153 | goto err_out; |
| @@ -1160,9 +1165,10 @@ static int check_tape(struct scsi_tape *STp, struct file *filp) | |||
| 1160 | 1165 | ||
| 1161 | if (STp->default_drvbuffer != 0xff) { | 1166 | if (STp->default_drvbuffer != 0xff) { |
| 1162 | if (st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer)) | 1167 | if (st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer)) |
| 1163 | printk(KERN_WARNING | 1168 | st_printk(KERN_WARNING, STp, |
| 1164 | "%s: Can't set default drive buffering to %d.\n", | 1169 | "Can't set default drive " |
| 1165 | name, STp->default_drvbuffer); | 1170 | "buffering to %d.\n", |
| 1171 | STp->default_drvbuffer); | ||
| 1166 | } | 1172 | } |
| 1167 | } | 1173 | } |
| 1168 | 1174 | ||
| @@ -1182,7 +1188,6 @@ static int st_open(struct inode *inode, struct file *filp) | |||
| 1182 | struct scsi_tape *STp; | 1188 | struct scsi_tape *STp; |
| 1183 | struct st_partstat *STps; | 1189 | struct st_partstat *STps; |
| 1184 | int dev = TAPE_NR(inode); | 1190 | int dev = TAPE_NR(inode); |
| 1185 | char *name; | ||
| 1186 | 1191 | ||
| 1187 | /* | 1192 | /* |
| 1188 | * We really want to do nonseekable_open(inode, filp); here, but some | 1193 | * We really want to do nonseekable_open(inode, filp); here, but some |
| @@ -1196,13 +1201,12 @@ static int st_open(struct inode *inode, struct file *filp) | |||
| 1196 | } | 1201 | } |
| 1197 | 1202 | ||
| 1198 | filp->private_data = STp; | 1203 | filp->private_data = STp; |
| 1199 | name = tape_name(STp); | ||
| 1200 | 1204 | ||
| 1201 | spin_lock(&st_use_lock); | 1205 | spin_lock(&st_use_lock); |
| 1202 | if (STp->in_use) { | 1206 | if (STp->in_use) { |
| 1203 | spin_unlock(&st_use_lock); | 1207 | spin_unlock(&st_use_lock); |
| 1204 | scsi_tape_put(STp); | 1208 | scsi_tape_put(STp); |
| 1205 | DEB( printk(ST_DEB_MSG "%s: Device already in use.\n", name); ) | 1209 | DEBC_printk(STp, "Device already in use.\n"); |
| 1206 | return (-EBUSY); | 1210 | return (-EBUSY); |
| 1207 | } | 1211 | } |
| 1208 | 1212 | ||
| @@ -1222,8 +1226,8 @@ static int st_open(struct inode *inode, struct file *filp) | |||
| 1222 | 1226 | ||
| 1223 | /* See that we have at least a one page buffer available */ | 1227 | /* See that we have at least a one page buffer available */ |
| 1224 | if (!enlarge_buffer(STp->buffer, PAGE_SIZE, STp->restr_dma)) { | 1228 | if (!enlarge_buffer(STp->buffer, PAGE_SIZE, STp->restr_dma)) { |
| 1225 | printk(KERN_WARNING "%s: Can't allocate one page tape buffer.\n", | 1229 | st_printk(KERN_WARNING, STp, |
| 1226 | name); | 1230 | "Can't allocate one page tape buffer.\n"); |
| 1227 | retval = (-EOVERFLOW); | 1231 | retval = (-EOVERFLOW); |
| 1228 | goto err_out; | 1232 | goto err_out; |
| 1229 | } | 1233 | } |
| @@ -1279,7 +1283,6 @@ static int st_flush(struct file *filp, fl_owner_t id) | |||
| 1279 | struct scsi_tape *STp = filp->private_data; | 1283 | struct scsi_tape *STp = filp->private_data; |
| 1280 | struct st_modedef *STm = &(STp->modes[STp->current_mode]); | 1284 | struct st_modedef *STm = &(STp->modes[STp->current_mode]); |
| 1281 | struct st_partstat *STps = &(STp->ps[STp->partition]); | 1285 | struct st_partstat *STps = &(STp->ps[STp->partition]); |
| 1282 | char *name = tape_name(STp); | ||
| 1283 | 1286 | ||
| 1284 | if (file_count(filp) > 1) | 1287 | if (file_count(filp) > 1) |
| 1285 | return 0; | 1288 | return 0; |
| @@ -1292,24 +1295,25 @@ static int st_flush(struct file *filp, fl_owner_t id) | |||
| 1292 | 1295 | ||
| 1293 | if (STp->can_partitions && | 1296 | if (STp->can_partitions && |
| 1294 | (result2 = switch_partition(STp)) < 0) { | 1297 | (result2 = switch_partition(STp)) < 0) { |
| 1295 | DEBC(printk(ST_DEB_MSG | 1298 | DEBC_printk(STp, "switch_partition at close failed.\n"); |
| 1296 | "%s: switch_partition at close failed.\n", name)); | ||
| 1297 | if (result == 0) | 1299 | if (result == 0) |
| 1298 | result = result2; | 1300 | result = result2; |
| 1299 | goto out; | 1301 | goto out; |
| 1300 | } | 1302 | } |
| 1301 | 1303 | ||
| 1302 | DEBC( if (STp->nbr_requests) | 1304 | DEBC( if (STp->nbr_requests) |
| 1303 | printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d.\n", | 1305 | st_printk(KERN_DEBUG, STp, |
| 1304 | name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages)); | 1306 | "Number of r/w requests %d, dio used in %d, " |
| 1307 | "pages %d.\n", STp->nbr_requests, STp->nbr_dio, | ||
| 1308 | STp->nbr_pages)); | ||
| 1305 | 1309 | ||
| 1306 | if (STps->rw == ST_WRITING && !STp->pos_unknown) { | 1310 | if (STps->rw == ST_WRITING && !STp->pos_unknown) { |
| 1307 | struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; | 1311 | struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; |
| 1308 | 1312 | ||
| 1309 | DEBC(printk(ST_DEB_MSG "%s: Async write waits %d, finished %d.\n", | 1313 | #if DEBUG |
| 1310 | name, STp->nbr_waits, STp->nbr_finished); | 1314 | DEBC_printk(STp, "Async write waits %d, finished %d.\n", |
| 1311 | ) | 1315 | STp->nbr_waits, STp->nbr_finished); |
| 1312 | 1316 | #endif | |
| 1313 | memset(cmd, 0, MAX_COMMAND_SIZE); | 1317 | memset(cmd, 0, MAX_COMMAND_SIZE); |
| 1314 | cmd[0] = WRITE_FILEMARKS; | 1318 | cmd[0] = WRITE_FILEMARKS; |
| 1315 | if (STp->immediate_filemark) | 1319 | if (STp->immediate_filemark) |
| @@ -1343,13 +1347,13 @@ static int st_flush(struct file *filp, fl_owner_t id) | |||
| 1343 | else { /* Write error */ | 1347 | else { /* Write error */ |
| 1344 | st_release_request(SRpnt); | 1348 | st_release_request(SRpnt); |
| 1345 | SRpnt = NULL; | 1349 | SRpnt = NULL; |
| 1346 | printk(KERN_ERR "%s: Error on write filemark.\n", name); | 1350 | st_printk(KERN_ERR, STp, |
| 1351 | "Error on write filemark.\n"); | ||
| 1347 | if (result == 0) | 1352 | if (result == 0) |
| 1348 | result = (-EIO); | 1353 | result = (-EIO); |
| 1349 | } | 1354 | } |
| 1350 | 1355 | ||
| 1351 | DEBC(printk(ST_DEB_MSG "%s: Buffer flushed, %d EOF(s) written\n", | 1356 | DEBC_printk(STp, "Buffer flushed, %d EOF(s) written\n", cmd[4]); |
| 1352 | name, cmd[4])); | ||
| 1353 | } else if (!STp->rew_at_close) { | 1357 | } else if (!STp->rew_at_close) { |
| 1354 | STps = &(STp->ps[STp->partition]); | 1358 | STps = &(STp->ps[STp->partition]); |
| 1355 | if (!STm->sysv || STps->rw != ST_READING) { | 1359 | if (!STm->sysv || STps->rw != ST_READING) { |
| @@ -1447,9 +1451,10 @@ static ssize_t rw_checks(struct scsi_tape *STp, struct file *filp, size_t count) | |||
| 1447 | if (count == 0) | 1451 | if (count == 0) |
| 1448 | goto out; | 1452 | goto out; |
| 1449 | 1453 | ||
| 1450 | DEB( | 1454 | DEB( |
| 1451 | if (!STp->in_use) { | 1455 | if (!STp->in_use) { |
| 1452 | printk(ST_DEB_MSG "%s: Incorrect device.\n", tape_name(STp)); | 1456 | st_printk(ST_DEB_MSG, STp, |
| 1457 | "Incorrect device.\n"); | ||
| 1453 | retval = (-EIO); | 1458 | retval = (-EIO); |
| 1454 | goto out; | 1459 | goto out; |
| 1455 | } ) /* end DEB */ | 1460 | } ) /* end DEB */ |
| @@ -1519,8 +1524,9 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf, | |||
| 1519 | 1524 | ||
| 1520 | if (bufsize > STbp->buffer_size && | 1525 | if (bufsize > STbp->buffer_size && |
| 1521 | !enlarge_buffer(STbp, bufsize, STp->restr_dma)) { | 1526 | !enlarge_buffer(STbp, bufsize, STp->restr_dma)) { |
| 1522 | printk(KERN_WARNING "%s: Can't allocate %d byte tape buffer.\n", | 1527 | st_printk(KERN_WARNING, STp, |
| 1523 | tape_name(STp), bufsize); | 1528 | "Can't allocate %d byte tape buffer.\n", |
| 1529 | bufsize); | ||
| 1524 | retval = (-EOVERFLOW); | 1530 | retval = (-EOVERFLOW); |
| 1525 | goto out; | 1531 | goto out; |
| 1526 | } | 1532 | } |
| @@ -1563,7 +1569,6 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) | |||
| 1563 | struct st_modedef *STm; | 1569 | struct st_modedef *STm; |
| 1564 | struct st_partstat *STps; | 1570 | struct st_partstat *STps; |
| 1565 | struct st_buffer *STbp; | 1571 | struct st_buffer *STbp; |
| 1566 | char *name = tape_name(STp); | ||
| 1567 | 1572 | ||
| 1568 | if (mutex_lock_interruptible(&STp->lock)) | 1573 | if (mutex_lock_interruptible(&STp->lock)) |
| 1569 | return -ERESTARTSYS; | 1574 | return -ERESTARTSYS; |
| @@ -1574,8 +1579,8 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) | |||
| 1574 | 1579 | ||
| 1575 | /* Write must be integral number of blocks */ | 1580 | /* Write must be integral number of blocks */ |
| 1576 | if (STp->block_size != 0 && (count % STp->block_size) != 0) { | 1581 | if (STp->block_size != 0 && (count % STp->block_size) != 0) { |
| 1577 | printk(KERN_WARNING "%s: Write not multiple of tape block size.\n", | 1582 | st_printk(KERN_WARNING, STp, |
| 1578 | name); | 1583 | "Write not multiple of tape block size.\n"); |
| 1579 | retval = (-EINVAL); | 1584 | retval = (-EINVAL); |
| 1580 | goto out; | 1585 | goto out; |
| 1581 | } | 1586 | } |
| @@ -1601,8 +1606,8 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) | |||
| 1601 | if (STm->default_compression != ST_DONT_TOUCH && | 1606 | if (STm->default_compression != ST_DONT_TOUCH && |
| 1602 | !(STp->compression_changed)) { | 1607 | !(STp->compression_changed)) { |
| 1603 | if (st_compression(STp, (STm->default_compression == ST_YES))) { | 1608 | if (st_compression(STp, (STm->default_compression == ST_YES))) { |
| 1604 | printk(KERN_WARNING "%s: Can't set default compression.\n", | 1609 | st_printk(KERN_WARNING, STp, |
| 1605 | name); | 1610 | "Can't set default compression.\n"); |
| 1606 | if (modes_defined) { | 1611 | if (modes_defined) { |
| 1607 | retval = (-EINVAL); | 1612 | retval = (-EINVAL); |
| 1608 | goto out; | 1613 | goto out; |
| @@ -1723,7 +1728,7 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) | |||
| 1723 | if (STbp->syscall_result != 0) { | 1728 | if (STbp->syscall_result != 0) { |
| 1724 | struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; | 1729 | struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; |
| 1725 | 1730 | ||
| 1726 | DEBC(printk(ST_DEB_MSG "%s: Error on write:\n", name)); | 1731 | DEBC_printk(STp, "Error on write:\n"); |
| 1727 | if (cmdstatp->have_sense && (cmdstatp->flags & SENSE_EOM)) { | 1732 | if (cmdstatp->have_sense && (cmdstatp->flags & SENSE_EOM)) { |
| 1728 | scode = cmdstatp->sense_hdr.sense_key; | 1733 | scode = cmdstatp->sense_hdr.sense_key; |
| 1729 | if (cmdstatp->remainder_valid) | 1734 | if (cmdstatp->remainder_valid) |
| @@ -1750,9 +1755,9 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) | |||
| 1750 | if (STp->block_size == 0 || | 1755 | if (STp->block_size == 0 || |
| 1751 | undone > 0 || count == 0) | 1756 | undone > 0 || count == 0) |
| 1752 | retval = (-ENOSPC); /* EOM within current request */ | 1757 | retval = (-ENOSPC); /* EOM within current request */ |
| 1753 | DEBC(printk(ST_DEB_MSG | 1758 | DEBC_printk(STp, "EOM with %d " |
| 1754 | "%s: EOM with %d bytes unwritten.\n", | 1759 | "bytes unwritten.\n", |
| 1755 | name, (int)count)); | 1760 | (int)count); |
| 1756 | } else { | 1761 | } else { |
| 1757 | /* EOT within data buffered earlier (possible only | 1762 | /* EOT within data buffered earlier (possible only |
| 1758 | in fixed block mode without direct i/o) */ | 1763 | in fixed block mode without direct i/o) */ |
| @@ -1765,9 +1770,10 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) | |||
| 1765 | STp->block_size; | 1770 | STp->block_size; |
| 1766 | } | 1771 | } |
| 1767 | STps->eof = ST_EOM_OK; | 1772 | STps->eof = ST_EOM_OK; |
| 1768 | DEBC(printk(ST_DEB_MSG | 1773 | DEBC_printk(STp, "Retry " |
| 1769 | "%s: Retry write of %d bytes at EOM.\n", | 1774 | "write of %d " |
| 1770 | name, STp->buffer->buffer_bytes)); | 1775 | "bytes at EOM.\n", |
| 1776 | STp->buffer->buffer_bytes); | ||
| 1771 | goto retry_write; | 1777 | goto retry_write; |
| 1772 | } | 1778 | } |
| 1773 | else { | 1779 | else { |
| @@ -1778,9 +1784,8 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) | |||
| 1778 | STps->eof = ST_EOM_ERROR; | 1784 | STps->eof = ST_EOM_ERROR; |
| 1779 | STps->drv_block = (-1); /* Too cautious? */ | 1785 | STps->drv_block = (-1); /* Too cautious? */ |
| 1780 | retval = (-EIO); /* EOM for old data */ | 1786 | retval = (-EIO); /* EOM for old data */ |
| 1781 | DEBC(printk(ST_DEB_MSG | 1787 | DEBC_printk(STp, "EOM with " |
| 1782 | "%s: EOM with lost data.\n", | 1788 | "lost data.\n"); |
| 1783 | name)); | ||
| 1784 | } | 1789 | } |
| 1785 | } | 1790 | } |
| 1786 | } else { | 1791 | } else { |
| @@ -1839,7 +1844,6 @@ static long read_tape(struct scsi_tape *STp, long count, | |||
| 1839 | struct st_partstat *STps; | 1844 | struct st_partstat *STps; |
| 1840 | struct st_buffer *STbp; | 1845 | struct st_buffer *STbp; |
| 1841 | int retval = 0; | 1846 | int retval = 0; |
| 1842 | char *name = tape_name(STp); | ||
| 1843 | 1847 | ||
| 1844 | if (count == 0) | 1848 | if (count == 0) |
| 1845 | return 0; | 1849 | return 0; |
| @@ -1891,12 +1895,12 @@ static long read_tape(struct scsi_tape *STp, long count, | |||
| 1891 | struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; | 1895 | struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; |
| 1892 | 1896 | ||
| 1893 | retval = 1; | 1897 | retval = 1; |
| 1894 | DEBC(printk(ST_DEB_MSG "%s: Sense: %2x %2x %2x %2x %2x %2x %2x %2x\n", | 1898 | DEBC_printk(STp, |
| 1895 | name, | 1899 | "Sense: %2x %2x %2x %2x %2x %2x %2x %2x\n", |
| 1896 | SRpnt->sense[0], SRpnt->sense[1], | 1900 | SRpnt->sense[0], SRpnt->sense[1], |
| 1897 | SRpnt->sense[2], SRpnt->sense[3], | 1901 | SRpnt->sense[2], SRpnt->sense[3], |
| 1898 | SRpnt->sense[4], SRpnt->sense[5], | 1902 | SRpnt->sense[4], SRpnt->sense[5], |
| 1899 | SRpnt->sense[6], SRpnt->sense[7])); | 1903 | SRpnt->sense[6], SRpnt->sense[7]); |
| 1900 | if (cmdstatp->have_sense) { | 1904 | if (cmdstatp->have_sense) { |
| 1901 | 1905 | ||
| 1902 | if (cmdstatp->sense_hdr.sense_key == BLANK_CHECK) | 1906 | if (cmdstatp->sense_hdr.sense_key == BLANK_CHECK) |
| @@ -1913,23 +1917,27 @@ static long read_tape(struct scsi_tape *STp, long count, | |||
| 1913 | transfer = bytes; | 1917 | transfer = bytes; |
| 1914 | 1918 | ||
| 1915 | if (cmdstatp->flags & SENSE_ILI) { /* ILI */ | 1919 | if (cmdstatp->flags & SENSE_ILI) { /* ILI */ |
| 1916 | if (STp->block_size == 0) { | 1920 | if (STp->block_size == 0 && |
| 1917 | if (transfer <= 0) { | 1921 | transfer < 0) { |
| 1918 | if (transfer < 0) | 1922 | st_printk(KERN_NOTICE, STp, |
| 1919 | printk(KERN_NOTICE | 1923 | "Failed to read %d " |
| 1920 | "%s: Failed to read %d byte block with %d byte transfer.\n", | 1924 | "byte block with %d " |
| 1921 | name, bytes - transfer, bytes); | 1925 | "byte transfer.\n", |
| 1922 | if (STps->drv_block >= 0) | 1926 | bytes - transfer, |
| 1923 | STps->drv_block += 1; | 1927 | bytes); |
| 1924 | STbp->buffer_bytes = 0; | 1928 | if (STps->drv_block >= 0) |
| 1925 | return (-ENOMEM); | 1929 | STps->drv_block += 1; |
| 1926 | } | 1930 | STbp->buffer_bytes = 0; |
| 1931 | return (-ENOMEM); | ||
| 1932 | } else if (STp->block_size == 0) { | ||
| 1927 | STbp->buffer_bytes = bytes - transfer; | 1933 | STbp->buffer_bytes = bytes - transfer; |
| 1928 | } else { | 1934 | } else { |
| 1929 | st_release_request(SRpnt); | 1935 | st_release_request(SRpnt); |
| 1930 | SRpnt = *aSRpnt = NULL; | 1936 | SRpnt = *aSRpnt = NULL; |
| 1931 | if (transfer == blks) { /* We did not get anything, error */ | 1937 | if (transfer == blks) { /* We did not get anything, error */ |
| 1932 | printk(KERN_NOTICE "%s: Incorrect block size.\n", name); | 1938 | st_printk(KERN_NOTICE, STp, |
| 1939 | "Incorrect " | ||
| 1940 | "block size.\n"); | ||
| 1933 | if (STps->drv_block >= 0) | 1941 | if (STps->drv_block >= 0) |
| 1934 | STps->drv_block += blks - transfer + 1; | 1942 | STps->drv_block += blks - transfer + 1; |
| 1935 | st_int_ioctl(STp, MTBSR, 1); | 1943 | st_int_ioctl(STp, MTBSR, 1); |
| @@ -1938,9 +1946,11 @@ static long read_tape(struct scsi_tape *STp, long count, | |||
| 1938 | /* We have some data, deliver it */ | 1946 | /* We have some data, deliver it */ |
| 1939 | STbp->buffer_bytes = (blks - transfer) * | 1947 | STbp->buffer_bytes = (blks - transfer) * |
| 1940 | STp->block_size; | 1948 | STp->block_size; |
| 1941 | DEBC(printk(ST_DEB_MSG | 1949 | DEBC_printk(STp, "ILI but " |
| 1942 | "%s: ILI but enough data received %ld %d.\n", | 1950 | "enough data " |
| 1943 | name, count, STbp->buffer_bytes)); | 1951 | "received %ld " |
| 1952 | "%d.\n", count, | ||
| 1953 | STbp->buffer_bytes); | ||
| 1944 | if (STps->drv_block >= 0) | 1954 | if (STps->drv_block >= 0) |
| 1945 | STps->drv_block += 1; | 1955 | STps->drv_block += 1; |
| 1946 | if (st_int_ioctl(STp, MTBSR, 1)) | 1956 | if (st_int_ioctl(STp, MTBSR, 1)) |
| @@ -1956,9 +1966,9 @@ static long read_tape(struct scsi_tape *STp, long count, | |||
| 1956 | else | 1966 | else |
| 1957 | STbp->buffer_bytes = | 1967 | STbp->buffer_bytes = |
| 1958 | bytes - transfer * STp->block_size; | 1968 | bytes - transfer * STp->block_size; |
| 1959 | DEBC(printk(ST_DEB_MSG | 1969 | DEBC_printk(STp, "EOF detected (%d " |
| 1960 | "%s: EOF detected (%d bytes read).\n", | 1970 | "bytes read).\n", |
| 1961 | name, STbp->buffer_bytes)); | 1971 | STbp->buffer_bytes); |
| 1962 | } else if (cmdstatp->flags & SENSE_EOM) { | 1972 | } else if (cmdstatp->flags & SENSE_EOM) { |
| 1963 | if (STps->eof == ST_FM) | 1973 | if (STps->eof == ST_FM) |
| 1964 | STps->eof = ST_EOD_1; | 1974 | STps->eof = ST_EOD_1; |
| @@ -1970,20 +1980,20 @@ static long read_tape(struct scsi_tape *STp, long count, | |||
| 1970 | STbp->buffer_bytes = | 1980 | STbp->buffer_bytes = |
| 1971 | bytes - transfer * STp->block_size; | 1981 | bytes - transfer * STp->block_size; |
| 1972 | 1982 | ||
| 1973 | DEBC(printk(ST_DEB_MSG "%s: EOM detected (%d bytes read).\n", | 1983 | DEBC_printk(STp, "EOM detected (%d " |
| 1974 | name, STbp->buffer_bytes)); | 1984 | "bytes read).\n", |
| 1985 | STbp->buffer_bytes); | ||
| 1975 | } | 1986 | } |
| 1976 | } | 1987 | } |
| 1977 | /* end of EOF, EOM, ILI test */ | 1988 | /* end of EOF, EOM, ILI test */ |
| 1978 | else { /* nonzero sense key */ | 1989 | else { /* nonzero sense key */ |
| 1979 | DEBC(printk(ST_DEB_MSG | 1990 | DEBC_printk(STp, "Tape error while reading.\n"); |
| 1980 | "%s: Tape error while reading.\n", name)); | ||
| 1981 | STps->drv_block = (-1); | 1991 | STps->drv_block = (-1); |
| 1982 | if (STps->eof == ST_FM && | 1992 | if (STps->eof == ST_FM && |
| 1983 | cmdstatp->sense_hdr.sense_key == BLANK_CHECK) { | 1993 | cmdstatp->sense_hdr.sense_key == BLANK_CHECK) { |
| 1984 | DEBC(printk(ST_DEB_MSG | 1994 | DEBC_printk(STp, "Zero returned for " |
| 1985 | "%s: Zero returned for first BLANK CHECK after EOF.\n", | 1995 | "first BLANK CHECK " |
| 1986 | name)); | 1996 | "after EOF.\n"); |
| 1987 | STps->eof = ST_EOD_2; /* First BLANK_CHECK after FM */ | 1997 | STps->eof = ST_EOD_2; /* First BLANK_CHECK after FM */ |
| 1988 | } else /* Some other extended sense code */ | 1998 | } else /* Some other extended sense code */ |
| 1989 | retval = (-EIO); | 1999 | retval = (-EIO); |
| @@ -1992,13 +2002,13 @@ static long read_tape(struct scsi_tape *STp, long count, | |||
| 1992 | if (STbp->buffer_bytes < 0) /* Caused by bogus sense data */ | 2002 | if (STbp->buffer_bytes < 0) /* Caused by bogus sense data */ |
| 1993 | STbp->buffer_bytes = 0; | 2003 | STbp->buffer_bytes = 0; |
| 1994 | } | 2004 | } |
| 1995 | /* End of extended sense test */ | 2005 | /* End of extended sense test */ |
| 1996 | else { /* Non-extended sense */ | 2006 | else { /* Non-extended sense */ |
| 1997 | retval = STbp->syscall_result; | 2007 | retval = STbp->syscall_result; |
| 1998 | } | 2008 | } |
| 1999 | 2009 | ||
| 2000 | } | 2010 | } |
| 2001 | /* End of error handling */ | 2011 | /* End of error handling */ |
| 2002 | else { /* Read successful */ | 2012 | else { /* Read successful */ |
| 2003 | STbp->buffer_bytes = bytes; | 2013 | STbp->buffer_bytes = bytes; |
| 2004 | if (STp->sili) /* In fixed block mode residual is always zero here */ | 2014 | if (STp->sili) /* In fixed block mode residual is always zero here */ |
| @@ -2028,7 +2038,6 @@ st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) | |||
| 2028 | struct st_modedef *STm; | 2038 | struct st_modedef *STm; |
| 2029 | struct st_partstat *STps; | 2039 | struct st_partstat *STps; |
| 2030 | struct st_buffer *STbp = STp->buffer; | 2040 | struct st_buffer *STbp = STp->buffer; |
| 2031 | DEB( char *name = tape_name(STp); ) | ||
| 2032 | 2041 | ||
| 2033 | if (mutex_lock_interruptible(&STp->lock)) | 2042 | if (mutex_lock_interruptible(&STp->lock)) |
| 2034 | return -ERESTARTSYS; | 2043 | return -ERESTARTSYS; |
| @@ -2053,11 +2062,12 @@ st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) | |||
| 2053 | goto out; | 2062 | goto out; |
| 2054 | STps->rw = ST_READING; | 2063 | STps->rw = ST_READING; |
| 2055 | } | 2064 | } |
| 2056 | DEB( | 2065 | DEB( |
| 2057 | if (debugging && STps->eof != ST_NOEOF) | 2066 | if (debugging && STps->eof != ST_NOEOF) |
| 2058 | printk(ST_DEB_MSG "%s: EOF/EOM flag up (%d). Bytes %d\n", name, | 2067 | st_printk(ST_DEB_MSG, STp, |
| 2059 | STps->eof, STbp->buffer_bytes); | 2068 | "EOF/EOM flag up (%d). Bytes %d\n", |
| 2060 | ) /* end DEB */ | 2069 | STps->eof, STbp->buffer_bytes); |
| 2070 | ) /* end DEB */ | ||
| 2061 | 2071 | ||
| 2062 | retval = setup_buffering(STp, buf, count, 1); | 2072 | retval = setup_buffering(STp, buf, count, 1); |
| 2063 | if (retval) | 2073 | if (retval) |
| @@ -2104,13 +2114,13 @@ st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) | |||
| 2104 | 2114 | ||
| 2105 | /* Move the data from driver buffer to user buffer */ | 2115 | /* Move the data from driver buffer to user buffer */ |
| 2106 | if (STbp->buffer_bytes > 0) { | 2116 | if (STbp->buffer_bytes > 0) { |
| 2107 | DEB( | 2117 | DEB( |
| 2108 | if (debugging && STps->eof != ST_NOEOF) | 2118 | if (debugging && STps->eof != ST_NOEOF) |
| 2109 | printk(ST_DEB_MSG | 2119 | st_printk(ST_DEB_MSG, STp, |
| 2110 | "%s: EOF up (%d). Left %d, needed %d.\n", name, | 2120 | "EOF up (%d). Left %d, needed %d.\n", |
| 2111 | STps->eof, STbp->buffer_bytes, | 2121 | STps->eof, STbp->buffer_bytes, |
| 2112 | (int)(count - total)); | 2122 | (int)(count - total)); |
| 2113 | ) /* end DEB */ | 2123 | ) /* end DEB */ |
| 2114 | transfer = STbp->buffer_bytes < count - total ? | 2124 | transfer = STbp->buffer_bytes < count - total ? |
| 2115 | STbp->buffer_bytes : count - total; | 2125 | STbp->buffer_bytes : count - total; |
| 2116 | if (!do_dio) { | 2126 | if (!do_dio) { |
| @@ -2166,26 +2176,30 @@ st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) | |||
| 2166 | 2176 | ||
| 2167 | DEB( | 2177 | DEB( |
| 2168 | /* Set the driver options */ | 2178 | /* Set the driver options */ |
| 2169 | static void st_log_options(struct scsi_tape * STp, struct st_modedef * STm, char *name) | 2179 | static void st_log_options(struct scsi_tape * STp, struct st_modedef * STm) |
| 2170 | { | 2180 | { |
| 2171 | if (debugging) { | 2181 | if (debugging) { |
| 2172 | printk(KERN_INFO | 2182 | st_printk(KERN_INFO, STp, |
| 2173 | "%s: Mode %d options: buffer writes: %d, async writes: %d, read ahead: %d\n", | 2183 | "Mode %d options: buffer writes: %d, " |
| 2174 | name, STp->current_mode, STm->do_buffer_writes, STm->do_async_writes, | 2184 | "async writes: %d, read ahead: %d\n", |
| 2175 | STm->do_read_ahead); | 2185 | STp->current_mode, STm->do_buffer_writes, |
| 2176 | printk(KERN_INFO | 2186 | STm->do_async_writes, STm->do_read_ahead); |
| 2177 | "%s: can bsr: %d, two FMs: %d, fast mteom: %d, auto lock: %d,\n", | 2187 | st_printk(KERN_INFO, STp, |
| 2178 | name, STp->can_bsr, STp->two_fm, STp->fast_mteom, STp->do_auto_lock); | 2188 | " can bsr: %d, two FMs: %d, " |
| 2179 | printk(KERN_INFO | 2189 | "fast mteom: %d, auto lock: %d,\n", |
| 2180 | "%s: defs for wr: %d, no block limits: %d, partitions: %d, s2 log: %d\n", | 2190 | STp->can_bsr, STp->two_fm, STp->fast_mteom, |
| 2181 | name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions, | 2191 | STp->do_auto_lock); |
| 2182 | STp->scsi2_logical); | 2192 | st_printk(KERN_INFO, STp, |
| 2183 | printk(KERN_INFO | 2193 | " defs for wr: %d, no block limits: %d, " |
| 2184 | "%s: sysv: %d nowait: %d sili: %d nowait_filemark: %d\n", | 2194 | "partitions: %d, s2 log: %d\n", |
| 2185 | name, STm->sysv, STp->immediate, STp->sili, | 2195 | STm->defaults_for_writes, STp->omit_blklims, |
| 2186 | STp->immediate_filemark); | 2196 | STp->can_partitions, STp->scsi2_logical); |
| 2187 | printk(KERN_INFO "%s: debugging: %d\n", | 2197 | st_printk(KERN_INFO, STp, |
| 2188 | name, debugging); | 2198 | " sysv: %d nowait: %d sili: %d " |
| 2199 | "nowait_filemark: %d\n", | ||
| 2200 | STm->sysv, STp->immediate, STp->sili, | ||
| 2201 | STp->immediate_filemark); | ||
| 2202 | st_printk(KERN_INFO, STp, " debugging: %d\n", debugging); | ||
| 2189 | } | 2203 | } |
| 2190 | } | 2204 | } |
| 2191 | ) | 2205 | ) |
| @@ -2196,7 +2210,6 @@ static int st_set_options(struct scsi_tape *STp, long options) | |||
| 2196 | int value; | 2210 | int value; |
| 2197 | long code; | 2211 | long code; |
| 2198 | struct st_modedef *STm; | 2212 | struct st_modedef *STm; |
| 2199 | char *name = tape_name(STp); | ||
| 2200 | struct cdev *cd0, *cd1; | 2213 | struct cdev *cd0, *cd1; |
| 2201 | struct device *d0, *d1; | 2214 | struct device *d0, *d1; |
| 2202 | 2215 | ||
| @@ -2212,9 +2225,8 @@ static int st_set_options(struct scsi_tape *STp, long options) | |||
| 2212 | STm->devs[0] = d0; | 2225 | STm->devs[0] = d0; |
| 2213 | STm->devs[1] = d1; | 2226 | STm->devs[1] = d1; |
| 2214 | modes_defined = 1; | 2227 | modes_defined = 1; |
| 2215 | DEBC(printk(ST_DEB_MSG | 2228 | DEBC_printk(STp, "Initialized mode %d definition from mode 0\n", |
| 2216 | "%s: Initialized mode %d definition from mode 0\n", | 2229 | STp->current_mode); |
| 2217 | name, STp->current_mode)); | ||
| 2218 | } | 2230 | } |
| 2219 | 2231 | ||
| 2220 | code = options & MT_ST_OPTIONS; | 2232 | code = options & MT_ST_OPTIONS; |
| @@ -2236,7 +2248,7 @@ static int st_set_options(struct scsi_tape *STp, long options) | |||
| 2236 | STm->sysv = (options & MT_ST_SYSV) != 0; | 2248 | STm->sysv = (options & MT_ST_SYSV) != 0; |
| 2237 | STp->sili = (options & MT_ST_SILI) != 0; | 2249 | STp->sili = (options & MT_ST_SILI) != 0; |
| 2238 | DEB( debugging = (options & MT_ST_DEBUGGING) != 0; | 2250 | DEB( debugging = (options & MT_ST_DEBUGGING) != 0; |
| 2239 | st_log_options(STp, STm, name); ) | 2251 | st_log_options(STp, STm); ) |
| 2240 | } else if (code == MT_ST_SETBOOLEANS || code == MT_ST_CLEARBOOLEANS) { | 2252 | } else if (code == MT_ST_SETBOOLEANS || code == MT_ST_CLEARBOOLEANS) { |
| 2241 | value = (code == MT_ST_SETBOOLEANS); | 2253 | value = (code == MT_ST_SETBOOLEANS); |
| 2242 | if ((options & MT_ST_BUFFER_WRITES) != 0) | 2254 | if ((options & MT_ST_BUFFER_WRITES) != 0) |
| @@ -2270,21 +2282,21 @@ static int st_set_options(struct scsi_tape *STp, long options) | |||
| 2270 | STm->sysv = value; | 2282 | STm->sysv = value; |
| 2271 | if ((options & MT_ST_SILI) != 0) | 2283 | if ((options & MT_ST_SILI) != 0) |
| 2272 | STp->sili = value; | 2284 | STp->sili = value; |
| 2273 | DEB( | 2285 | DEB( |
| 2274 | if ((options & MT_ST_DEBUGGING) != 0) | 2286 | if ((options & MT_ST_DEBUGGING) != 0) |
| 2275 | debugging = value; | 2287 | debugging = value; |
| 2276 | st_log_options(STp, STm, name); ) | 2288 | st_log_options(STp, STm); ) |
| 2277 | } else if (code == MT_ST_WRITE_THRESHOLD) { | 2289 | } else if (code == MT_ST_WRITE_THRESHOLD) { |
| 2278 | /* Retained for compatibility */ | 2290 | /* Retained for compatibility */ |
| 2279 | } else if (code == MT_ST_DEF_BLKSIZE) { | 2291 | } else if (code == MT_ST_DEF_BLKSIZE) { |
| 2280 | value = (options & ~MT_ST_OPTIONS); | 2292 | value = (options & ~MT_ST_OPTIONS); |
| 2281 | if (value == ~MT_ST_OPTIONS) { | 2293 | if (value == ~MT_ST_OPTIONS) { |
| 2282 | STm->default_blksize = (-1); | 2294 | STm->default_blksize = (-1); |
| 2283 | DEBC( printk(KERN_INFO "%s: Default block size disabled.\n", name)); | 2295 | DEBC_printk(STp, "Default block size disabled.\n"); |
| 2284 | } else { | 2296 | } else { |
| 2285 | STm->default_blksize = value; | 2297 | STm->default_blksize = value; |
| 2286 | DEBC( printk(KERN_INFO "%s: Default block size set to %d bytes.\n", | 2298 | DEBC_printk(STp,"Default block size set to " |
| 2287 | name, STm->default_blksize)); | 2299 | "%d bytes.\n", STm->default_blksize); |
| 2288 | if (STp->ready == ST_READY) { | 2300 | if (STp->ready == ST_READY) { |
| 2289 | STp->blksize_changed = 0; | 2301 | STp->blksize_changed = 0; |
| 2290 | set_mode_densblk(STp, STm); | 2302 | set_mode_densblk(STp, STm); |
| @@ -2294,13 +2306,13 @@ static int st_set_options(struct scsi_tape *STp, long options) | |||
| 2294 | value = (options & ~MT_ST_OPTIONS); | 2306 | value = (options & ~MT_ST_OPTIONS); |
| 2295 | if ((value & MT_ST_SET_LONG_TIMEOUT) != 0) { | 2307 | if ((value & MT_ST_SET_LONG_TIMEOUT) != 0) { |
| 2296 | STp->long_timeout = (value & ~MT_ST_SET_LONG_TIMEOUT) * HZ; | 2308 | STp->long_timeout = (value & ~MT_ST_SET_LONG_TIMEOUT) * HZ; |
| 2297 | DEBC( printk(KERN_INFO "%s: Long timeout set to %d seconds.\n", name, | 2309 | DEBC_printk(STp, "Long timeout set to %d seconds.\n", |
| 2298 | (value & ~MT_ST_SET_LONG_TIMEOUT))); | 2310 | (value & ~MT_ST_SET_LONG_TIMEOUT)); |
| 2299 | } else { | 2311 | } else { |
| 2300 | blk_queue_rq_timeout(STp->device->request_queue, | 2312 | blk_queue_rq_timeout(STp->device->request_queue, |
| 2301 | value * HZ); | 2313 | value * HZ); |
| 2302 | DEBC( printk(KERN_INFO "%s: Normal timeout set to %d seconds.\n", | 2314 | DEBC_printk(STp, "Normal timeout set to %d seconds.\n", |
| 2303 | name, value) ); | 2315 | value); |
| 2304 | } | 2316 | } |
| 2305 | } else if (code == MT_ST_SET_CLN) { | 2317 | } else if (code == MT_ST_SET_CLN) { |
| 2306 | value = (options & ~MT_ST_OPTIONS) & 0xff; | 2318 | value = (options & ~MT_ST_OPTIONS) & 0xff; |
| @@ -2311,21 +2323,21 @@ static int st_set_options(struct scsi_tape *STp, long options) | |||
| 2311 | STp->cln_mode = value; | 2323 | STp->cln_mode = value; |
| 2312 | STp->cln_sense_mask = (options >> 8) & 0xff; | 2324 | STp->cln_sense_mask = (options >> 8) & 0xff; |
| 2313 | STp->cln_sense_value = (options >> 16) & 0xff; | 2325 | STp->cln_sense_value = (options >> 16) & 0xff; |
| 2314 | printk(KERN_INFO | 2326 | st_printk(KERN_INFO, STp, |
| 2315 | "%s: Cleaning request mode %d, mask %02x, value %02x\n", | 2327 | "Cleaning request mode %d, mask %02x, value %02x\n", |
| 2316 | name, value, STp->cln_sense_mask, STp->cln_sense_value); | 2328 | value, STp->cln_sense_mask, STp->cln_sense_value); |
| 2317 | } else if (code == MT_ST_DEF_OPTIONS) { | 2329 | } else if (code == MT_ST_DEF_OPTIONS) { |
| 2318 | code = (options & ~MT_ST_CLEAR_DEFAULT); | 2330 | code = (options & ~MT_ST_CLEAR_DEFAULT); |
| 2319 | value = (options & MT_ST_CLEAR_DEFAULT); | 2331 | value = (options & MT_ST_CLEAR_DEFAULT); |
| 2320 | if (code == MT_ST_DEF_DENSITY) { | 2332 | if (code == MT_ST_DEF_DENSITY) { |
| 2321 | if (value == MT_ST_CLEAR_DEFAULT) { | 2333 | if (value == MT_ST_CLEAR_DEFAULT) { |
| 2322 | STm->default_density = (-1); | 2334 | STm->default_density = (-1); |
| 2323 | DEBC( printk(KERN_INFO "%s: Density default disabled.\n", | 2335 | DEBC_printk(STp, |
| 2324 | name)); | 2336 | "Density default disabled.\n"); |
| 2325 | } else { | 2337 | } else { |
| 2326 | STm->default_density = value & 0xff; | 2338 | STm->default_density = value & 0xff; |
| 2327 | DEBC( printk(KERN_INFO "%s: Density default set to %x\n", | 2339 | DEBC_printk(STp, "Density default set to %x\n", |
| 2328 | name, STm->default_density)); | 2340 | STm->default_density); |
| 2329 | if (STp->ready == ST_READY) { | 2341 | if (STp->ready == ST_READY) { |
| 2330 | STp->density_changed = 0; | 2342 | STp->density_changed = 0; |
| 2331 | set_mode_densblk(STp, STm); | 2343 | set_mode_densblk(STp, STm); |
| @@ -2334,31 +2346,33 @@ static int st_set_options(struct scsi_tape *STp, long options) | |||
| 2334 | } else if (code == MT_ST_DEF_DRVBUFFER) { | 2346 | } else if (code == MT_ST_DEF_DRVBUFFER) { |
| 2335 | if (value == MT_ST_CLEAR_DEFAULT) { | 2347 | if (value == MT_ST_CLEAR_DEFAULT) { |
| 2336 | STp->default_drvbuffer = 0xff; | 2348 | STp->default_drvbuffer = 0xff; |
| 2337 | DEBC( printk(KERN_INFO | 2349 | DEBC_printk(STp, |
| 2338 | "%s: Drive buffer default disabled.\n", name)); | 2350 | "Drive buffer default disabled.\n"); |
| 2339 | } else { | 2351 | } else { |
| 2340 | STp->default_drvbuffer = value & 7; | 2352 | STp->default_drvbuffer = value & 7; |
| 2341 | DEBC( printk(KERN_INFO | 2353 | DEBC_printk(STp, |
| 2342 | "%s: Drive buffer default set to %x\n", | 2354 | "Drive buffer default set to %x\n", |
| 2343 | name, STp->default_drvbuffer)); | 2355 | STp->default_drvbuffer); |
| 2344 | if (STp->ready == ST_READY) | 2356 | if (STp->ready == ST_READY) |
| 2345 | st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer); | 2357 | st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer); |
| 2346 | } | 2358 | } |
| 2347 | } else if (code == MT_ST_DEF_COMPRESSION) { | 2359 | } else if (code == MT_ST_DEF_COMPRESSION) { |
| 2348 | if (value == MT_ST_CLEAR_DEFAULT) { | 2360 | if (value == MT_ST_CLEAR_DEFAULT) { |
| 2349 | STm->default_compression = ST_DONT_TOUCH; | 2361 | STm->default_compression = ST_DONT_TOUCH; |
| 2350 | DEBC( printk(KERN_INFO | 2362 | DEBC_printk(STp, |
| 2351 | "%s: Compression default disabled.\n", name)); | 2363 | "Compression default disabled.\n"); |
| 2352 | } else { | 2364 | } else { |
| 2353 | if ((value & 0xff00) != 0) { | 2365 | if ((value & 0xff00) != 0) { |
| 2354 | STp->c_algo = (value & 0xff00) >> 8; | 2366 | STp->c_algo = (value & 0xff00) >> 8; |
| 2355 | DEBC( printk(KERN_INFO "%s: Compression algorithm set to 0x%x.\n", | 2367 | DEBC_printk(STp, "Compression " |
| 2356 | name, STp->c_algo)); | 2368 | "algorithm set to 0x%x.\n", |
| 2369 | STp->c_algo); | ||
| 2357 | } | 2370 | } |
| 2358 | if ((value & 0xff) != 0xff) { | 2371 | if ((value & 0xff) != 0xff) { |
| 2359 | STm->default_compression = (value & 1 ? ST_YES : ST_NO); | 2372 | STm->default_compression = (value & 1 ? ST_YES : ST_NO); |
| 2360 | DEBC( printk(KERN_INFO "%s: Compression default set to %x\n", | 2373 | DEBC_printk(STp, "Compression default " |
| 2361 | name, (value & 1))); | 2374 | "set to %x\n", |
| 2375 | (value & 1)); | ||
| 2362 | if (STp->ready == ST_READY) { | 2376 | if (STp->ready == ST_READY) { |
| 2363 | STp->compression_changed = 0; | 2377 | STp->compression_changed = 0; |
| 2364 | st_compression(STp, (STm->default_compression == ST_YES)); | 2378 | st_compression(STp, (STm->default_compression == ST_YES)); |
| @@ -2473,7 +2487,6 @@ static int st_compression(struct scsi_tape * STp, int state) | |||
| 2473 | int retval; | 2487 | int retval; |
| 2474 | int mpoffs; /* Offset to mode page start */ | 2488 | int mpoffs; /* Offset to mode page start */ |
| 2475 | unsigned char *b_data = (STp->buffer)->b_data; | 2489 | unsigned char *b_data = (STp->buffer)->b_data; |
| 2476 | DEB( char *name = tape_name(STp); ) | ||
| 2477 | 2490 | ||
| 2478 | if (STp->ready != ST_READY) | 2491 | if (STp->ready != ST_READY) |
| 2479 | return (-EIO); | 2492 | return (-EIO); |
| @@ -2481,18 +2494,17 @@ static int st_compression(struct scsi_tape * STp, int state) | |||
| 2481 | /* Read the current page contents */ | 2494 | /* Read the current page contents */ |
| 2482 | retval = read_mode_page(STp, COMPRESSION_PAGE, 0); | 2495 | retval = read_mode_page(STp, COMPRESSION_PAGE, 0); |
| 2483 | if (retval) { | 2496 | if (retval) { |
| 2484 | DEBC(printk(ST_DEB_MSG "%s: Compression mode page not supported.\n", | 2497 | DEBC_printk(STp, "Compression mode page not supported.\n"); |
| 2485 | name)); | ||
| 2486 | return (-EIO); | 2498 | return (-EIO); |
| 2487 | } | 2499 | } |
| 2488 | 2500 | ||
| 2489 | mpoffs = MODE_HEADER_LENGTH + b_data[MH_OFF_BDESCS_LENGTH]; | 2501 | mpoffs = MODE_HEADER_LENGTH + b_data[MH_OFF_BDESCS_LENGTH]; |
| 2490 | DEBC(printk(ST_DEB_MSG "%s: Compression state is %d.\n", name, | 2502 | DEBC_printk(STp, "Compression state is %d.\n", |
| 2491 | (b_data[mpoffs + CP_OFF_DCE_DCC] & DCE_MASK ? 1 : 0))); | 2503 | (b_data[mpoffs + CP_OFF_DCE_DCC] & DCE_MASK ? 1 : 0)); |
| 2492 | 2504 | ||
| 2493 | /* Check if compression can be changed */ | 2505 | /* Check if compression can be changed */ |
| 2494 | if ((b_data[mpoffs + CP_OFF_DCE_DCC] & DCC_MASK) == 0) { | 2506 | if ((b_data[mpoffs + CP_OFF_DCE_DCC] & DCC_MASK) == 0) { |
| 2495 | DEBC(printk(ST_DEB_MSG "%s: Compression not supported.\n", name)); | 2507 | DEBC_printk(STp, "Compression not supported.\n"); |
| 2496 | return (-EIO); | 2508 | return (-EIO); |
| 2497 | } | 2509 | } |
| 2498 | 2510 | ||
| @@ -2510,11 +2522,10 @@ static int st_compression(struct scsi_tape * STp, int state) | |||
| 2510 | 2522 | ||
| 2511 | retval = write_mode_page(STp, COMPRESSION_PAGE, 0); | 2523 | retval = write_mode_page(STp, COMPRESSION_PAGE, 0); |
| 2512 | if (retval) { | 2524 | if (retval) { |
| 2513 | DEBC(printk(ST_DEB_MSG "%s: Compression change failed.\n", name)); | 2525 | DEBC_printk(STp, "Compression change failed.\n"); |
| 2514 | return (-EIO); | 2526 | return (-EIO); |
| 2515 | } | 2527 | } |
| 2516 | DEBC(printk(ST_DEB_MSG "%s: Compression state changed to %d.\n", | 2528 | DEBC_printk(STp, "Compression state changed to %d.\n", state); |
| 2517 | name, state)); | ||
| 2518 | 2529 | ||
| 2519 | STp->compression_changed = 1; | 2530 | STp->compression_changed = 1; |
| 2520 | return 0; | 2531 | return 0; |
| @@ -2525,7 +2536,6 @@ static int st_compression(struct scsi_tape * STp, int state) | |||
| 2525 | static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_code) | 2536 | static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_code) |
| 2526 | { | 2537 | { |
| 2527 | int retval = (-EIO), timeout; | 2538 | int retval = (-EIO), timeout; |
| 2528 | DEB( char *name = tape_name(STp); ) | ||
| 2529 | unsigned char cmd[MAX_COMMAND_SIZE]; | 2539 | unsigned char cmd[MAX_COMMAND_SIZE]; |
| 2530 | struct st_partstat *STps; | 2540 | struct st_partstat *STps; |
| 2531 | struct st_request *SRpnt; | 2541 | struct st_request *SRpnt; |
| @@ -2546,9 +2556,9 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod | |||
| 2546 | */ | 2556 | */ |
| 2547 | if (load_code >= 1 + MT_ST_HPLOADER_OFFSET | 2557 | if (load_code >= 1 + MT_ST_HPLOADER_OFFSET |
| 2548 | && load_code <= 6 + MT_ST_HPLOADER_OFFSET) { | 2558 | && load_code <= 6 + MT_ST_HPLOADER_OFFSET) { |
| 2549 | DEBC(printk(ST_DEB_MSG "%s: Enhanced %sload slot %2d.\n", | 2559 | DEBC_printk(STp, " Enhanced %sload slot %2d.\n", |
| 2550 | name, (cmd[4]) ? "" : "un", | 2560 | (cmd[4]) ? "" : "un", |
| 2551 | load_code - MT_ST_HPLOADER_OFFSET)); | 2561 | load_code - MT_ST_HPLOADER_OFFSET); |
| 2552 | cmd[3] = load_code - MT_ST_HPLOADER_OFFSET; /* MediaID field of C1553A */ | 2562 | cmd[3] = load_code - MT_ST_HPLOADER_OFFSET; /* MediaID field of C1553A */ |
| 2553 | } | 2563 | } |
| 2554 | if (STp->immediate) { | 2564 | if (STp->immediate) { |
| @@ -2560,9 +2570,9 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod | |||
| 2560 | 2570 | ||
| 2561 | DEBC( | 2571 | DEBC( |
| 2562 | if (!load_code) | 2572 | if (!load_code) |
| 2563 | printk(ST_DEB_MSG "%s: Unloading tape.\n", name); | 2573 | st_printk(ST_DEB_MSG, STp, "Unloading tape.\n"); |
| 2564 | else | 2574 | else |
| 2565 | printk(ST_DEB_MSG "%s: Loading tape.\n", name); | 2575 | st_printk(ST_DEB_MSG, STp, "Loading tape.\n"); |
| 2566 | ); | 2576 | ); |
| 2567 | 2577 | ||
| 2568 | SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, | 2578 | SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, |
| @@ -2597,17 +2607,24 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod | |||
| 2597 | #if DEBUG | 2607 | #if DEBUG |
| 2598 | #define ST_DEB_FORWARD 0 | 2608 | #define ST_DEB_FORWARD 0 |
| 2599 | #define ST_DEB_BACKWARD 1 | 2609 | #define ST_DEB_BACKWARD 1 |
| 2600 | static void deb_space_print(char *name, int direction, char *units, unsigned char *cmd) | 2610 | static void deb_space_print(struct scsi_tape *STp, int direction, char *units, unsigned char *cmd) |
| 2601 | { | 2611 | { |
| 2602 | s32 sc; | 2612 | s32 sc; |
| 2603 | 2613 | ||
| 2614 | if (!debugging) | ||
| 2615 | return; | ||
| 2616 | |||
| 2604 | sc = cmd[2] & 0x80 ? 0xff000000 : 0; | 2617 | sc = cmd[2] & 0x80 ? 0xff000000 : 0; |
| 2605 | sc |= (cmd[2] << 16) | (cmd[3] << 8) | cmd[4]; | 2618 | sc |= (cmd[2] << 16) | (cmd[3] << 8) | cmd[4]; |
| 2606 | if (direction) | 2619 | if (direction) |
| 2607 | sc = -sc; | 2620 | sc = -sc; |
| 2608 | printk(ST_DEB_MSG "%s: Spacing tape %s over %d %s.\n", name, | 2621 | st_printk(ST_DEB_MSG, STp, "Spacing tape %s over %d %s.\n", |
| 2609 | direction ? "backward" : "forward", sc, units); | 2622 | direction ? "backward" : "forward", sc, units); |
| 2610 | } | 2623 | } |
| 2624 | #else | ||
| 2625 | #define ST_DEB_FORWARD 0 | ||
| 2626 | #define ST_DEB_BACKWARD 1 | ||
| 2627 | static void deb_space_print(struct scsi_tape *STp, int direction, char *units, unsigned char *cmd) {} | ||
| 2611 | #endif | 2628 | #endif |
| 2612 | 2629 | ||
| 2613 | 2630 | ||
| @@ -2623,7 +2640,6 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon | |||
| 2623 | struct st_partstat *STps; | 2640 | struct st_partstat *STps; |
| 2624 | int fileno, blkno, at_sm, undone; | 2641 | int fileno, blkno, at_sm, undone; |
| 2625 | int datalen = 0, direction = DMA_NONE; | 2642 | int datalen = 0, direction = DMA_NONE; |
| 2626 | char *name = tape_name(STp); | ||
| 2627 | 2643 | ||
| 2628 | WARN_ON(STp->buffer->do_dio != 0); | 2644 | WARN_ON(STp->buffer->do_dio != 0); |
| 2629 | if (STp->ready != ST_READY) { | 2645 | if (STp->ready != ST_READY) { |
| @@ -2648,7 +2664,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon | |||
| 2648 | cmd[2] = (arg >> 16); | 2664 | cmd[2] = (arg >> 16); |
| 2649 | cmd[3] = (arg >> 8); | 2665 | cmd[3] = (arg >> 8); |
| 2650 | cmd[4] = arg; | 2666 | cmd[4] = arg; |
| 2651 | DEBC(deb_space_print(name, ST_DEB_FORWARD, "filemarks", cmd);) | 2667 | deb_space_print(STp, ST_DEB_FORWARD, "filemarks", cmd); |
| 2652 | if (fileno >= 0) | 2668 | if (fileno >= 0) |
| 2653 | fileno += arg; | 2669 | fileno += arg; |
| 2654 | blkno = 0; | 2670 | blkno = 0; |
| @@ -2663,7 +2679,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon | |||
| 2663 | cmd[2] = (ltmp >> 16); | 2679 | cmd[2] = (ltmp >> 16); |
| 2664 | cmd[3] = (ltmp >> 8); | 2680 | cmd[3] = (ltmp >> 8); |
| 2665 | cmd[4] = ltmp; | 2681 | cmd[4] = ltmp; |
| 2666 | DEBC(deb_space_print(name, ST_DEB_BACKWARD, "filemarks", cmd);) | 2682 | deb_space_print(STp, ST_DEB_BACKWARD, "filemarks", cmd); |
| 2667 | if (fileno >= 0) | 2683 | if (fileno >= 0) |
| 2668 | fileno -= arg; | 2684 | fileno -= arg; |
| 2669 | blkno = (-1); /* We can't know the block number */ | 2685 | blkno = (-1); /* We can't know the block number */ |
| @@ -2675,7 +2691,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon | |||
| 2675 | cmd[2] = (arg >> 16); | 2691 | cmd[2] = (arg >> 16); |
| 2676 | cmd[3] = (arg >> 8); | 2692 | cmd[3] = (arg >> 8); |
| 2677 | cmd[4] = arg; | 2693 | cmd[4] = arg; |
| 2678 | DEBC(deb_space_print(name, ST_DEB_FORWARD, "blocks", cmd);) | 2694 | deb_space_print(STp, ST_DEB_FORWARD, "blocks", cmd); |
| 2679 | if (blkno >= 0) | 2695 | if (blkno >= 0) |
| 2680 | blkno += arg; | 2696 | blkno += arg; |
| 2681 | at_sm &= (arg == 0); | 2697 | at_sm &= (arg == 0); |
| @@ -2687,7 +2703,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon | |||
| 2687 | cmd[2] = (ltmp >> 16); | 2703 | cmd[2] = (ltmp >> 16); |
| 2688 | cmd[3] = (ltmp >> 8); | 2704 | cmd[3] = (ltmp >> 8); |
| 2689 | cmd[4] = ltmp; | 2705 | cmd[4] = ltmp; |
| 2690 | DEBC(deb_space_print(name, ST_DEB_BACKWARD, "blocks", cmd);) | 2706 | deb_space_print(STp, ST_DEB_BACKWARD, "blocks", cmd); |
| 2691 | if (blkno >= 0) | 2707 | if (blkno >= 0) |
| 2692 | blkno -= arg; | 2708 | blkno -= arg; |
| 2693 | at_sm &= (arg == 0); | 2709 | at_sm &= (arg == 0); |
| @@ -2698,7 +2714,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon | |||
| 2698 | cmd[2] = (arg >> 16); | 2714 | cmd[2] = (arg >> 16); |
| 2699 | cmd[3] = (arg >> 8); | 2715 | cmd[3] = (arg >> 8); |
| 2700 | cmd[4] = arg; | 2716 | cmd[4] = arg; |
| 2701 | DEBC(deb_space_print(name, ST_DEB_FORWARD, "setmarks", cmd);) | 2717 | deb_space_print(STp, ST_DEB_FORWARD, "setmarks", cmd); |
| 2702 | if (arg != 0) { | 2718 | if (arg != 0) { |
| 2703 | blkno = fileno = (-1); | 2719 | blkno = fileno = (-1); |
| 2704 | at_sm = 1; | 2720 | at_sm = 1; |
| @@ -2711,7 +2727,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon | |||
| 2711 | cmd[2] = (ltmp >> 16); | 2727 | cmd[2] = (ltmp >> 16); |
| 2712 | cmd[3] = (ltmp >> 8); | 2728 | cmd[3] = (ltmp >> 8); |
| 2713 | cmd[4] = ltmp; | 2729 | cmd[4] = ltmp; |
| 2714 | DEBC(deb_space_print(name, ST_DEB_BACKWARD, "setmarks", cmd);) | 2730 | deb_space_print(STp, ST_DEB_BACKWARD, "setmarks", cmd); |
| 2715 | if (arg != 0) { | 2731 | if (arg != 0) { |
| 2716 | blkno = fileno = (-1); | 2732 | blkno = fileno = (-1); |
| 2717 | at_sm = 1; | 2733 | at_sm = 1; |
| @@ -2732,13 +2748,19 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon | |||
| 2732 | cmd[3] = (arg >> 8); | 2748 | cmd[3] = (arg >> 8); |
| 2733 | cmd[4] = arg; | 2749 | cmd[4] = arg; |
| 2734 | timeout = STp->device->request_queue->rq_timeout; | 2750 | timeout = STp->device->request_queue->rq_timeout; |
| 2735 | DEBC( | 2751 | DEBC( |
| 2736 | if (cmd_in != MTWSM) | 2752 | if (cmd_in != MTWSM) |
| 2737 | printk(ST_DEB_MSG "%s: Writing %d filemarks.\n", name, | 2753 | st_printk(ST_DEB_MSG, STp, |
| 2738 | cmd[2] * 65536 + cmd[3] * 256 + cmd[4]); | 2754 | "Writing %d filemarks.\n", |
| 2739 | else | 2755 | cmd[2] * 65536 + |
| 2740 | printk(ST_DEB_MSG "%s: Writing %d setmarks.\n", name, | 2756 | cmd[3] * 256 + |
| 2741 | cmd[2] * 65536 + cmd[3] * 256 + cmd[4]); | 2757 | cmd[4]); |
| 2758 | else | ||
| 2759 | st_printk(ST_DEB_MSG, STp, | ||
| 2760 | "Writing %d setmarks.\n", | ||
| 2761 | cmd[2] * 65536 + | ||
| 2762 | cmd[3] * 256 + | ||
| 2763 | cmd[4]); | ||
| 2742 | ) | 2764 | ) |
| 2743 | if (fileno >= 0) | 2765 | if (fileno >= 0) |
| 2744 | fileno += arg; | 2766 | fileno += arg; |
| @@ -2751,11 +2773,11 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon | |||
| 2751 | cmd[1] = 1; /* Don't wait for completion */ | 2773 | cmd[1] = 1; /* Don't wait for completion */ |
| 2752 | timeout = STp->device->request_queue->rq_timeout; | 2774 | timeout = STp->device->request_queue->rq_timeout; |
| 2753 | } | 2775 | } |
| 2754 | DEBC(printk(ST_DEB_MSG "%s: Rewinding tape.\n", name)); | 2776 | DEBC_printk(STp, "Rewinding tape.\n"); |
| 2755 | fileno = blkno = at_sm = 0; | 2777 | fileno = blkno = at_sm = 0; |
| 2756 | break; | 2778 | break; |
| 2757 | case MTNOP: | 2779 | case MTNOP: |
| 2758 | DEBC(printk(ST_DEB_MSG "%s: No op on tape.\n", name)); | 2780 | DEBC_printk(STp, "No op on tape.\n"); |
| 2759 | return 0; /* Should do something ? */ | 2781 | return 0; /* Should do something ? */ |
| 2760 | break; | 2782 | break; |
| 2761 | case MTRETEN: | 2783 | case MTRETEN: |
| @@ -2765,7 +2787,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon | |||
| 2765 | timeout = STp->device->request_queue->rq_timeout; | 2787 | timeout = STp->device->request_queue->rq_timeout; |
| 2766 | } | 2788 | } |
| 2767 | cmd[4] = 3; | 2789 | cmd[4] = 3; |
| 2768 | DEBC(printk(ST_DEB_MSG "%s: Retensioning tape.\n", name)); | 2790 | DEBC_printk(STp, "Retensioning tape.\n"); |
| 2769 | fileno = blkno = at_sm = 0; | 2791 | fileno = blkno = at_sm = 0; |
| 2770 | break; | 2792 | break; |
| 2771 | case MTEOM: | 2793 | case MTEOM: |
| @@ -2783,8 +2805,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon | |||
| 2783 | fileno = (-1); | 2805 | fileno = (-1); |
| 2784 | cmd[0] = SPACE; | 2806 | cmd[0] = SPACE; |
| 2785 | cmd[1] = 3; | 2807 | cmd[1] = 3; |
| 2786 | DEBC(printk(ST_DEB_MSG "%s: Spacing to end of recorded medium.\n", | 2808 | DEBC_printk(STp, "Spacing to end of recorded medium.\n"); |
| 2787 | name)); | ||
| 2788 | blkno = -1; | 2809 | blkno = -1; |
| 2789 | at_sm = 0; | 2810 | at_sm = 0; |
| 2790 | break; | 2811 | break; |
| @@ -2800,7 +2821,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon | |||
| 2800 | else | 2821 | else |
| 2801 | timeout = STp->long_timeout * 8; | 2822 | timeout = STp->long_timeout * 8; |
| 2802 | 2823 | ||
| 2803 | DEBC(printk(ST_DEB_MSG "%s: Erasing tape.\n", name)); | 2824 | DEBC_printk(STp, "Erasing tape.\n"); |
| 2804 | fileno = blkno = at_sm = 0; | 2825 | fileno = blkno = at_sm = 0; |
| 2805 | break; | 2826 | break; |
| 2806 | case MTSETBLK: /* Set block length */ | 2827 | case MTSETBLK: /* Set block length */ |
| @@ -2815,7 +2836,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon | |||
| 2815 | STp->max_block > 0 && | 2836 | STp->max_block > 0 && |
| 2816 | ((arg & MT_ST_BLKSIZE_MASK) < STp->min_block || | 2837 | ((arg & MT_ST_BLKSIZE_MASK) < STp->min_block || |
| 2817 | (arg & MT_ST_BLKSIZE_MASK) > STp->max_block)) { | 2838 | (arg & MT_ST_BLKSIZE_MASK) > STp->max_block)) { |
| 2818 | printk(KERN_WARNING "%s: Illegal block size.\n", name); | 2839 | st_printk(KERN_WARNING, STp, "Illegal block size.\n"); |
| 2819 | return (-EINVAL); | 2840 | return (-EINVAL); |
| 2820 | } | 2841 | } |
| 2821 | cmd[0] = MODE_SELECT; | 2842 | cmd[0] = MODE_SELECT; |
| @@ -2848,21 +2869,21 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon | |||
| 2848 | (STp->buffer)->b_data[10] = (ltmp >> 8); | 2869 | (STp->buffer)->b_data[10] = (ltmp >> 8); |
| 2849 | (STp->buffer)->b_data[11] = ltmp; | 2870 | (STp->buffer)->b_data[11] = ltmp; |
| 2850 | timeout = STp->device->request_queue->rq_timeout; | 2871 | timeout = STp->device->request_queue->rq_timeout; |
| 2851 | DEBC( | 2872 | DEBC( |
| 2852 | if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) | 2873 | if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) |
| 2853 | printk(ST_DEB_MSG | 2874 | st_printk(ST_DEB_MSG, STp, |
| 2854 | "%s: Setting block size to %d bytes.\n", name, | 2875 | "Setting block size to %d bytes.\n", |
| 2855 | (STp->buffer)->b_data[9] * 65536 + | 2876 | (STp->buffer)->b_data[9] * 65536 + |
| 2856 | (STp->buffer)->b_data[10] * 256 + | 2877 | (STp->buffer)->b_data[10] * 256 + |
| 2857 | (STp->buffer)->b_data[11]); | 2878 | (STp->buffer)->b_data[11]); |
| 2858 | if (cmd_in == MTSETDENSITY || cmd_in == SET_DENS_AND_BLK) | 2879 | if (cmd_in == MTSETDENSITY || cmd_in == SET_DENS_AND_BLK) |
| 2859 | printk(ST_DEB_MSG | 2880 | st_printk(ST_DEB_MSG, STp, |
| 2860 | "%s: Setting density code to %x.\n", name, | 2881 | "Setting density code to %x.\n", |
| 2861 | (STp->buffer)->b_data[4]); | 2882 | (STp->buffer)->b_data[4]); |
| 2862 | if (cmd_in == MTSETDRVBUFFER) | 2883 | if (cmd_in == MTSETDRVBUFFER) |
| 2863 | printk(ST_DEB_MSG | 2884 | st_printk(ST_DEB_MSG, STp, |
| 2864 | "%s: Setting drive buffer code to %d.\n", name, | 2885 | "Setting drive buffer code to %d.\n", |
| 2865 | ((STp->buffer)->b_data[2] >> 4) & 7); | 2886 | ((STp->buffer)->b_data[2] >> 4) & 7); |
| 2866 | ) | 2887 | ) |
| 2867 | break; | 2888 | break; |
| 2868 | default: | 2889 | default: |
| @@ -3019,7 +3040,6 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti | |||
| 3019 | int result; | 3040 | int result; |
| 3020 | unsigned char scmd[MAX_COMMAND_SIZE]; | 3041 | unsigned char scmd[MAX_COMMAND_SIZE]; |
| 3021 | struct st_request *SRpnt; | 3042 | struct st_request *SRpnt; |
| 3022 | DEB( char *name = tape_name(STp); ) | ||
| 3023 | 3043 | ||
| 3024 | if (STp->ready != ST_READY) | 3044 | if (STp->ready != ST_READY) |
| 3025 | return (-EIO); | 3045 | return (-EIO); |
| @@ -3043,7 +3063,7 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti | |||
| 3043 | (STp->device->scsi_level >= SCSI_2 && | 3063 | (STp->device->scsi_level >= SCSI_2 && |
| 3044 | ((STp->buffer)->b_data[0] & 4) != 0)) { | 3064 | ((STp->buffer)->b_data[0] & 4) != 0)) { |
| 3045 | *block = *partition = 0; | 3065 | *block = *partition = 0; |
| 3046 | DEBC(printk(ST_DEB_MSG "%s: Can't read tape position.\n", name)); | 3066 | DEBC_printk(STp, " Can't read tape position.\n"); |
| 3047 | result = (-EIO); | 3067 | result = (-EIO); |
| 3048 | } else { | 3068 | } else { |
| 3049 | result = 0; | 3069 | result = 0; |
| @@ -3062,8 +3082,8 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti | |||
| 3062 | (STp->buffer)->b_data[1] == 0) /* BOP of partition 0 */ | 3082 | (STp->buffer)->b_data[1] == 0) /* BOP of partition 0 */ |
| 3063 | STp->ps[0].drv_block = STp->ps[0].drv_file = 0; | 3083 | STp->ps[0].drv_block = STp->ps[0].drv_file = 0; |
| 3064 | } | 3084 | } |
| 3065 | DEBC(printk(ST_DEB_MSG "%s: Got tape pos. blk %d part %d.\n", name, | 3085 | DEBC_printk(STp, "Got tape pos. blk %d part %d.\n", |
| 3066 | *block, *partition)); | 3086 | *block, *partition); |
| 3067 | } | 3087 | } |
| 3068 | st_release_request(SRpnt); | 3088 | st_release_request(SRpnt); |
| 3069 | SRpnt = NULL; | 3089 | SRpnt = NULL; |
| @@ -3083,15 +3103,14 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition | |||
| 3083 | int timeout; | 3103 | int timeout; |
| 3084 | unsigned char scmd[MAX_COMMAND_SIZE]; | 3104 | unsigned char scmd[MAX_COMMAND_SIZE]; |
| 3085 | struct st_request *SRpnt; | 3105 | struct st_request *SRpnt; |
| 3086 | DEB( char *name = tape_name(STp); ) | ||
| 3087 | 3106 | ||
| 3088 | if (STp->ready != ST_READY) | 3107 | if (STp->ready != ST_READY) |
| 3089 | return (-EIO); | 3108 | return (-EIO); |
| 3090 | timeout = STp->long_timeout; | 3109 | timeout = STp->long_timeout; |
| 3091 | STps = &(STp->ps[STp->partition]); | 3110 | STps = &(STp->ps[STp->partition]); |
| 3092 | 3111 | ||
| 3093 | DEBC(printk(ST_DEB_MSG "%s: Setting block to %d and partition to %d.\n", | 3112 | DEBC_printk(STp, "Setting block to %d and partition to %d.\n", |
| 3094 | name, block, partition)); | 3113 | block, partition); |
| 3095 | DEB(if (partition < 0) | 3114 | DEB(if (partition < 0) |
| 3096 | return (-EIO); ) | 3115 | return (-EIO); ) |
| 3097 | 3116 | ||
| @@ -3105,9 +3124,9 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition | |||
| 3105 | else { | 3124 | else { |
| 3106 | STps->last_block_valid = 1; | 3125 | STps->last_block_valid = 1; |
| 3107 | STps->last_block_visited = blk; | 3126 | STps->last_block_visited = blk; |
| 3108 | DEBC(printk(ST_DEB_MSG | 3127 | DEBC_printk(STp, "Visited block %d for " |
| 3109 | "%s: Visited block %d for partition %d saved.\n", | 3128 | "partition %d saved.\n", |
| 3110 | name, blk, STp->partition)); | 3129 | blk, STp->partition); |
| 3111 | } | 3130 | } |
| 3112 | } | 3131 | } |
| 3113 | 3132 | ||
| @@ -3129,9 +3148,9 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition | |||
| 3129 | if (STp->partition != partition) { | 3148 | if (STp->partition != partition) { |
| 3130 | scmd[1] |= 2; | 3149 | scmd[1] |= 2; |
| 3131 | scmd[8] = partition; | 3150 | scmd[8] = partition; |
| 3132 | DEBC(printk(ST_DEB_MSG | 3151 | DEBC_printk(STp, "Trying to change partition " |
| 3133 | "%s: Trying to change partition from %d to %d\n", | 3152 | "from %d to %d\n", STp->partition, |
| 3134 | name, STp->partition, partition)); | 3153 | partition); |
| 3135 | } | 3154 | } |
| 3136 | } | 3155 | } |
| 3137 | if (STp->immediate) { | 3156 | if (STp->immediate) { |
| @@ -3222,7 +3241,6 @@ static int switch_partition(struct scsi_tape *STp) | |||
| 3222 | static int nbr_partitions(struct scsi_tape *STp) | 3241 | static int nbr_partitions(struct scsi_tape *STp) |
| 3223 | { | 3242 | { |
| 3224 | int result; | 3243 | int result; |
| 3225 | DEB( char *name = tape_name(STp); ) | ||
| 3226 | 3244 | ||
| 3227 | if (STp->ready != ST_READY) | 3245 | if (STp->ready != ST_READY) |
| 3228 | return (-EIO); | 3246 | return (-EIO); |
| @@ -3230,13 +3248,12 @@ static int nbr_partitions(struct scsi_tape *STp) | |||
| 3230 | result = read_mode_page(STp, PART_PAGE, 1); | 3248 | result = read_mode_page(STp, PART_PAGE, 1); |
| 3231 | 3249 | ||
| 3232 | if (result) { | 3250 | if (result) { |
| 3233 | DEBC(printk(ST_DEB_MSG "%s: Can't read medium partition page.\n", | 3251 | DEBC_printk(STp, "Can't read medium partition page.\n"); |
| 3234 | name)); | ||
| 3235 | result = (-EIO); | 3252 | result = (-EIO); |
| 3236 | } else { | 3253 | } else { |
| 3237 | result = (STp->buffer)->b_data[MODE_HEADER_LENGTH + | 3254 | result = (STp->buffer)->b_data[MODE_HEADER_LENGTH + |
| 3238 | PP_OFF_NBR_ADD_PARTS] + 1; | 3255 | PP_OFF_NBR_ADD_PARTS] + 1; |
| 3239 | DEBC(printk(ST_DEB_MSG "%s: Number of partitions %d.\n", name, result)); | 3256 | DEBC_printk(STp, "Number of partitions %d.\n", result); |
| 3240 | } | 3257 | } |
| 3241 | 3258 | ||
| 3242 | return result; | 3259 | return result; |
| @@ -3264,21 +3281,20 @@ static int nbr_partitions(struct scsi_tape *STp) | |||
| 3264 | */ | 3281 | */ |
| 3265 | static int partition_tape(struct scsi_tape *STp, int size) | 3282 | static int partition_tape(struct scsi_tape *STp, int size) |
| 3266 | { | 3283 | { |
| 3267 | char *name = tape_name(STp); | ||
| 3268 | int result; | 3284 | int result; |
| 3269 | int pgo, psd_cnt, psdo; | 3285 | int pgo, psd_cnt, psdo; |
| 3270 | unsigned char *bp; | 3286 | unsigned char *bp; |
| 3271 | 3287 | ||
| 3272 | result = read_mode_page(STp, PART_PAGE, 0); | 3288 | result = read_mode_page(STp, PART_PAGE, 0); |
| 3273 | if (result) { | 3289 | if (result) { |
| 3274 | DEBC(printk(ST_DEB_MSG "%s: Can't read partition mode page.\n", name)); | 3290 | DEBC_printk(STp, "Can't read partition mode page.\n"); |
| 3275 | return result; | 3291 | return result; |
| 3276 | } | 3292 | } |
| 3277 | /* The mode page is in the buffer. Let's modify it and write it. */ | 3293 | /* The mode page is in the buffer. Let's modify it and write it. */ |
| 3278 | bp = (STp->buffer)->b_data; | 3294 | bp = (STp->buffer)->b_data; |
| 3279 | pgo = MODE_HEADER_LENGTH + bp[MH_OFF_BDESCS_LENGTH]; | 3295 | pgo = MODE_HEADER_LENGTH + bp[MH_OFF_BDESCS_LENGTH]; |
| 3280 | DEBC(printk(ST_DEB_MSG "%s: Partition page length is %d bytes.\n", | 3296 | DEBC_printk(STp, "Partition page length is %d bytes.\n", |
| 3281 | name, bp[pgo + MP_OFF_PAGE_LENGTH] + 2)); | 3297 | bp[pgo + MP_OFF_PAGE_LENGTH] + 2); |
| 3282 | 3298 | ||
| 3283 | psd_cnt = (bp[pgo + MP_OFF_PAGE_LENGTH] + 2 - PART_PAGE_FIXED_LENGTH) / 2; | 3299 | psd_cnt = (bp[pgo + MP_OFF_PAGE_LENGTH] + 2 - PART_PAGE_FIXED_LENGTH) / 2; |
| 3284 | psdo = pgo + PART_PAGE_FIXED_LENGTH; | 3300 | psdo = pgo + PART_PAGE_FIXED_LENGTH; |
| @@ -3288,25 +3304,23 @@ static int partition_tape(struct scsi_tape *STp, int size) | |||
| 3288 | } | 3304 | } |
| 3289 | memset(bp + psdo, 0, bp[pgo + PP_OFF_NBR_ADD_PARTS] * 2); | 3305 | memset(bp + psdo, 0, bp[pgo + PP_OFF_NBR_ADD_PARTS] * 2); |
| 3290 | 3306 | ||
| 3291 | DEBC(printk("%s: psd_cnt %d, max.parts %d, nbr_parts %d\n", name, | 3307 | DEBC_printk(STp, "psd_cnt %d, max.parts %d, nbr_parts %d\n", |
| 3292 | psd_cnt, bp[pgo + PP_OFF_MAX_ADD_PARTS], | 3308 | psd_cnt, bp[pgo + PP_OFF_MAX_ADD_PARTS], |
| 3293 | bp[pgo + PP_OFF_NBR_ADD_PARTS])); | 3309 | bp[pgo + PP_OFF_NBR_ADD_PARTS]); |
| 3294 | 3310 | ||
| 3295 | if (size <= 0) { | 3311 | if (size <= 0) { |
| 3296 | bp[pgo + PP_OFF_NBR_ADD_PARTS] = 0; | 3312 | bp[pgo + PP_OFF_NBR_ADD_PARTS] = 0; |
| 3297 | if (psd_cnt <= bp[pgo + PP_OFF_MAX_ADD_PARTS]) | 3313 | if (psd_cnt <= bp[pgo + PP_OFF_MAX_ADD_PARTS]) |
| 3298 | bp[pgo + MP_OFF_PAGE_LENGTH] = 6; | 3314 | bp[pgo + MP_OFF_PAGE_LENGTH] = 6; |
| 3299 | DEBC(printk(ST_DEB_MSG "%s: Formatting tape with one partition.\n", | 3315 | DEBC_printk(STp, "Formatting tape with one partition.\n"); |
| 3300 | name)); | ||
| 3301 | } else { | 3316 | } else { |
| 3302 | bp[psdo] = (size >> 8) & 0xff; | 3317 | bp[psdo] = (size >> 8) & 0xff; |
| 3303 | bp[psdo + 1] = size & 0xff; | 3318 | bp[psdo + 1] = size & 0xff; |
| 3304 | bp[pgo + 3] = 1; | 3319 | bp[pgo + 3] = 1; |
| 3305 | if (bp[pgo + MP_OFF_PAGE_LENGTH] < 8) | 3320 | if (bp[pgo + MP_OFF_PAGE_LENGTH] < 8) |
| 3306 | bp[pgo + MP_OFF_PAGE_LENGTH] = 8; | 3321 | bp[pgo + MP_OFF_PAGE_LENGTH] = 8; |
| 3307 | DEBC(printk(ST_DEB_MSG | 3322 | DEBC_printk(STp, "Formatting tape with two partitions " |
| 3308 | "%s: Formatting tape with two partitions (1 = %d MB).\n", | 3323 | "(1 = %d MB).\n", size); |
| 3309 | name, size)); | ||
| 3310 | } | 3324 | } |
| 3311 | bp[pgo + PP_OFF_PART_UNITS] = 0; | 3325 | bp[pgo + PP_OFF_PART_UNITS] = 0; |
| 3312 | bp[pgo + PP_OFF_RESERVED] = 0; | 3326 | bp[pgo + PP_OFF_RESERVED] = 0; |
| @@ -3314,7 +3328,7 @@ static int partition_tape(struct scsi_tape *STp, int size) | |||
| 3314 | 3328 | ||
| 3315 | result = write_mode_page(STp, PART_PAGE, 1); | 3329 | result = write_mode_page(STp, PART_PAGE, 1); |
| 3316 | if (result) { | 3330 | if (result) { |
| 3317 | printk(KERN_INFO "%s: Partitioning of tape failed.\n", name); | 3331 | st_printk(KERN_INFO, STp, "Partitioning of tape failed.\n"); |
| 3318 | result = (-EIO); | 3332 | result = (-EIO); |
| 3319 | } | 3333 | } |
| 3320 | 3334 | ||
| @@ -3332,15 +3346,14 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) | |||
| 3332 | struct scsi_tape *STp = file->private_data; | 3346 | struct scsi_tape *STp = file->private_data; |
| 3333 | struct st_modedef *STm; | 3347 | struct st_modedef *STm; |
| 3334 | struct st_partstat *STps; | 3348 | struct st_partstat *STps; |
| 3335 | char *name = tape_name(STp); | ||
| 3336 | void __user *p = (void __user *)arg; | 3349 | void __user *p = (void __user *)arg; |
| 3337 | 3350 | ||
| 3338 | if (mutex_lock_interruptible(&STp->lock)) | 3351 | if (mutex_lock_interruptible(&STp->lock)) |
| 3339 | return -ERESTARTSYS; | 3352 | return -ERESTARTSYS; |
| 3340 | 3353 | ||
| 3341 | DEB( | 3354 | DEB( |
| 3342 | if (debugging && !STp->in_use) { | 3355 | if (debugging && !STp->in_use) { |
| 3343 | printk(ST_DEB_MSG "%s: Incorrect device.\n", name); | 3356 | st_printk(ST_DEB_MSG, STp, "Incorrect device.\n"); |
| 3344 | retval = (-EIO); | 3357 | retval = (-EIO); |
| 3345 | goto out; | 3358 | goto out; |
| 3346 | } ) /* end DEB */ | 3359 | } ) /* end DEB */ |
| @@ -3378,8 +3391,8 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) | |||
| 3378 | } | 3391 | } |
| 3379 | 3392 | ||
| 3380 | if (mtc.mt_op == MTSETDRVBUFFER && !capable(CAP_SYS_ADMIN)) { | 3393 | if (mtc.mt_op == MTSETDRVBUFFER && !capable(CAP_SYS_ADMIN)) { |
| 3381 | printk(KERN_WARNING | 3394 | st_printk(KERN_WARNING, STp, |
| 3382 | "%s: MTSETDRVBUFFER only allowed for root.\n", name); | 3395 | "MTSETDRVBUFFER only allowed for root.\n"); |
| 3383 | retval = (-EPERM); | 3396 | retval = (-EPERM); |
| 3384 | goto out; | 3397 | goto out; |
| 3385 | } | 3398 | } |
| @@ -4087,7 +4100,8 @@ static int st_probe(struct device *dev) | |||
| 4087 | return -ENODEV; | 4100 | return -ENODEV; |
| 4088 | if ((stp = st_incompatible(SDp))) { | 4101 | if ((stp = st_incompatible(SDp))) { |
| 4089 | sdev_printk(KERN_INFO, SDp, "Found incompatible tape\n"); | 4102 | sdev_printk(KERN_INFO, SDp, "Found incompatible tape\n"); |
| 4090 | printk(KERN_INFO "st: The suggested driver is %s.\n", stp); | 4103 | sdev_printk(KERN_INFO, SDp, |
| 4104 | "st: The suggested driver is %s.\n", stp); | ||
| 4091 | return -ENODEV; | 4105 | return -ENODEV; |
| 4092 | } | 4106 | } |
| 4093 | 4107 | ||
| @@ -4096,20 +4110,23 @@ static int st_probe(struct device *dev) | |||
| 4096 | i = st_max_sg_segs; | 4110 | i = st_max_sg_segs; |
| 4097 | buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i); | 4111 | buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i); |
| 4098 | if (buffer == NULL) { | 4112 | if (buffer == NULL) { |
| 4099 | printk(KERN_ERR | 4113 | sdev_printk(KERN_ERR, SDp, |
| 4100 | "st: Can't allocate new tape buffer. Device not attached.\n"); | 4114 | "st: Can't allocate new tape buffer. " |
| 4115 | "Device not attached.\n"); | ||
| 4101 | goto out; | 4116 | goto out; |
| 4102 | } | 4117 | } |
| 4103 | 4118 | ||
| 4104 | disk = alloc_disk(1); | 4119 | disk = alloc_disk(1); |
| 4105 | if (!disk) { | 4120 | if (!disk) { |
| 4106 | printk(KERN_ERR "st: out of memory. Device not attached.\n"); | 4121 | sdev_printk(KERN_ERR, SDp, |
| 4122 | "st: out of memory. Device not attached.\n"); | ||
| 4107 | goto out_buffer_free; | 4123 | goto out_buffer_free; |
| 4108 | } | 4124 | } |
| 4109 | 4125 | ||
| 4110 | tpnt = kzalloc(sizeof(struct scsi_tape), GFP_ATOMIC); | 4126 | tpnt = kzalloc(sizeof(struct scsi_tape), GFP_ATOMIC); |
| 4111 | if (tpnt == NULL) { | 4127 | if (tpnt == NULL) { |
| 4112 | printk(KERN_ERR "st: Can't allocate device descriptor.\n"); | 4128 | sdev_printk(KERN_ERR, SDp, |
| 4129 | "st: Can't allocate device descriptor.\n"); | ||
| 4113 | goto out_put_disk; | 4130 | goto out_put_disk; |
| 4114 | } | 4131 | } |
| 4115 | kref_init(&tpnt->kref); | 4132 | kref_init(&tpnt->kref); |
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 9969fa1ef7c4..fecac5d03fdd 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/device.h> | 33 | #include <linux/device.h> |
| 34 | #include <linux/hyperv.h> | 34 | #include <linux/hyperv.h> |
| 35 | #include <linux/mempool.h> | 35 | #include <linux/mempool.h> |
| 36 | #include <linux/blkdev.h> | ||
| 36 | #include <scsi/scsi.h> | 37 | #include <scsi/scsi.h> |
| 37 | #include <scsi/scsi_cmnd.h> | 38 | #include <scsi/scsi_cmnd.h> |
| 38 | #include <scsi/scsi_host.h> | 39 | #include <scsi/scsi_host.h> |
| @@ -326,21 +327,23 @@ MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)"); | |||
| 326 | */ | 327 | */ |
| 327 | static int storvsc_timeout = 180; | 328 | static int storvsc_timeout = 180; |
| 328 | 329 | ||
| 330 | static int msft_blist_flags = BLIST_TRY_VPD_PAGES; | ||
| 331 | |||
| 329 | #define STORVSC_MAX_IO_REQUESTS 200 | 332 | #define STORVSC_MAX_IO_REQUESTS 200 |
| 330 | 333 | ||
| 331 | static void storvsc_on_channel_callback(void *context); | 334 | static void storvsc_on_channel_callback(void *context); |
| 332 | 335 | ||
| 333 | /* | 336 | #define STORVSC_MAX_LUNS_PER_TARGET 255 |
| 334 | * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In | 337 | #define STORVSC_MAX_TARGETS 2 |
| 335 | * reality, the path/target is not used (ie always set to 0) so our | 338 | #define STORVSC_MAX_CHANNELS 8 |
| 336 | * scsi host adapter essentially has 1 bus with 1 target that contains | ||
| 337 | * up to 256 luns. | ||
| 338 | */ | ||
| 339 | #define STORVSC_MAX_LUNS_PER_TARGET 64 | ||
| 340 | #define STORVSC_MAX_TARGETS 1 | ||
| 341 | #define STORVSC_MAX_CHANNELS 1 | ||
| 342 | 339 | ||
| 340 | #define STORVSC_FC_MAX_LUNS_PER_TARGET 255 | ||
| 341 | #define STORVSC_FC_MAX_TARGETS 128 | ||
| 342 | #define STORVSC_FC_MAX_CHANNELS 8 | ||
| 343 | 343 | ||
| 344 | #define STORVSC_IDE_MAX_LUNS_PER_TARGET 64 | ||
| 345 | #define STORVSC_IDE_MAX_TARGETS 1 | ||
| 346 | #define STORVSC_IDE_MAX_CHANNELS 1 | ||
| 344 | 347 | ||
| 345 | struct storvsc_cmd_request { | 348 | struct storvsc_cmd_request { |
| 346 | struct list_head entry; | 349 | struct list_head entry; |
| @@ -1017,6 +1020,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, | |||
| 1017 | case ATA_12: | 1020 | case ATA_12: |
| 1018 | set_host_byte(scmnd, DID_PASSTHROUGH); | 1021 | set_host_byte(scmnd, DID_PASSTHROUGH); |
| 1019 | break; | 1022 | break; |
| 1023 | /* | ||
| 1024 | * On Some Windows hosts TEST_UNIT_READY command can return | ||
| 1025 | * SRB_STATUS_ERROR, let the upper level code deal with it | ||
| 1026 | * based on the sense information. | ||
| 1027 | */ | ||
| 1028 | case TEST_UNIT_READY: | ||
| 1029 | break; | ||
| 1020 | default: | 1030 | default: |
| 1021 | set_host_byte(scmnd, DID_TARGET_FAILURE); | 1031 | set_host_byte(scmnd, DID_TARGET_FAILURE); |
| 1022 | } | 1032 | } |
| @@ -1441,6 +1451,14 @@ static int storvsc_device_configure(struct scsi_device *sdevice) | |||
| 1441 | 1451 | ||
| 1442 | sdevice->no_write_same = 1; | 1452 | sdevice->no_write_same = 1; |
| 1443 | 1453 | ||
| 1454 | /* | ||
| 1455 | * Add blist flags to permit the reading of the VPD pages even when | ||
| 1456 | * the target may claim SPC-2 compliance. MSFT targets currently | ||
| 1457 | * claim SPC-2 compliance while they implement post SPC-2 features. | ||
| 1458 | * With this patch we can correctly handle WRITE_SAME_16 issues. | ||
| 1459 | */ | ||
| 1460 | sdevice->sdev_bflags |= msft_blist_flags; | ||
| 1461 | |||
| 1444 | return 0; | 1462 | return 0; |
| 1445 | } | 1463 | } |
| 1446 | 1464 | ||
| @@ -1518,6 +1536,16 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) | |||
| 1518 | return SUCCESS; | 1536 | return SUCCESS; |
| 1519 | } | 1537 | } |
| 1520 | 1538 | ||
| 1539 | /* | ||
| 1540 | * The host guarantees to respond to each command, although I/O latencies might | ||
| 1541 | * be unbounded on Azure. Reset the timer unconditionally to give the host a | ||
| 1542 | * chance to perform EH. | ||
| 1543 | */ | ||
| 1544 | static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd) | ||
| 1545 | { | ||
| 1546 | return BLK_EH_RESET_TIMER; | ||
| 1547 | } | ||
| 1548 | |||
| 1521 | static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd) | 1549 | static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd) |
| 1522 | { | 1550 | { |
| 1523 | bool allowed = true; | 1551 | bool allowed = true; |
| @@ -1553,9 +1581,19 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) | |||
| 1553 | struct vmscsi_request *vm_srb; | 1581 | struct vmscsi_request *vm_srb; |
| 1554 | struct stor_mem_pools *memp = scmnd->device->hostdata; | 1582 | struct stor_mem_pools *memp = scmnd->device->hostdata; |
| 1555 | 1583 | ||
| 1556 | if (!storvsc_scsi_cmd_ok(scmnd)) { | 1584 | if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) { |
| 1557 | scmnd->scsi_done(scmnd); | 1585 | /* |
| 1558 | return 0; | 1586 | * On legacy hosts filter unimplemented commands. |
| 1587 | * Future hosts are expected to correctly handle | ||
| 1588 | * unsupported commands. Furthermore, it is | ||
| 1589 | * possible that some of the currently | ||
| 1590 | * unsupported commands maybe supported in | ||
| 1591 | * future versions of the host. | ||
| 1592 | */ | ||
| 1593 | if (!storvsc_scsi_cmd_ok(scmnd)) { | ||
| 1594 | scmnd->scsi_done(scmnd); | ||
| 1595 | return 0; | ||
| 1596 | } | ||
| 1559 | } | 1597 | } |
| 1560 | 1598 | ||
| 1561 | request_size = sizeof(struct storvsc_cmd_request); | 1599 | request_size = sizeof(struct storvsc_cmd_request); |
| @@ -1580,26 +1618,24 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) | |||
| 1580 | vm_srb = &cmd_request->vstor_packet.vm_srb; | 1618 | vm_srb = &cmd_request->vstor_packet.vm_srb; |
| 1581 | vm_srb->win8_extension.time_out_value = 60; | 1619 | vm_srb->win8_extension.time_out_value = 60; |
| 1582 | 1620 | ||
| 1621 | vm_srb->win8_extension.srb_flags |= | ||
| 1622 | (SRB_FLAGS_QUEUE_ACTION_ENABLE | | ||
| 1623 | SRB_FLAGS_DISABLE_SYNCH_TRANSFER); | ||
| 1583 | 1624 | ||
| 1584 | /* Build the SRB */ | 1625 | /* Build the SRB */ |
| 1585 | switch (scmnd->sc_data_direction) { | 1626 | switch (scmnd->sc_data_direction) { |
| 1586 | case DMA_TO_DEVICE: | 1627 | case DMA_TO_DEVICE: |
| 1587 | vm_srb->data_in = WRITE_TYPE; | 1628 | vm_srb->data_in = WRITE_TYPE; |
| 1588 | vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT; | 1629 | vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT; |
| 1589 | vm_srb->win8_extension.srb_flags |= | ||
| 1590 | (SRB_FLAGS_QUEUE_ACTION_ENABLE | | ||
| 1591 | SRB_FLAGS_DISABLE_SYNCH_TRANSFER); | ||
| 1592 | break; | 1630 | break; |
| 1593 | case DMA_FROM_DEVICE: | 1631 | case DMA_FROM_DEVICE: |
| 1594 | vm_srb->data_in = READ_TYPE; | 1632 | vm_srb->data_in = READ_TYPE; |
| 1595 | vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN; | 1633 | vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN; |
| 1596 | vm_srb->win8_extension.srb_flags |= | ||
| 1597 | (SRB_FLAGS_QUEUE_ACTION_ENABLE | | ||
| 1598 | SRB_FLAGS_DISABLE_SYNCH_TRANSFER); | ||
| 1599 | break; | 1634 | break; |
| 1600 | default: | 1635 | default: |
| 1601 | vm_srb->data_in = UNKNOWN_TYPE; | 1636 | vm_srb->data_in = UNKNOWN_TYPE; |
| 1602 | vm_srb->win8_extension.srb_flags = 0; | 1637 | vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN | |
| 1638 | SRB_FLAGS_DATA_OUT); | ||
| 1603 | break; | 1639 | break; |
| 1604 | } | 1640 | } |
| 1605 | 1641 | ||
| @@ -1687,11 +1723,11 @@ static struct scsi_host_template scsi_driver = { | |||
| 1687 | .bios_param = storvsc_get_chs, | 1723 | .bios_param = storvsc_get_chs, |
| 1688 | .queuecommand = storvsc_queuecommand, | 1724 | .queuecommand = storvsc_queuecommand, |
| 1689 | .eh_host_reset_handler = storvsc_host_reset_handler, | 1725 | .eh_host_reset_handler = storvsc_host_reset_handler, |
| 1726 | .eh_timed_out = storvsc_eh_timed_out, | ||
| 1690 | .slave_alloc = storvsc_device_alloc, | 1727 | .slave_alloc = storvsc_device_alloc, |
| 1691 | .slave_destroy = storvsc_device_destroy, | 1728 | .slave_destroy = storvsc_device_destroy, |
| 1692 | .slave_configure = storvsc_device_configure, | 1729 | .slave_configure = storvsc_device_configure, |
| 1693 | .cmd_per_lun = 1, | 1730 | .cmd_per_lun = 255, |
| 1694 | /* 64 max_queue * 1 target */ | ||
| 1695 | .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS, | 1731 | .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS, |
| 1696 | .this_id = -1, | 1732 | .this_id = -1, |
| 1697 | /* no use setting to 0 since ll_blk_rw reset it to 1 */ | 1733 | /* no use setting to 0 since ll_blk_rw reset it to 1 */ |
| @@ -1743,19 +1779,25 @@ static int storvsc_probe(struct hv_device *device, | |||
| 1743 | * set state to properly communicate with the host. | 1779 | * set state to properly communicate with the host. |
| 1744 | */ | 1780 | */ |
| 1745 | 1781 | ||
| 1746 | if (vmbus_proto_version == VERSION_WIN8) { | 1782 | switch (vmbus_proto_version) { |
| 1747 | sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE; | 1783 | case VERSION_WS2008: |
| 1748 | vmscsi_size_delta = 0; | 1784 | case VERSION_WIN7: |
| 1749 | vmstor_current_major = VMSTOR_WIN8_MAJOR; | ||
| 1750 | vmstor_current_minor = VMSTOR_WIN8_MINOR; | ||
| 1751 | } else { | ||
| 1752 | sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE; | 1785 | sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE; |
| 1753 | vmscsi_size_delta = sizeof(struct vmscsi_win8_extension); | 1786 | vmscsi_size_delta = sizeof(struct vmscsi_win8_extension); |
| 1754 | vmstor_current_major = VMSTOR_WIN7_MAJOR; | 1787 | vmstor_current_major = VMSTOR_WIN7_MAJOR; |
| 1755 | vmstor_current_minor = VMSTOR_WIN7_MINOR; | 1788 | vmstor_current_minor = VMSTOR_WIN7_MINOR; |
| 1789 | break; | ||
| 1790 | default: | ||
| 1791 | sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE; | ||
| 1792 | vmscsi_size_delta = 0; | ||
| 1793 | vmstor_current_major = VMSTOR_WIN8_MAJOR; | ||
| 1794 | vmstor_current_minor = VMSTOR_WIN8_MINOR; | ||
| 1795 | break; | ||
| 1756 | } | 1796 | } |
| 1757 | 1797 | ||
| 1758 | 1798 | if (dev_id->driver_data == SFC_GUID) | |
| 1799 | scsi_driver.can_queue = (STORVSC_MAX_IO_REQUESTS * | ||
| 1800 | STORVSC_FC_MAX_TARGETS); | ||
| 1759 | host = scsi_host_alloc(&scsi_driver, | 1801 | host = scsi_host_alloc(&scsi_driver, |
| 1760 | sizeof(struct hv_host_device)); | 1802 | sizeof(struct hv_host_device)); |
| 1761 | if (!host) | 1803 | if (!host) |
| @@ -1789,12 +1831,25 @@ static int storvsc_probe(struct hv_device *device, | |||
| 1789 | host_dev->path = stor_device->path_id; | 1831 | host_dev->path = stor_device->path_id; |
| 1790 | host_dev->target = stor_device->target_id; | 1832 | host_dev->target = stor_device->target_id; |
| 1791 | 1833 | ||
| 1792 | /* max # of devices per target */ | 1834 | switch (dev_id->driver_data) { |
| 1793 | host->max_lun = STORVSC_MAX_LUNS_PER_TARGET; | 1835 | case SFC_GUID: |
| 1794 | /* max # of targets per channel */ | 1836 | host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET; |
| 1795 | host->max_id = STORVSC_MAX_TARGETS; | 1837 | host->max_id = STORVSC_FC_MAX_TARGETS; |
| 1796 | /* max # of channels */ | 1838 | host->max_channel = STORVSC_FC_MAX_CHANNELS - 1; |
| 1797 | host->max_channel = STORVSC_MAX_CHANNELS - 1; | 1839 | break; |
| 1840 | |||
| 1841 | case SCSI_GUID: | ||
| 1842 | host->max_lun = STORVSC_MAX_LUNS_PER_TARGET; | ||
| 1843 | host->max_id = STORVSC_MAX_TARGETS; | ||
| 1844 | host->max_channel = STORVSC_MAX_CHANNELS - 1; | ||
| 1845 | break; | ||
| 1846 | |||
| 1847 | default: | ||
| 1848 | host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET; | ||
| 1849 | host->max_id = STORVSC_IDE_MAX_TARGETS; | ||
| 1850 | host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1; | ||
| 1851 | break; | ||
| 1852 | } | ||
| 1798 | /* max cmd length */ | 1853 | /* max cmd length */ |
| 1799 | host->max_cmd_len = STORVSC_MAX_CMD_LEN; | 1854 | host->max_cmd_len = STORVSC_MAX_CMD_LEN; |
| 1800 | 1855 | ||
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c index 88220794cc98..1a2367a1b1f2 100644 --- a/drivers/scsi/sun3_NCR5380.c +++ b/drivers/scsi/sun3_NCR5380.c | |||
| @@ -355,17 +355,18 @@ static void __init init_tags( void ) | |||
| 355 | 355 | ||
| 356 | static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) | 356 | static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) |
| 357 | { | 357 | { |
| 358 | u8 lun = cmd->device->lun; | ||
| 358 | SETUP_HOSTDATA(cmd->device->host); | 359 | SETUP_HOSTDATA(cmd->device->host); |
| 359 | 360 | ||
| 360 | if (hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)) | 361 | if (hostdata->busy[cmd->device->id] & (1 << lun)) |
| 361 | return( 1 ); | 362 | return( 1 ); |
| 362 | if (!should_be_tagged || | 363 | if (!should_be_tagged || |
| 363 | !setup_use_tagged_queuing || !cmd->device->tagged_supported) | 364 | !setup_use_tagged_queuing || !cmd->device->tagged_supported) |
| 364 | return( 0 ); | 365 | return( 0 ); |
| 365 | if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >= | 366 | if (TagAlloc[cmd->device->id][lun].nr_allocated >= |
| 366 | TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) { | 367 | TagAlloc[cmd->device->id][lun].queue_size ) { |
| 367 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", | 368 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", |
| 368 | H_NO(cmd), cmd->device->id, cmd->device->lun ); | 369 | H_NO(cmd), cmd->device->id, lun ); |
| 369 | return( 1 ); | 370 | return( 1 ); |
| 370 | } | 371 | } |
| 371 | return( 0 ); | 372 | return( 0 ); |
| @@ -379,6 +380,7 @@ static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) | |||
| 379 | 380 | ||
| 380 | static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) | 381 | static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) |
| 381 | { | 382 | { |
| 383 | u8 lun = cmd->device->lun; | ||
| 382 | SETUP_HOSTDATA(cmd->device->host); | 384 | SETUP_HOSTDATA(cmd->device->host); |
| 383 | 385 | ||
| 384 | /* If we or the target don't support tagged queuing, allocate the LUN for | 386 | /* If we or the target don't support tagged queuing, allocate the LUN for |
| @@ -387,19 +389,19 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) | |||
| 387 | if (!should_be_tagged || | 389 | if (!should_be_tagged || |
| 388 | !setup_use_tagged_queuing || !cmd->device->tagged_supported) { | 390 | !setup_use_tagged_queuing || !cmd->device->tagged_supported) { |
| 389 | cmd->tag = TAG_NONE; | 391 | cmd->tag = TAG_NONE; |
| 390 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); | 392 | hostdata->busy[cmd->device->id] |= (1 << lun); |
| 391 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " | 393 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " |
| 392 | "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun ); | 394 | "command\n", H_NO(cmd), cmd->device->id, lun ); |
| 393 | } | 395 | } |
| 394 | else { | 396 | else { |
| 395 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; | 397 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun]; |
| 396 | 398 | ||
| 397 | cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS ); | 399 | cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS ); |
| 398 | set_bit( cmd->tag, &ta->allocated ); | 400 | set_bit( cmd->tag, &ta->allocated ); |
| 399 | ta->nr_allocated++; | 401 | ta->nr_allocated++; |
| 400 | dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d " | 402 | dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d " |
| 401 | "(now %d tags in use)\n", | 403 | "(now %d tags in use)\n", |
| 402 | H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun, | 404 | H_NO(cmd), cmd->tag, cmd->device->id, lun, |
| 403 | ta->nr_allocated ); | 405 | ta->nr_allocated ); |
| 404 | } | 406 | } |
| 405 | } | 407 | } |
| @@ -411,23 +413,24 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) | |||
| 411 | 413 | ||
| 412 | static void cmd_free_tag(struct scsi_cmnd *cmd) | 414 | static void cmd_free_tag(struct scsi_cmnd *cmd) |
| 413 | { | 415 | { |
| 416 | u8 lun = cmd->device->lun; | ||
| 414 | SETUP_HOSTDATA(cmd->device->host); | 417 | SETUP_HOSTDATA(cmd->device->host); |
| 415 | 418 | ||
| 416 | if (cmd->tag == TAG_NONE) { | 419 | if (cmd->tag == TAG_NONE) { |
| 417 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 420 | hostdata->busy[cmd->device->id] &= ~(1 << lun); |
| 418 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n", | 421 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n", |
| 419 | H_NO(cmd), cmd->device->id, cmd->device->lun ); | 422 | H_NO(cmd), cmd->device->id, lun ); |
| 420 | } | 423 | } |
| 421 | else if (cmd->tag >= MAX_TAGS) { | 424 | else if (cmd->tag >= MAX_TAGS) { |
| 422 | printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", | 425 | printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", |
| 423 | H_NO(cmd), cmd->tag ); | 426 | H_NO(cmd), cmd->tag ); |
| 424 | } | 427 | } |
| 425 | else { | 428 | else { |
| 426 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; | 429 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun]; |
| 427 | clear_bit( cmd->tag, &ta->allocated ); | 430 | clear_bit( cmd->tag, &ta->allocated ); |
| 428 | ta->nr_allocated--; | 431 | ta->nr_allocated--; |
| 429 | dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", | 432 | dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", |
| 430 | H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun ); | 433 | H_NO(cmd), cmd->tag, cmd->device->id, lun ); |
| 431 | } | 434 | } |
| 432 | } | 435 | } |
| 433 | 436 | ||
| @@ -659,7 +662,7 @@ static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd) | |||
| 659 | { | 662 | { |
| 660 | int i, s; | 663 | int i, s; |
| 661 | unsigned char *command; | 664 | unsigned char *command; |
| 662 | printk("scsi%d: destination target %d, lun %d\n", | 665 | printk("scsi%d: destination target %d, lun %llu\n", |
| 663 | H_NO(cmd), cmd->device->id, cmd->device->lun); | 666 | H_NO(cmd), cmd->device->id, cmd->device->lun); |
| 664 | printk(KERN_CONT " command = "); | 667 | printk(KERN_CONT " command = "); |
| 665 | command = cmd->cmnd; | 668 | command = cmd->cmnd; |
| @@ -705,7 +708,7 @@ static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m) | |||
| 705 | { | 708 | { |
| 706 | int i, s; | 709 | int i, s; |
| 707 | unsigned char *command; | 710 | unsigned char *command; |
| 708 | seq_printf(m, "scsi%d: destination target %d, lun %d\n", | 711 | seq_printf(m, "scsi%d: destination target %d, lun %llu\n", |
| 709 | H_NO(cmd), cmd->device->id, cmd->device->lun); | 712 | H_NO(cmd), cmd->device->id, cmd->device->lun); |
| 710 | seq_printf(m, " command = "); | 713 | seq_printf(m, " command = "); |
| 711 | command = cmd->cmnd; | 714 | command = cmd->cmnd; |
| @@ -1007,7 +1010,7 @@ static void NCR5380_main (struct work_struct *bl) | |||
| 1007 | prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) { | 1010 | prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) { |
| 1008 | 1011 | ||
| 1009 | if (prev != tmp) | 1012 | if (prev != tmp) |
| 1010 | dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun); | 1013 | dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%llu\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun); |
| 1011 | /* When we find one, remove it from the issue queue. */ | 1014 | /* When we find one, remove it from the issue queue. */ |
| 1012 | /* ++guenther: possible race with Falcon locking */ | 1015 | /* ++guenther: possible race with Falcon locking */ |
| 1013 | if ( | 1016 | if ( |
| @@ -1038,7 +1041,7 @@ static void NCR5380_main (struct work_struct *bl) | |||
| 1038 | * issue queue so we can keep trying. | 1041 | * issue queue so we can keep trying. |
| 1039 | */ | 1042 | */ |
| 1040 | dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d " | 1043 | dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d " |
| 1041 | "lun %d removed from issue_queue\n", | 1044 | "lun %llu removed from issue_queue\n", |
| 1042 | HOSTNO, tmp->device->id, tmp->device->lun); | 1045 | HOSTNO, tmp->device->id, tmp->device->lun); |
| 1043 | /* | 1046 | /* |
| 1044 | * REQUEST SENSE commands are issued without tagged | 1047 | * REQUEST SENSE commands are issued without tagged |
| @@ -2020,7 +2023,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2020 | * accesses to this device will use the | 2023 | * accesses to this device will use the |
| 2021 | * polled-IO. */ | 2024 | * polled-IO. */ |
| 2022 | printk(KERN_NOTICE "scsi%d: switching target %d " | 2025 | printk(KERN_NOTICE "scsi%d: switching target %d " |
| 2023 | "lun %d to slow handshake\n", HOSTNO, | 2026 | "lun %llu to slow handshake\n", HOSTNO, |
| 2024 | cmd->device->id, cmd->device->lun); | 2027 | cmd->device->id, cmd->device->lun); |
| 2025 | cmd->device->borken = 1; | 2028 | cmd->device->borken = 1; |
| 2026 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | 2029 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | |
| @@ -2078,7 +2081,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2078 | /* Accept message by clearing ACK */ | 2081 | /* Accept message by clearing ACK */ |
| 2079 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2082 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2080 | 2083 | ||
| 2081 | dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command " | 2084 | dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked command " |
| 2082 | "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); | 2085 | "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); |
| 2083 | 2086 | ||
| 2084 | /* Enable reselect interrupts */ | 2087 | /* Enable reselect interrupts */ |
| @@ -2090,7 +2093,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2090 | */ | 2093 | */ |
| 2091 | 2094 | ||
| 2092 | if (!cmd->next_link) { | 2095 | if (!cmd->next_link) { |
| 2093 | printk(KERN_NOTICE "scsi%d: target %d lun %d " | 2096 | printk(KERN_NOTICE "scsi%d: target %d lun %llu " |
| 2094 | "linked command complete, no next_link\n", | 2097 | "linked command complete, no next_link\n", |
| 2095 | HOSTNO, cmd->device->id, cmd->device->lun); | 2098 | HOSTNO, cmd->device->id, cmd->device->lun); |
| 2096 | sink = 1; | 2099 | sink = 1; |
| @@ -2103,7 +2106,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2103 | * and don't free it! */ | 2106 | * and don't free it! */ |
| 2104 | cmd->next_link->tag = cmd->tag; | 2107 | cmd->next_link->tag = cmd->tag; |
| 2105 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); | 2108 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); |
| 2106 | dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request " | 2109 | dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request " |
| 2107 | "done, calling scsi_done().\n", | 2110 | "done, calling scsi_done().\n", |
| 2108 | HOSTNO, cmd->device->id, cmd->device->lun); | 2111 | HOSTNO, cmd->device->id, cmd->device->lun); |
| 2109 | #ifdef NCR5380_STATS | 2112 | #ifdef NCR5380_STATS |
| @@ -2118,7 +2121,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2118 | /* Accept message by clearing ACK */ | 2121 | /* Accept message by clearing ACK */ |
| 2119 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2122 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
| 2120 | hostdata->connected = NULL; | 2123 | hostdata->connected = NULL; |
| 2121 | dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d " | 2124 | dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu " |
| 2122 | "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); | 2125 | "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); |
| 2123 | #ifdef SUPPORT_TAGS | 2126 | #ifdef SUPPORT_TAGS |
| 2124 | cmd_free_tag( cmd ); | 2127 | cmd_free_tag( cmd ); |
| @@ -2132,7 +2135,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2132 | /* ++Andreas: the mid level code knows about | 2135 | /* ++Andreas: the mid level code knows about |
| 2133 | QUEUE_FULL now. */ | 2136 | QUEUE_FULL now. */ |
| 2134 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; | 2137 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; |
| 2135 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned " | 2138 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned " |
| 2136 | "QUEUE_FULL after %d commands\n", | 2139 | "QUEUE_FULL after %d commands\n", |
| 2137 | HOSTNO, cmd->device->id, cmd->device->lun, | 2140 | HOSTNO, cmd->device->id, cmd->device->lun, |
| 2138 | ta->nr_allocated); | 2141 | ta->nr_allocated); |
| @@ -2228,7 +2231,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2228 | cmd->device->tagged_supported = 0; | 2231 | cmd->device->tagged_supported = 0; |
| 2229 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); | 2232 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); |
| 2230 | cmd->tag = TAG_NONE; | 2233 | cmd->tag = TAG_NONE; |
| 2231 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected " | 2234 | dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu rejected " |
| 2232 | "QUEUE_TAG message; tagged queuing " | 2235 | "QUEUE_TAG message; tagged queuing " |
| 2233 | "disabled\n", | 2236 | "disabled\n", |
| 2234 | HOSTNO, cmd->device->id, cmd->device->lun); | 2237 | HOSTNO, cmd->device->id, cmd->device->lun); |
| @@ -2245,7 +2248,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2245 | hostdata->connected = NULL; | 2248 | hostdata->connected = NULL; |
| 2246 | hostdata->disconnected_queue = cmd; | 2249 | hostdata->disconnected_queue = cmd; |
| 2247 | local_irq_restore(flags); | 2250 | local_irq_restore(flags); |
| 2248 | dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was " | 2251 | dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %llu was " |
| 2249 | "moved from connected to the " | 2252 | "moved from connected to the " |
| 2250 | "disconnected_queue\n", HOSTNO, | 2253 | "disconnected_queue\n", HOSTNO, |
| 2251 | cmd->device->id, cmd->device->lun); | 2254 | cmd->device->id, cmd->device->lun); |
| @@ -2349,12 +2352,12 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) | |||
| 2349 | printk("\n"); | 2352 | printk("\n"); |
| 2350 | } else if (tmp != EXTENDED_MESSAGE) | 2353 | } else if (tmp != EXTENDED_MESSAGE) |
| 2351 | printk(KERN_DEBUG "scsi%d: rejecting unknown " | 2354 | printk(KERN_DEBUG "scsi%d: rejecting unknown " |
| 2352 | "message %02x from target %d, lun %d\n", | 2355 | "message %02x from target %d, lun %llu\n", |
| 2353 | HOSTNO, tmp, cmd->device->id, cmd->device->lun); | 2356 | HOSTNO, tmp, cmd->device->id, cmd->device->lun); |
| 2354 | else | 2357 | else |
| 2355 | printk(KERN_DEBUG "scsi%d: rejecting unknown " | 2358 | printk(KERN_DEBUG "scsi%d: rejecting unknown " |
| 2356 | "extended message " | 2359 | "extended message " |
| 2357 | "code %02x, length %d from target %d, lun %d\n", | 2360 | "code %02x, length %d from target %d, lun %llu\n", |
| 2358 | HOSTNO, extended_msg[1], extended_msg[0], | 2361 | HOSTNO, extended_msg[1], extended_msg[0], |
| 2359 | cmd->device->id, cmd->device->lun); | 2362 | cmd->device->id, cmd->device->lun); |
| 2360 | 2363 | ||
| @@ -2576,7 +2579,7 @@ static void NCR5380_reselect (struct Scsi_Host *instance) | |||
| 2576 | #endif | 2579 | #endif |
| 2577 | 2580 | ||
| 2578 | hostdata->connected = tmp; | 2581 | hostdata->connected = tmp; |
| 2579 | dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", | 2582 | dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n", |
| 2580 | HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); | 2583 | HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); |
| 2581 | } | 2584 | } |
| 2582 | 2585 | ||
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 6d3ee1ab6362..e59e6f96b725 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c | |||
| @@ -851,7 +851,7 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev) | |||
| 851 | * so let's try to stop all on-going I/O. | 851 | * so let's try to stop all on-going I/O. |
| 852 | */ | 852 | */ |
| 853 | starget_printk(KERN_WARNING, tp->starget, | 853 | starget_printk(KERN_WARNING, tp->starget, |
| 854 | "Removing busy LCB (%d)\n", sdev->lun); | 854 | "Removing busy LCB (%d)\n", (u8)sdev->lun); |
| 855 | sym_reset_scsi_bus(np, 1); | 855 | sym_reset_scsi_bus(np, 1); |
| 856 | } | 856 | } |
| 857 | 857 | ||
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h index 5a80cbac3f92..a141b1758033 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.h +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h | |||
| @@ -581,7 +581,7 @@ struct sym_pmc { | |||
| 581 | #define sym_lp(tp, lun) (!lun) ? (tp)->lun0p : NULL | 581 | #define sym_lp(tp, lun) (!lun) ? (tp)->lun0p : NULL |
| 582 | #else | 582 | #else |
| 583 | #define sym_lp(tp, lun) \ | 583 | #define sym_lp(tp, lun) \ |
| 584 | (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : NULL | 584 | (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[((u8)lun)] : NULL |
| 585 | #endif | 585 | #endif |
| 586 | 586 | ||
| 587 | /* | 587 | /* |
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c index b006cf789ba1..764575726c85 100644 --- a/drivers/scsi/tmscsim.c +++ b/drivers/scsi/tmscsim.c | |||
| @@ -621,7 +621,7 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr | |||
| 621 | { | 621 | { |
| 622 | dc390_freetag (pDCB, pSRB); | 622 | dc390_freetag (pDCB, pSRB); |
| 623 | DEBUG0(printk ("DC390: Interrupt during Start SCSI (target %02i-%02i)\n", | 623 | DEBUG0(printk ("DC390: Interrupt during Start SCSI (target %02i-%02i)\n", |
| 624 | scmd->device->id, scmd->device->lun)); | 624 | scmd->device->id, (u8)scmd->device->lun)); |
| 625 | pSRB->SRBState = SRB_READY; | 625 | pSRB->SRBState = SRB_READY; |
| 626 | //DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); | 626 | //DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); |
| 627 | pACB->SelLost++; | 627 | pACB->SelLost++; |
| @@ -1726,7 +1726,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* | |||
| 1726 | } else { | 1726 | } else { |
| 1727 | SET_RES_DRV(pcmd->result, DRIVER_SENSE); | 1727 | SET_RES_DRV(pcmd->result, DRIVER_SENSE); |
| 1728 | //pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8); | 1728 | //pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8); |
| 1729 | DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); | 1729 | DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, (u8)pcmd->device->lun)); |
| 1730 | pSRB->TotalXferredLen = 0; | 1730 | pSRB->TotalXferredLen = 0; |
| 1731 | SET_RES_DID(pcmd->result, DID_SOFT_ERROR); | 1731 | SET_RES_DID(pcmd->result, DID_SOFT_ERROR); |
| 1732 | } | 1732 | } |
| @@ -1746,7 +1746,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* | |||
| 1746 | else if (status == SAM_STAT_TASK_SET_FULL) | 1746 | else if (status == SAM_STAT_TASK_SET_FULL) |
| 1747 | { | 1747 | { |
| 1748 | scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1); | 1748 | scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1); |
| 1749 | DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); | 1749 | DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, (u8)pcmd->device->lun)); |
| 1750 | pSRB->TotalXferredLen = 0; | 1750 | pSRB->TotalXferredLen = 0; |
| 1751 | SET_RES_DID(pcmd->result, DID_SOFT_ERROR); | 1751 | SET_RES_DID(pcmd->result, DID_SOFT_ERROR); |
| 1752 | } | 1752 | } |
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c index 5a03bb3bcfef..d8dcf36aed11 100644 --- a/drivers/scsi/u14-34f.c +++ b/drivers/scsi/u14-34f.c | |||
| @@ -1006,7 +1006,7 @@ static int port_detect \ | |||
| 1006 | sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue); | 1006 | sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue); |
| 1007 | 1007 | ||
| 1008 | if (sh[j]->max_id > 8 || sh[j]->max_lun > 8) | 1008 | if (sh[j]->max_id > 8 || sh[j]->max_lun > 8) |
| 1009 | printk("%s: wide SCSI support enabled, max_id %u, max_lun %u.\n", | 1009 | printk("%s: wide SCSI support enabled, max_id %u, max_lun %llu.\n", |
| 1010 | BN(j), sh[j]->max_id, sh[j]->max_lun); | 1010 | BN(j), sh[j]->max_id, sh[j]->max_lun); |
| 1011 | 1011 | ||
| 1012 | for (i = 0; i <= sh[j]->max_channel; i++) | 1012 | for (i = 0; i <= sh[j]->max_channel; i++) |
| @@ -1285,14 +1285,14 @@ static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct | |||
| 1285 | cpp->cpp_index = i; | 1285 | cpp->cpp_index = i; |
| 1286 | SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index; | 1286 | SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index; |
| 1287 | 1287 | ||
| 1288 | if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%d.\n", | 1288 | if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%u.\n", |
| 1289 | BN(j), i, SCpnt->device->channel, SCpnt->device->id, | 1289 | BN(j), i, SCpnt->device->channel, SCpnt->device->id, |
| 1290 | SCpnt->device->lun); | 1290 | (u8)SCpnt->device->lun); |
| 1291 | 1291 | ||
| 1292 | cpp->opcode = OP_SCSI; | 1292 | cpp->opcode = OP_SCSI; |
| 1293 | cpp->channel = SCpnt->device->channel; | 1293 | cpp->channel = SCpnt->device->channel; |
| 1294 | cpp->target = SCpnt->device->id; | 1294 | cpp->target = SCpnt->device->id; |
| 1295 | cpp->lun = SCpnt->device->lun; | 1295 | cpp->lun = (u8)SCpnt->device->lun; |
| 1296 | cpp->SCpnt = SCpnt; | 1296 | cpp->SCpnt = SCpnt; |
| 1297 | cpp->cdb_len = SCpnt->cmd_len; | 1297 | cpp->cdb_len = SCpnt->cmd_len; |
| 1298 | memcpy(cpp->cdb, SCpnt->cmnd, SCpnt->cmd_len); | 1298 | memcpy(cpp->cdb, SCpnt->cmnd, SCpnt->cmd_len); |
| @@ -1663,10 +1663,10 @@ static int reorder(unsigned int j, unsigned long cursec, | |||
| 1663 | if (link_statistics && (overlap || !(flushcount % link_statistics))) | 1663 | if (link_statistics && (overlap || !(flushcount % link_statistics))) |
| 1664 | for (n = 0; n < n_ready; n++) { | 1664 | for (n = 0; n < n_ready; n++) { |
| 1665 | k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; | 1665 | k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; |
| 1666 | printk("%s %d.%d:%d mb %d fc %d nr %d sec %ld ns %u"\ | 1666 | printk("%s %d.%d:%llu mb %d fc %d nr %d sec %ld ns %u"\ |
| 1667 | " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", | 1667 | " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", |
| 1668 | (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target, | 1668 | (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target, |
| 1669 | SCpnt->lun, k, flushcount, n_ready, | 1669 | (u8)SCpnt->lun, k, flushcount, n_ready, |
| 1670 | blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request), | 1670 | blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request), |
| 1671 | cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), | 1671 | cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), |
| 1672 | YESNO(overlap), cpp->xdir); | 1672 | YESNO(overlap), cpp->xdir); |
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index f42d1cee652a..fafcf5e354c6 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h | |||
| @@ -41,7 +41,8 @@ | |||
| 41 | 41 | ||
| 42 | #define MAX_CDB_SIZE 16 | 42 | #define MAX_CDB_SIZE 16 |
| 43 | #define GENERAL_UPIU_REQUEST_SIZE 32 | 43 | #define GENERAL_UPIU_REQUEST_SIZE 32 |
| 44 | #define QUERY_DESC_MAX_SIZE 256 | 44 | #define QUERY_DESC_MAX_SIZE 255 |
| 45 | #define QUERY_DESC_MIN_SIZE 2 | ||
| 45 | #define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \ | 46 | #define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \ |
| 46 | (sizeof(struct utp_upiu_header))) | 47 | (sizeof(struct utp_upiu_header))) |
| 47 | 48 | ||
| @@ -117,6 +118,41 @@ enum attr_idn { | |||
| 117 | QUERY_ATTR_IDN_EE_STATUS = 0x0E, | 118 | QUERY_ATTR_IDN_EE_STATUS = 0x0E, |
| 118 | }; | 119 | }; |
| 119 | 120 | ||
| 121 | /* Descriptor idn for Query requests */ | ||
| 122 | enum desc_idn { | ||
| 123 | QUERY_DESC_IDN_DEVICE = 0x0, | ||
| 124 | QUERY_DESC_IDN_CONFIGURAION = 0x1, | ||
| 125 | QUERY_DESC_IDN_UNIT = 0x2, | ||
| 126 | QUERY_DESC_IDN_RFU_0 = 0x3, | ||
| 127 | QUERY_DESC_IDN_INTERCONNECT = 0x4, | ||
| 128 | QUERY_DESC_IDN_STRING = 0x5, | ||
| 129 | QUERY_DESC_IDN_RFU_1 = 0x6, | ||
| 130 | QUERY_DESC_IDN_GEOMETRY = 0x7, | ||
| 131 | QUERY_DESC_IDN_POWER = 0x8, | ||
| 132 | QUERY_DESC_IDN_RFU_2 = 0x9, | ||
| 133 | }; | ||
| 134 | |||
| 135 | #define UNIT_DESC_MAX_SIZE 0x22 | ||
| 136 | /* Unit descriptor parameters offsets in bytes*/ | ||
| 137 | enum unit_desc_param { | ||
| 138 | UNIT_DESC_PARAM_LEN = 0x0, | ||
| 139 | UNIT_DESC_PARAM_TYPE = 0x1, | ||
| 140 | UNIT_DESC_PARAM_UNIT_INDEX = 0x2, | ||
| 141 | UNIT_DESC_PARAM_LU_ENABLE = 0x3, | ||
| 142 | UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4, | ||
| 143 | UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5, | ||
| 144 | UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6, | ||
| 145 | UNIT_DESC_PARAM_MEM_TYPE = 0x8, | ||
| 146 | UNIT_DESC_PARAM_DATA_RELIABILITY = 0x9, | ||
| 147 | UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA, | ||
| 148 | UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB, | ||
| 149 | UNIT_DESC_PARAM_ERASE_BLK_SIZE = 0x13, | ||
| 150 | UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17, | ||
| 151 | UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18, | ||
| 152 | UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20, | ||
| 153 | UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22, | ||
| 154 | }; | ||
| 155 | |||
| 120 | /* Exception event mask values */ | 156 | /* Exception event mask values */ |
| 121 | enum { | 157 | enum { |
| 122 | MASK_EE_STATUS = 0xFFFF, | 158 | MASK_EE_STATUS = 0xFFFF, |
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index 8b9531204c2b..afaabe2aeac8 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c | |||
| @@ -135,26 +135,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev) | |||
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | /** | 137 | /** |
| 138 | * ufshcd_set_dma_mask - Set dma mask based on the controller | ||
| 139 | * addressing capability | ||
| 140 | * @pdev: PCI device structure | ||
| 141 | * | ||
| 142 | * Returns 0 for success, non-zero for failure | ||
| 143 | */ | ||
| 144 | static int ufshcd_set_dma_mask(struct pci_dev *pdev) | ||
| 145 | { | ||
| 146 | int err; | ||
| 147 | |||
| 148 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) | ||
| 149 | && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) | ||
| 150 | return 0; | ||
| 151 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
| 152 | if (!err) | ||
| 153 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
| 154 | return err; | ||
| 155 | } | ||
| 156 | |||
| 157 | /** | ||
| 158 | * ufshcd_pci_probe - probe routine of the driver | 138 | * ufshcd_pci_probe - probe routine of the driver |
| 159 | * @pdev: pointer to PCI device handle | 139 | * @pdev: pointer to PCI device handle |
| 160 | * @id: PCI device id | 140 | * @id: PCI device id |
| @@ -184,12 +164,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 184 | 164 | ||
| 185 | mmio_base = pcim_iomap_table(pdev)[0]; | 165 | mmio_base = pcim_iomap_table(pdev)[0]; |
| 186 | 166 | ||
| 187 | err = ufshcd_set_dma_mask(pdev); | ||
| 188 | if (err) { | ||
| 189 | dev_err(&pdev->dev, "set dma mask failed\n"); | ||
| 190 | return err; | ||
| 191 | } | ||
| 192 | |||
| 193 | err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq); | 167 | err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq); |
| 194 | if (err) { | 168 | if (err) { |
| 195 | dev_err(&pdev->dev, "Initialization failed\n"); | 169 | dev_err(&pdev->dev, "Initialization failed\n"); |
| @@ -211,7 +185,7 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = { | |||
| 211 | .runtime_idle = ufshcd_pci_runtime_idle, | 185 | .runtime_idle = ufshcd_pci_runtime_idle, |
| 212 | }; | 186 | }; |
| 213 | 187 | ||
| 214 | static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = { | 188 | static const struct pci_device_id ufshcd_pci_tbl[] = { |
| 215 | { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 189 | { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, |
| 216 | { } /* terminate list */ | 190 | { } /* terminate list */ |
| 217 | }; | 191 | }; |
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 0c2877251251..ba27215b8034 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
| @@ -110,6 +110,8 @@ static void ufshcd_tmc_handler(struct ufs_hba *hba); | |||
| 110 | static void ufshcd_async_scan(void *data, async_cookie_t cookie); | 110 | static void ufshcd_async_scan(void *data, async_cookie_t cookie); |
| 111 | static int ufshcd_reset_and_restore(struct ufs_hba *hba); | 111 | static int ufshcd_reset_and_restore(struct ufs_hba *hba); |
| 112 | static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); | 112 | static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); |
| 113 | static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba, | ||
| 114 | struct scsi_device *sdev); | ||
| 113 | 115 | ||
| 114 | /* | 116 | /* |
| 115 | * ufshcd_wait_for_register - wait for register value to change | 117 | * ufshcd_wait_for_register - wait for register value to change |
| @@ -446,30 +448,35 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) | |||
| 446 | * @lrb - pointer to local reference block | 448 | * @lrb - pointer to local reference block |
| 447 | */ | 449 | */ |
| 448 | static | 450 | static |
| 449 | void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | 451 | int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) |
| 450 | { | 452 | { |
| 451 | struct ufs_query_res *query_res = &hba->dev_cmd.query.response; | 453 | struct ufs_query_res *query_res = &hba->dev_cmd.query.response; |
| 452 | 454 | ||
| 453 | /* Get the UPIU response */ | ||
| 454 | query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >> | ||
| 455 | UPIU_RSP_CODE_OFFSET; | ||
| 456 | |||
| 457 | memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); | 455 | memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); |
| 458 | 456 | ||
| 459 | |||
| 460 | /* Get the descriptor */ | 457 | /* Get the descriptor */ |
| 461 | if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { | 458 | if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { |
| 462 | u8 *descp = (u8 *)&lrbp->ucd_rsp_ptr + | 459 | u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + |
| 463 | GENERAL_UPIU_REQUEST_SIZE; | 460 | GENERAL_UPIU_REQUEST_SIZE; |
| 464 | u16 len; | 461 | u16 resp_len; |
| 462 | u16 buf_len; | ||
| 465 | 463 | ||
| 466 | /* data segment length */ | 464 | /* data segment length */ |
| 467 | len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & | 465 | resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & |
| 468 | MASK_QUERY_DATA_SEG_LEN; | 466 | MASK_QUERY_DATA_SEG_LEN; |
| 469 | 467 | buf_len = be16_to_cpu( | |
| 470 | memcpy(hba->dev_cmd.query.descriptor, descp, | 468 | hba->dev_cmd.query.request.upiu_req.length); |
| 471 | min_t(u16, len, QUERY_DESC_MAX_SIZE)); | 469 | if (likely(buf_len >= resp_len)) { |
| 470 | memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); | ||
| 471 | } else { | ||
| 472 | dev_warn(hba->dev, | ||
| 473 | "%s: Response size is bigger than buffer", | ||
| 474 | __func__); | ||
| 475 | return -EINVAL; | ||
| 476 | } | ||
| 472 | } | 477 | } |
| 478 | |||
| 479 | return 0; | ||
| 473 | } | 480 | } |
| 474 | 481 | ||
| 475 | /** | 482 | /** |
| @@ -797,11 +804,9 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, | |||
| 797 | QUERY_OSF_SIZE); | 804 | QUERY_OSF_SIZE); |
| 798 | 805 | ||
| 799 | /* Copy the Descriptor */ | 806 | /* Copy the Descriptor */ |
| 800 | if ((len > 0) && (query->request.upiu_req.opcode == | 807 | if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) |
| 801 | UPIU_QUERY_OPCODE_WRITE_DESC)) { | 808 | memcpy(descp, query->descriptor, len); |
| 802 | memcpy(descp, query->descriptor, | 809 | |
| 803 | min_t(u16, len, QUERY_DESC_MAX_SIZE)); | ||
| 804 | } | ||
| 805 | } | 810 | } |
| 806 | 811 | ||
| 807 | static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) | 812 | static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) |
| @@ -980,6 +985,17 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag) | |||
| 980 | return err; | 985 | return err; |
| 981 | } | 986 | } |
| 982 | 987 | ||
| 988 | static int | ||
| 989 | ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | ||
| 990 | { | ||
| 991 | struct ufs_query_res *query_res = &hba->dev_cmd.query.response; | ||
| 992 | |||
| 993 | /* Get the UPIU response */ | ||
| 994 | query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >> | ||
| 995 | UPIU_RSP_CODE_OFFSET; | ||
| 996 | return query_res->response; | ||
| 997 | } | ||
| 998 | |||
| 983 | /** | 999 | /** |
| 984 | * ufshcd_dev_cmd_completion() - handles device management command responses | 1000 | * ufshcd_dev_cmd_completion() - handles device management command responses |
| 985 | * @hba: per adapter instance | 1001 | * @hba: per adapter instance |
| @@ -1002,7 +1018,9 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | |||
| 1002 | } | 1018 | } |
| 1003 | break; | 1019 | break; |
| 1004 | case UPIU_TRANSACTION_QUERY_RSP: | 1020 | case UPIU_TRANSACTION_QUERY_RSP: |
| 1005 | ufshcd_copy_query_response(hba, lrbp); | 1021 | err = ufshcd_check_query_response(hba, lrbp); |
| 1022 | if (!err) | ||
| 1023 | err = ufshcd_copy_query_response(hba, lrbp); | ||
| 1006 | break; | 1024 | break; |
| 1007 | case UPIU_TRANSACTION_REJECT_UPIU: | 1025 | case UPIU_TRANSACTION_REJECT_UPIU: |
| 1008 | /* TODO: handle Reject UPIU Response */ | 1026 | /* TODO: handle Reject UPIU Response */ |
| @@ -1134,6 +1152,30 @@ out_put_tag: | |||
| 1134 | } | 1152 | } |
| 1135 | 1153 | ||
| 1136 | /** | 1154 | /** |
| 1155 | * ufshcd_init_query() - init the query response and request parameters | ||
| 1156 | * @hba: per-adapter instance | ||
| 1157 | * @request: address of the request pointer to be initialized | ||
| 1158 | * @response: address of the response pointer to be initialized | ||
| 1159 | * @opcode: operation to perform | ||
| 1160 | * @idn: flag idn to access | ||
| 1161 | * @index: LU number to access | ||
| 1162 | * @selector: query/flag/descriptor further identification | ||
| 1163 | */ | ||
| 1164 | static inline void ufshcd_init_query(struct ufs_hba *hba, | ||
| 1165 | struct ufs_query_req **request, struct ufs_query_res **response, | ||
| 1166 | enum query_opcode opcode, u8 idn, u8 index, u8 selector) | ||
| 1167 | { | ||
| 1168 | *request = &hba->dev_cmd.query.request; | ||
| 1169 | *response = &hba->dev_cmd.query.response; | ||
| 1170 | memset(*request, 0, sizeof(struct ufs_query_req)); | ||
| 1171 | memset(*response, 0, sizeof(struct ufs_query_res)); | ||
| 1172 | (*request)->upiu_req.opcode = opcode; | ||
| 1173 | (*request)->upiu_req.idn = idn; | ||
| 1174 | (*request)->upiu_req.index = index; | ||
| 1175 | (*request)->upiu_req.selector = selector; | ||
| 1176 | } | ||
| 1177 | |||
| 1178 | /** | ||
| 1137 | * ufshcd_query_flag() - API function for sending flag query requests | 1179 | * ufshcd_query_flag() - API function for sending flag query requests |
| 1138 | * hba: per-adapter instance | 1180 | * hba: per-adapter instance |
| 1139 | * query_opcode: flag query to perform | 1181 | * query_opcode: flag query to perform |
| @@ -1145,17 +1187,15 @@ out_put_tag: | |||
| 1145 | static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, | 1187 | static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, |
| 1146 | enum flag_idn idn, bool *flag_res) | 1188 | enum flag_idn idn, bool *flag_res) |
| 1147 | { | 1189 | { |
| 1148 | struct ufs_query_req *request; | 1190 | struct ufs_query_req *request = NULL; |
| 1149 | struct ufs_query_res *response; | 1191 | struct ufs_query_res *response = NULL; |
| 1150 | int err; | 1192 | int err, index = 0, selector = 0; |
| 1151 | 1193 | ||
| 1152 | BUG_ON(!hba); | 1194 | BUG_ON(!hba); |
| 1153 | 1195 | ||
| 1154 | mutex_lock(&hba->dev_cmd.lock); | 1196 | mutex_lock(&hba->dev_cmd.lock); |
| 1155 | request = &hba->dev_cmd.query.request; | 1197 | ufshcd_init_query(hba, &request, &response, opcode, idn, index, |
| 1156 | response = &hba->dev_cmd.query.response; | 1198 | selector); |
| 1157 | memset(request, 0, sizeof(struct ufs_query_req)); | ||
| 1158 | memset(response, 0, sizeof(struct ufs_query_res)); | ||
| 1159 | 1199 | ||
| 1160 | switch (opcode) { | 1200 | switch (opcode) { |
| 1161 | case UPIU_QUERY_OPCODE_SET_FLAG: | 1201 | case UPIU_QUERY_OPCODE_SET_FLAG: |
| @@ -1180,12 +1220,8 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, | |||
| 1180 | err = -EINVAL; | 1220 | err = -EINVAL; |
| 1181 | goto out_unlock; | 1221 | goto out_unlock; |
| 1182 | } | 1222 | } |
| 1183 | request->upiu_req.opcode = opcode; | ||
| 1184 | request->upiu_req.idn = idn; | ||
| 1185 | 1223 | ||
| 1186 | /* Send query request */ | 1224 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); |
| 1187 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, | ||
| 1188 | QUERY_REQ_TIMEOUT); | ||
| 1189 | 1225 | ||
| 1190 | if (err) { | 1226 | if (err) { |
| 1191 | dev_err(hba->dev, | 1227 | dev_err(hba->dev, |
| @@ -1217,8 +1253,8 @@ out_unlock: | |||
| 1217 | static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, | 1253 | static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, |
| 1218 | enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) | 1254 | enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) |
| 1219 | { | 1255 | { |
| 1220 | struct ufs_query_req *request; | 1256 | struct ufs_query_req *request = NULL; |
| 1221 | struct ufs_query_res *response; | 1257 | struct ufs_query_res *response = NULL; |
| 1222 | int err; | 1258 | int err; |
| 1223 | 1259 | ||
| 1224 | BUG_ON(!hba); | 1260 | BUG_ON(!hba); |
| @@ -1231,10 +1267,8 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, | |||
| 1231 | } | 1267 | } |
| 1232 | 1268 | ||
| 1233 | mutex_lock(&hba->dev_cmd.lock); | 1269 | mutex_lock(&hba->dev_cmd.lock); |
| 1234 | request = &hba->dev_cmd.query.request; | 1270 | ufshcd_init_query(hba, &request, &response, opcode, idn, index, |
| 1235 | response = &hba->dev_cmd.query.response; | 1271 | selector); |
| 1236 | memset(request, 0, sizeof(struct ufs_query_req)); | ||
| 1237 | memset(response, 0, sizeof(struct ufs_query_res)); | ||
| 1238 | 1272 | ||
| 1239 | switch (opcode) { | 1273 | switch (opcode) { |
| 1240 | case UPIU_QUERY_OPCODE_WRITE_ATTR: | 1274 | case UPIU_QUERY_OPCODE_WRITE_ATTR: |
| @@ -1251,14 +1285,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, | |||
| 1251 | goto out_unlock; | 1285 | goto out_unlock; |
| 1252 | } | 1286 | } |
| 1253 | 1287 | ||
| 1254 | request->upiu_req.opcode = opcode; | 1288 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); |
| 1255 | request->upiu_req.idn = idn; | ||
| 1256 | request->upiu_req.index = index; | ||
| 1257 | request->upiu_req.selector = selector; | ||
| 1258 | |||
| 1259 | /* Send query request */ | ||
| 1260 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, | ||
| 1261 | QUERY_REQ_TIMEOUT); | ||
| 1262 | 1289 | ||
| 1263 | if (err) { | 1290 | if (err) { |
| 1264 | dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", | 1291 | dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", |
| @@ -1275,6 +1302,82 @@ out: | |||
| 1275 | } | 1302 | } |
| 1276 | 1303 | ||
| 1277 | /** | 1304 | /** |
| 1305 | * ufshcd_query_descriptor - API function for sending descriptor requests | ||
| 1306 | * hba: per-adapter instance | ||
| 1307 | * opcode: attribute opcode | ||
| 1308 | * idn: attribute idn to access | ||
| 1309 | * index: index field | ||
| 1310 | * selector: selector field | ||
| 1311 | * desc_buf: the buffer that contains the descriptor | ||
| 1312 | * buf_len: length parameter passed to the device | ||
| 1313 | * | ||
| 1314 | * Returns 0 for success, non-zero in case of failure. | ||
| 1315 | * The buf_len parameter will contain, on return, the length parameter | ||
| 1316 | * received on the response. | ||
| 1317 | */ | ||
| 1318 | static int ufshcd_query_descriptor(struct ufs_hba *hba, | ||
| 1319 | enum query_opcode opcode, enum desc_idn idn, u8 index, | ||
| 1320 | u8 selector, u8 *desc_buf, int *buf_len) | ||
| 1321 | { | ||
| 1322 | struct ufs_query_req *request = NULL; | ||
| 1323 | struct ufs_query_res *response = NULL; | ||
| 1324 | int err; | ||
| 1325 | |||
| 1326 | BUG_ON(!hba); | ||
| 1327 | |||
| 1328 | if (!desc_buf) { | ||
| 1329 | dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", | ||
| 1330 | __func__, opcode); | ||
| 1331 | err = -EINVAL; | ||
| 1332 | goto out; | ||
| 1333 | } | ||
| 1334 | |||
| 1335 | if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { | ||
| 1336 | dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", | ||
| 1337 | __func__, *buf_len); | ||
| 1338 | err = -EINVAL; | ||
| 1339 | goto out; | ||
| 1340 | } | ||
| 1341 | |||
| 1342 | mutex_lock(&hba->dev_cmd.lock); | ||
| 1343 | ufshcd_init_query(hba, &request, &response, opcode, idn, index, | ||
| 1344 | selector); | ||
| 1345 | hba->dev_cmd.query.descriptor = desc_buf; | ||
| 1346 | request->upiu_req.length = cpu_to_be16(*buf_len); | ||
| 1347 | |||
| 1348 | switch (opcode) { | ||
| 1349 | case UPIU_QUERY_OPCODE_WRITE_DESC: | ||
| 1350 | request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; | ||
| 1351 | break; | ||
| 1352 | case UPIU_QUERY_OPCODE_READ_DESC: | ||
| 1353 | request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; | ||
| 1354 | break; | ||
| 1355 | default: | ||
| 1356 | dev_err(hba->dev, | ||
| 1357 | "%s: Expected query descriptor opcode but got = 0x%.2x\n", | ||
| 1358 | __func__, opcode); | ||
| 1359 | err = -EINVAL; | ||
| 1360 | goto out_unlock; | ||
| 1361 | } | ||
| 1362 | |||
| 1363 | err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); | ||
| 1364 | |||
| 1365 | if (err) { | ||
| 1366 | dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n", | ||
| 1367 | __func__, opcode, idn, err); | ||
| 1368 | goto out_unlock; | ||
| 1369 | } | ||
| 1370 | |||
| 1371 | hba->dev_cmd.query.descriptor = NULL; | ||
| 1372 | *buf_len = be16_to_cpu(response->upiu_res.length); | ||
| 1373 | |||
| 1374 | out_unlock: | ||
| 1375 | mutex_unlock(&hba->dev_cmd.lock); | ||
| 1376 | out: | ||
| 1377 | return err; | ||
| 1378 | } | ||
| 1379 | |||
| 1380 | /** | ||
| 1278 | * ufshcd_memory_alloc - allocate memory for host memory space data structures | 1381 | * ufshcd_memory_alloc - allocate memory for host memory space data structures |
| 1279 | * @hba: per adapter instance | 1382 | * @hba: per adapter instance |
| 1280 | * | 1383 | * |
| @@ -1878,6 +1981,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) | |||
| 1878 | static int ufshcd_slave_alloc(struct scsi_device *sdev) | 1981 | static int ufshcd_slave_alloc(struct scsi_device *sdev) |
| 1879 | { | 1982 | { |
| 1880 | struct ufs_hba *hba; | 1983 | struct ufs_hba *hba; |
| 1984 | int lun_qdepth; | ||
| 1881 | 1985 | ||
| 1882 | hba = shost_priv(sdev->host); | 1986 | hba = shost_priv(sdev->host); |
| 1883 | sdev->tagged_supported = 1; | 1987 | sdev->tagged_supported = 1; |
| @@ -1889,14 +1993,68 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) | |||
| 1889 | /* allow SCSI layer to restart the device in case of errors */ | 1993 | /* allow SCSI layer to restart the device in case of errors */ |
| 1890 | sdev->allow_restart = 1; | 1994 | sdev->allow_restart = 1; |
| 1891 | 1995 | ||
| 1892 | /* | 1996 | /* REPORT SUPPORTED OPERATION CODES is not supported */ |
| 1893 | * Inform SCSI Midlayer that the LUN queue depth is same as the | 1997 | sdev->no_report_opcodes = 1; |
| 1894 | * controller queue depth. If a LUN queue depth is less than the | 1998 | |
| 1895 | * controller queue depth and if the LUN reports | 1999 | lun_qdepth = ufshcd_read_sdev_qdepth(hba, sdev); |
| 1896 | * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted | 2000 | if (lun_qdepth <= 0) |
| 1897 | * with scsi_adjust_queue_depth. | 2001 | /* eventually, we can figure out the real queue depth */ |
| 1898 | */ | 2002 | lun_qdepth = hba->nutrs; |
| 1899 | scsi_activate_tcq(sdev, hba->nutrs); | 2003 | else |
| 2004 | lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); | ||
| 2005 | |||
| 2006 | dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", | ||
| 2007 | __func__, lun_qdepth); | ||
| 2008 | scsi_activate_tcq(sdev, lun_qdepth); | ||
| 2009 | |||
| 2010 | return 0; | ||
| 2011 | } | ||
| 2012 | |||
| 2013 | /** | ||
| 2014 | * ufshcd_change_queue_depth - change queue depth | ||
| 2015 | * @sdev: pointer to SCSI device | ||
| 2016 | * @depth: required depth to set | ||
| 2017 | * @reason: reason for changing the depth | ||
| 2018 | * | ||
| 2019 | * Change queue depth according to the reason and make sure | ||
| 2020 | * the max. limits are not crossed. | ||
| 2021 | */ | ||
| 2022 | static int ufshcd_change_queue_depth(struct scsi_device *sdev, | ||
| 2023 | int depth, int reason) | ||
| 2024 | { | ||
| 2025 | struct ufs_hba *hba = shost_priv(sdev->host); | ||
| 2026 | |||
| 2027 | if (depth > hba->nutrs) | ||
| 2028 | depth = hba->nutrs; | ||
| 2029 | |||
| 2030 | switch (reason) { | ||
| 2031 | case SCSI_QDEPTH_DEFAULT: | ||
| 2032 | case SCSI_QDEPTH_RAMP_UP: | ||
| 2033 | if (!sdev->tagged_supported) | ||
| 2034 | depth = 1; | ||
| 2035 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); | ||
| 2036 | break; | ||
| 2037 | case SCSI_QDEPTH_QFULL: | ||
| 2038 | scsi_track_queue_full(sdev, depth); | ||
| 2039 | break; | ||
| 2040 | default: | ||
| 2041 | return -EOPNOTSUPP; | ||
| 2042 | } | ||
| 2043 | |||
| 2044 | return depth; | ||
| 2045 | } | ||
| 2046 | |||
| 2047 | /** | ||
| 2048 | * ufshcd_slave_configure - adjust SCSI device configurations | ||
| 2049 | * @sdev: pointer to SCSI device | ||
| 2050 | */ | ||
| 2051 | static int ufshcd_slave_configure(struct scsi_device *sdev) | ||
| 2052 | { | ||
| 2053 | struct request_queue *q = sdev->request_queue; | ||
| 2054 | |||
| 2055 | blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); | ||
| 2056 | blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX); | ||
| 2057 | |||
| 1900 | return 0; | 2058 | return 0; |
| 1901 | } | 2059 | } |
| 1902 | 2060 | ||
| @@ -1953,42 +2111,6 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp) | |||
| 1953 | } | 2111 | } |
| 1954 | 2112 | ||
| 1955 | /** | 2113 | /** |
| 1956 | * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with | ||
| 1957 | * SAM_STAT_TASK_SET_FULL SCSI command status. | ||
| 1958 | * @cmd: pointer to SCSI command | ||
| 1959 | */ | ||
| 1960 | static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd) | ||
| 1961 | { | ||
| 1962 | struct ufs_hba *hba; | ||
| 1963 | int i; | ||
| 1964 | int lun_qdepth = 0; | ||
| 1965 | |||
| 1966 | hba = shost_priv(cmd->device->host); | ||
| 1967 | |||
| 1968 | /* | ||
| 1969 | * LUN queue depth can be obtained by counting outstanding commands | ||
| 1970 | * on the LUN. | ||
| 1971 | */ | ||
| 1972 | for (i = 0; i < hba->nutrs; i++) { | ||
| 1973 | if (test_bit(i, &hba->outstanding_reqs)) { | ||
| 1974 | |||
| 1975 | /* | ||
| 1976 | * Check if the outstanding command belongs | ||
| 1977 | * to the LUN which reported SAM_STAT_TASK_SET_FULL. | ||
| 1978 | */ | ||
| 1979 | if (cmd->device->lun == hba->lrb[i].lun) | ||
| 1980 | lun_qdepth++; | ||
| 1981 | } | ||
| 1982 | } | ||
| 1983 | |||
| 1984 | /* | ||
| 1985 | * LUN queue depth will be total outstanding commands, except the | ||
| 1986 | * command for which the LUN reported SAM_STAT_TASK_SET_FULL. | ||
| 1987 | */ | ||
| 1988 | scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1); | ||
| 1989 | } | ||
| 1990 | |||
| 1991 | /** | ||
| 1992 | * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status | 2114 | * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status |
| 1993 | * @lrb: pointer to local reference block of completed command | 2115 | * @lrb: pointer to local reference block of completed command |
| 1994 | * @scsi_status: SCSI command status | 2116 | * @scsi_status: SCSI command status |
| @@ -2009,12 +2131,6 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) | |||
| 2009 | scsi_status; | 2131 | scsi_status; |
| 2010 | break; | 2132 | break; |
| 2011 | case SAM_STAT_TASK_SET_FULL: | 2133 | case SAM_STAT_TASK_SET_FULL: |
| 2012 | /* | ||
| 2013 | * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue | ||
| 2014 | * depth needs to be adjusted to the exact number of | ||
| 2015 | * outstanding commands the LUN can handle at any given time. | ||
| 2016 | */ | ||
| 2017 | ufshcd_adjust_lun_qdepth(lrbp->cmd); | ||
| 2018 | case SAM_STAT_BUSY: | 2134 | case SAM_STAT_BUSY: |
| 2019 | case SAM_STAT_TASK_ABORTED: | 2135 | case SAM_STAT_TASK_ABORTED: |
| 2020 | ufshcd_copy_sense_data(lrbp); | 2136 | ufshcd_copy_sense_data(lrbp); |
| @@ -2134,47 +2250,42 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) | |||
| 2134 | u32 tr_doorbell; | 2250 | u32 tr_doorbell; |
| 2135 | int result; | 2251 | int result; |
| 2136 | int index; | 2252 | int index; |
| 2137 | bool int_aggr_reset = false; | 2253 | |
| 2254 | /* Resetting interrupt aggregation counters first and reading the | ||
| 2255 | * DOOR_BELL afterward allows us to handle all the completed requests. | ||
| 2256 | * In order to prevent other interrupts starvation the DB is read once | ||
| 2257 | * after reset. The down side of this solution is the possibility of | ||
| 2258 | * false interrupt if device completes another request after resetting | ||
| 2259 | * aggregation and before reading the DB. | ||
| 2260 | */ | ||
| 2261 | ufshcd_reset_intr_aggr(hba); | ||
| 2138 | 2262 | ||
| 2139 | tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); | 2263 | tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); |
| 2140 | completed_reqs = tr_doorbell ^ hba->outstanding_reqs; | 2264 | completed_reqs = tr_doorbell ^ hba->outstanding_reqs; |
| 2141 | 2265 | ||
| 2142 | for (index = 0; index < hba->nutrs; index++) { | 2266 | for_each_set_bit(index, &completed_reqs, hba->nutrs) { |
| 2143 | if (test_bit(index, &completed_reqs)) { | 2267 | lrbp = &hba->lrb[index]; |
| 2144 | lrbp = &hba->lrb[index]; | 2268 | cmd = lrbp->cmd; |
| 2145 | cmd = lrbp->cmd; | 2269 | if (cmd) { |
| 2146 | /* | 2270 | result = ufshcd_transfer_rsp_status(hba, lrbp); |
| 2147 | * Don't skip resetting interrupt aggregation counters | 2271 | scsi_dma_unmap(cmd); |
| 2148 | * if a regular command is present. | 2272 | cmd->result = result; |
| 2149 | */ | 2273 | /* Mark completed command as NULL in LRB */ |
| 2150 | int_aggr_reset |= !lrbp->intr_cmd; | 2274 | lrbp->cmd = NULL; |
| 2151 | 2275 | clear_bit_unlock(index, &hba->lrb_in_use); | |
| 2152 | if (cmd) { | 2276 | /* Do not touch lrbp after scsi done */ |
| 2153 | result = ufshcd_transfer_rsp_status(hba, lrbp); | 2277 | cmd->scsi_done(cmd); |
| 2154 | scsi_dma_unmap(cmd); | 2278 | } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) { |
| 2155 | cmd->result = result; | 2279 | if (hba->dev_cmd.complete) |
| 2156 | /* Mark completed command as NULL in LRB */ | 2280 | complete(hba->dev_cmd.complete); |
| 2157 | lrbp->cmd = NULL; | 2281 | } |
| 2158 | clear_bit_unlock(index, &hba->lrb_in_use); | 2282 | } |
| 2159 | /* Do not touch lrbp after scsi done */ | ||
| 2160 | cmd->scsi_done(cmd); | ||
| 2161 | } else if (lrbp->command_type == | ||
| 2162 | UTP_CMD_TYPE_DEV_MANAGE) { | ||
| 2163 | if (hba->dev_cmd.complete) | ||
| 2164 | complete(hba->dev_cmd.complete); | ||
| 2165 | } | ||
| 2166 | } /* end of if */ | ||
| 2167 | } /* end of for */ | ||
| 2168 | 2283 | ||
| 2169 | /* clear corresponding bits of completed commands */ | 2284 | /* clear corresponding bits of completed commands */ |
| 2170 | hba->outstanding_reqs ^= completed_reqs; | 2285 | hba->outstanding_reqs ^= completed_reqs; |
| 2171 | 2286 | ||
| 2172 | /* we might have free'd some tags above */ | 2287 | /* we might have free'd some tags above */ |
| 2173 | wake_up(&hba->dev_cmd.tag_wq); | 2288 | wake_up(&hba->dev_cmd.tag_wq); |
| 2174 | |||
| 2175 | /* Reset interrupt aggregation counters */ | ||
| 2176 | if (int_aggr_reset) | ||
| 2177 | ufshcd_reset_intr_aggr(hba); | ||
| 2178 | } | 2289 | } |
| 2179 | 2290 | ||
| 2180 | /** | 2291 | /** |
| @@ -2779,6 +2890,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) | |||
| 2779 | int poll_cnt; | 2890 | int poll_cnt; |
| 2780 | u8 resp = 0xF; | 2891 | u8 resp = 0xF; |
| 2781 | struct ufshcd_lrb *lrbp; | 2892 | struct ufshcd_lrb *lrbp; |
| 2893 | u32 reg; | ||
| 2782 | 2894 | ||
| 2783 | host = cmd->device->host; | 2895 | host = cmd->device->host; |
| 2784 | hba = shost_priv(host); | 2896 | hba = shost_priv(host); |
| @@ -2788,6 +2900,13 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) | |||
| 2788 | if (!(test_bit(tag, &hba->outstanding_reqs))) | 2900 | if (!(test_bit(tag, &hba->outstanding_reqs))) |
| 2789 | goto out; | 2901 | goto out; |
| 2790 | 2902 | ||
| 2903 | reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); | ||
| 2904 | if (!(reg & (1 << tag))) { | ||
| 2905 | dev_err(hba->dev, | ||
| 2906 | "%s: cmd was completed, but without a notifying intr, tag = %d", | ||
| 2907 | __func__, tag); | ||
| 2908 | } | ||
| 2909 | |||
| 2791 | lrbp = &hba->lrb[tag]; | 2910 | lrbp = &hba->lrb[tag]; |
| 2792 | for (poll_cnt = 100; poll_cnt; poll_cnt--) { | 2911 | for (poll_cnt = 100; poll_cnt; poll_cnt--) { |
| 2793 | err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, | 2912 | err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, |
| @@ -2796,8 +2915,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) | |||
| 2796 | /* cmd pending in the device */ | 2915 | /* cmd pending in the device */ |
| 2797 | break; | 2916 | break; |
| 2798 | } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { | 2917 | } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { |
| 2799 | u32 reg; | ||
| 2800 | |||
| 2801 | /* | 2918 | /* |
| 2802 | * cmd not pending in the device, check if it is | 2919 | * cmd not pending in the device, check if it is |
| 2803 | * in transition. | 2920 | * in transition. |
| @@ -2971,6 +3088,38 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) | |||
| 2971 | } | 3088 | } |
| 2972 | 3089 | ||
| 2973 | /** | 3090 | /** |
| 3091 | * ufshcd_read_sdev_qdepth - read the lun command queue depth | ||
| 3092 | * @hba: Pointer to adapter instance | ||
| 3093 | * @sdev: pointer to SCSI device | ||
| 3094 | * | ||
| 3095 | * Return in case of success the lun's queue depth else error. | ||
| 3096 | */ | ||
| 3097 | static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba, | ||
| 3098 | struct scsi_device *sdev) | ||
| 3099 | { | ||
| 3100 | int ret; | ||
| 3101 | int buff_len = UNIT_DESC_MAX_SIZE; | ||
| 3102 | u8 desc_buf[UNIT_DESC_MAX_SIZE]; | ||
| 3103 | |||
| 3104 | ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, | ||
| 3105 | QUERY_DESC_IDN_UNIT, sdev->lun, 0, desc_buf, &buff_len); | ||
| 3106 | |||
| 3107 | if (ret || (buff_len < UNIT_DESC_PARAM_LU_Q_DEPTH)) { | ||
| 3108 | dev_err(hba->dev, | ||
| 3109 | "%s:Failed reading unit descriptor. len = %d ret = %d" | ||
| 3110 | , __func__, buff_len, ret); | ||
| 3111 | if (!ret) | ||
| 3112 | ret = -EINVAL; | ||
| 3113 | |||
| 3114 | goto out; | ||
| 3115 | } | ||
| 3116 | |||
| 3117 | ret = desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH] & 0xFF; | ||
| 3118 | out: | ||
| 3119 | return ret; | ||
| 3120 | } | ||
| 3121 | |||
| 3122 | /** | ||
| 2974 | * ufshcd_async_scan - asynchronous execution for link startup | 3123 | * ufshcd_async_scan - asynchronous execution for link startup |
| 2975 | * @data: data pointer to pass to this function | 3124 | * @data: data pointer to pass to this function |
| 2976 | * @cookie: cookie data | 3125 | * @cookie: cookie data |
| @@ -3012,7 +3161,9 @@ static struct scsi_host_template ufshcd_driver_template = { | |||
| 3012 | .proc_name = UFSHCD, | 3161 | .proc_name = UFSHCD, |
| 3013 | .queuecommand = ufshcd_queuecommand, | 3162 | .queuecommand = ufshcd_queuecommand, |
| 3014 | .slave_alloc = ufshcd_slave_alloc, | 3163 | .slave_alloc = ufshcd_slave_alloc, |
| 3164 | .slave_configure = ufshcd_slave_configure, | ||
| 3015 | .slave_destroy = ufshcd_slave_destroy, | 3165 | .slave_destroy = ufshcd_slave_destroy, |
| 3166 | .change_queue_depth = ufshcd_change_queue_depth, | ||
| 3016 | .eh_abort_handler = ufshcd_abort, | 3167 | .eh_abort_handler = ufshcd_abort, |
| 3017 | .eh_device_reset_handler = ufshcd_eh_device_reset_handler, | 3168 | .eh_device_reset_handler = ufshcd_eh_device_reset_handler, |
| 3018 | .eh_host_reset_handler = ufshcd_eh_host_reset_handler, | 3169 | .eh_host_reset_handler = ufshcd_eh_host_reset_handler, |
| @@ -3110,6 +3261,22 @@ void ufshcd_remove(struct ufs_hba *hba) | |||
| 3110 | EXPORT_SYMBOL_GPL(ufshcd_remove); | 3261 | EXPORT_SYMBOL_GPL(ufshcd_remove); |
| 3111 | 3262 | ||
| 3112 | /** | 3263 | /** |
| 3264 | * ufshcd_set_dma_mask - Set dma mask based on the controller | ||
| 3265 | * addressing capability | ||
| 3266 | * @hba: per adapter instance | ||
| 3267 | * | ||
| 3268 | * Returns 0 for success, non-zero for failure | ||
| 3269 | */ | ||
| 3270 | static int ufshcd_set_dma_mask(struct ufs_hba *hba) | ||
| 3271 | { | ||
| 3272 | if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { | ||
| 3273 | if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) | ||
| 3274 | return 0; | ||
| 3275 | } | ||
| 3276 | return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); | ||
| 3277 | } | ||
| 3278 | |||
| 3279 | /** | ||
| 3113 | * ufshcd_init - Driver initialization routine | 3280 | * ufshcd_init - Driver initialization routine |
| 3114 | * @dev: pointer to device handle | 3281 | * @dev: pointer to device handle |
| 3115 | * @hba_handle: driver private handle | 3282 | * @hba_handle: driver private handle |
| @@ -3160,6 +3327,12 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, | |||
| 3160 | /* Get Interrupt bit mask per version */ | 3327 | /* Get Interrupt bit mask per version */ |
| 3161 | hba->intr_mask = ufshcd_get_intr_mask(hba); | 3328 | hba->intr_mask = ufshcd_get_intr_mask(hba); |
| 3162 | 3329 | ||
| 3330 | err = ufshcd_set_dma_mask(hba); | ||
| 3331 | if (err) { | ||
| 3332 | dev_err(hba->dev, "set dma mask failed\n"); | ||
| 3333 | goto out_disable; | ||
| 3334 | } | ||
| 3335 | |||
| 3163 | /* Allocate memory for host memory space */ | 3336 | /* Allocate memory for host memory space */ |
| 3164 | err = ufshcd_memory_alloc(hba); | 3337 | err = ufshcd_memory_alloc(hba); |
| 3165 | if (err) { | 3338 | if (err) { |
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 9abc7e32b43d..e1b844bc9460 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h | |||
| @@ -296,6 +296,11 @@ enum { | |||
| 296 | MASK_OCS = 0x0F, | 296 | MASK_OCS = 0x0F, |
| 297 | }; | 297 | }; |
| 298 | 298 | ||
| 299 | /* The maximum length of the data byte count field in the PRDT is 256KB */ | ||
| 300 | #define PRDT_DATA_BYTE_COUNT_MAX (256 * 1024) | ||
| 301 | /* The granularity of the data byte count field in the PRDT is 32-bit */ | ||
| 302 | #define PRDT_DATA_BYTE_COUNT_PAD 4 | ||
| 303 | |||
| 299 | /** | 304 | /** |
| 300 | * struct ufshcd_sg_entry - UFSHCI PRD Entry | 305 | * struct ufshcd_sg_entry - UFSHCI PRD Entry |
| 301 | * @base_addr: Lower 32bit physical address DW-0 | 306 | * @base_addr: Lower 32bit physical address DW-0 |
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 308256b5e4cb..eee1bc0b506e 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c | |||
| @@ -27,6 +27,8 @@ | |||
| 27 | #include <scsi/scsi_host.h> | 27 | #include <scsi/scsi_host.h> |
| 28 | #include <scsi/scsi_device.h> | 28 | #include <scsi/scsi_device.h> |
| 29 | #include <scsi/scsi_cmnd.h> | 29 | #include <scsi/scsi_cmnd.h> |
| 30 | #include <scsi/scsi_tcq.h> | ||
| 31 | #include <linux/seqlock.h> | ||
| 30 | 32 | ||
| 31 | #define VIRTIO_SCSI_MEMPOOL_SZ 64 | 33 | #define VIRTIO_SCSI_MEMPOOL_SZ 64 |
| 32 | #define VIRTIO_SCSI_EVENT_LEN 8 | 34 | #define VIRTIO_SCSI_EVENT_LEN 8 |
| @@ -75,18 +77,16 @@ struct virtio_scsi_vq { | |||
| 75 | * queue, and also lets the driver optimize the IRQ affinity for the virtqueues | 77 | * queue, and also lets the driver optimize the IRQ affinity for the virtqueues |
| 76 | * (each virtqueue's affinity is set to the CPU that "owns" the queue). | 78 | * (each virtqueue's affinity is set to the CPU that "owns" the queue). |
| 77 | * | 79 | * |
| 78 | * tgt_lock is held to serialize reading and writing req_vq. Reading req_vq | 80 | * tgt_seq is held to serialize reading and writing req_vq. |
| 79 | * could be done locklessly, but we do not do it yet. | ||
| 80 | * | 81 | * |
| 81 | * Decrements of reqs are never concurrent with writes of req_vq: before the | 82 | * Decrements of reqs are never concurrent with writes of req_vq: before the |
| 82 | * decrement reqs will be != 0; after the decrement the virtqueue completion | 83 | * decrement reqs will be != 0; after the decrement the virtqueue completion |
| 83 | * routine will not use the req_vq so it can be changed by a new request. | 84 | * routine will not use the req_vq so it can be changed by a new request. |
| 84 | * Thus they can happen outside the tgt_lock, provided of course we make reqs | 85 | * Thus they can happen outside the tgt_seq, provided of course we make reqs |
| 85 | * an atomic_t. | 86 | * an atomic_t. |
| 86 | */ | 87 | */ |
| 87 | struct virtio_scsi_target_state { | 88 | struct virtio_scsi_target_state { |
| 88 | /* This spinlock never held at the same time as vq_lock. */ | 89 | seqcount_t tgt_seq; |
| 89 | spinlock_t tgt_lock; | ||
| 90 | 90 | ||
| 91 | /* Count of outstanding requests. */ | 91 | /* Count of outstanding requests. */ |
| 92 | atomic_t reqs; | 92 | atomic_t reqs; |
| @@ -559,19 +559,33 @@ static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi, | |||
| 559 | unsigned long flags; | 559 | unsigned long flags; |
| 560 | u32 queue_num; | 560 | u32 queue_num; |
| 561 | 561 | ||
| 562 | spin_lock_irqsave(&tgt->tgt_lock, flags); | 562 | local_irq_save(flags); |
| 563 | if (atomic_inc_return(&tgt->reqs) > 1) { | ||
| 564 | unsigned long seq; | ||
| 565 | |||
| 566 | do { | ||
| 567 | seq = read_seqcount_begin(&tgt->tgt_seq); | ||
| 568 | vq = tgt->req_vq; | ||
| 569 | } while (read_seqcount_retry(&tgt->tgt_seq, seq)); | ||
| 570 | } else { | ||
| 571 | /* no writes can be concurrent because of atomic_t */ | ||
| 572 | write_seqcount_begin(&tgt->tgt_seq); | ||
| 573 | |||
| 574 | /* keep previous req_vq if a reader just arrived */ | ||
| 575 | if (unlikely(atomic_read(&tgt->reqs) > 1)) { | ||
| 576 | vq = tgt->req_vq; | ||
| 577 | goto unlock; | ||
| 578 | } | ||
| 563 | 579 | ||
| 564 | if (atomic_inc_return(&tgt->reqs) > 1) | ||
| 565 | vq = tgt->req_vq; | ||
| 566 | else { | ||
| 567 | queue_num = smp_processor_id(); | 580 | queue_num = smp_processor_id(); |
| 568 | while (unlikely(queue_num >= vscsi->num_queues)) | 581 | while (unlikely(queue_num >= vscsi->num_queues)) |
| 569 | queue_num -= vscsi->num_queues; | 582 | queue_num -= vscsi->num_queues; |
| 570 | |||
| 571 | tgt->req_vq = vq = &vscsi->req_vqs[queue_num]; | 583 | tgt->req_vq = vq = &vscsi->req_vqs[queue_num]; |
| 584 | unlock: | ||
| 585 | write_seqcount_end(&tgt->tgt_seq); | ||
| 572 | } | 586 | } |
| 587 | local_irq_restore(flags); | ||
| 573 | 588 | ||
| 574 | spin_unlock_irqrestore(&tgt->tgt_lock, flags); | ||
| 575 | return vq; | 589 | return vq; |
| 576 | } | 590 | } |
| 577 | 591 | ||
| @@ -641,6 +655,36 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc) | |||
| 641 | return virtscsi_tmf(vscsi, cmd); | 655 | return virtscsi_tmf(vscsi, cmd); |
| 642 | } | 656 | } |
| 643 | 657 | ||
| 658 | /** | ||
| 659 | * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth | ||
| 660 | * @sdev: Virtscsi target whose queue depth to change | ||
| 661 | * @qdepth: New queue depth | ||
| 662 | * @reason: Reason for the queue depth change. | ||
| 663 | */ | ||
| 664 | static int virtscsi_change_queue_depth(struct scsi_device *sdev, | ||
| 665 | int qdepth, | ||
| 666 | int reason) | ||
| 667 | { | ||
| 668 | struct Scsi_Host *shost = sdev->host; | ||
| 669 | int max_depth = shost->cmd_per_lun; | ||
| 670 | |||
| 671 | switch (reason) { | ||
| 672 | case SCSI_QDEPTH_QFULL: /* Drop qdepth in response to BUSY state */ | ||
| 673 | scsi_track_queue_full(sdev, qdepth); | ||
| 674 | break; | ||
| 675 | case SCSI_QDEPTH_RAMP_UP: /* Raise qdepth after BUSY state resolved */ | ||
| 676 | case SCSI_QDEPTH_DEFAULT: /* Manual change via sysfs */ | ||
| 677 | scsi_adjust_queue_depth(sdev, | ||
| 678 | scsi_get_tag_type(sdev), | ||
| 679 | min(max_depth, qdepth)); | ||
| 680 | break; | ||
| 681 | default: | ||
| 682 | return -EOPNOTSUPP; | ||
| 683 | } | ||
| 684 | |||
| 685 | return sdev->queue_depth; | ||
| 686 | } | ||
| 687 | |||
| 644 | static int virtscsi_abort(struct scsi_cmnd *sc) | 688 | static int virtscsi_abort(struct scsi_cmnd *sc) |
| 645 | { | 689 | { |
| 646 | struct virtio_scsi *vscsi = shost_priv(sc->device->host); | 690 | struct virtio_scsi *vscsi = shost_priv(sc->device->host); |
| @@ -667,14 +711,17 @@ static int virtscsi_abort(struct scsi_cmnd *sc) | |||
| 667 | 711 | ||
| 668 | static int virtscsi_target_alloc(struct scsi_target *starget) | 712 | static int virtscsi_target_alloc(struct scsi_target *starget) |
| 669 | { | 713 | { |
| 714 | struct Scsi_Host *sh = dev_to_shost(starget->dev.parent); | ||
| 715 | struct virtio_scsi *vscsi = shost_priv(sh); | ||
| 716 | |||
| 670 | struct virtio_scsi_target_state *tgt = | 717 | struct virtio_scsi_target_state *tgt = |
| 671 | kmalloc(sizeof(*tgt), GFP_KERNEL); | 718 | kmalloc(sizeof(*tgt), GFP_KERNEL); |
| 672 | if (!tgt) | 719 | if (!tgt) |
| 673 | return -ENOMEM; | 720 | return -ENOMEM; |
| 674 | 721 | ||
| 675 | spin_lock_init(&tgt->tgt_lock); | 722 | seqcount_init(&tgt->tgt_seq); |
| 676 | atomic_set(&tgt->reqs, 0); | 723 | atomic_set(&tgt->reqs, 0); |
| 677 | tgt->req_vq = NULL; | 724 | tgt->req_vq = &vscsi->req_vqs[0]; |
| 678 | 725 | ||
| 679 | starget->hostdata = tgt; | 726 | starget->hostdata = tgt; |
| 680 | return 0; | 727 | return 0; |
| @@ -693,6 +740,7 @@ static struct scsi_host_template virtscsi_host_template_single = { | |||
| 693 | .this_id = -1, | 740 | .this_id = -1, |
| 694 | .cmd_size = sizeof(struct virtio_scsi_cmd), | 741 | .cmd_size = sizeof(struct virtio_scsi_cmd), |
| 695 | .queuecommand = virtscsi_queuecommand_single, | 742 | .queuecommand = virtscsi_queuecommand_single, |
| 743 | .change_queue_depth = virtscsi_change_queue_depth, | ||
| 696 | .eh_abort_handler = virtscsi_abort, | 744 | .eh_abort_handler = virtscsi_abort, |
| 697 | .eh_device_reset_handler = virtscsi_device_reset, | 745 | .eh_device_reset_handler = virtscsi_device_reset, |
| 698 | 746 | ||
| @@ -710,6 +758,7 @@ static struct scsi_host_template virtscsi_host_template_multi = { | |||
| 710 | .this_id = -1, | 758 | .this_id = -1, |
| 711 | .cmd_size = sizeof(struct virtio_scsi_cmd), | 759 | .cmd_size = sizeof(struct virtio_scsi_cmd), |
| 712 | .queuecommand = virtscsi_queuecommand_multi, | 760 | .queuecommand = virtscsi_queuecommand_multi, |
| 761 | .change_queue_depth = virtscsi_change_queue_depth, | ||
| 713 | .eh_abort_handler = virtscsi_abort, | 762 | .eh_abort_handler = virtscsi_abort, |
| 714 | .eh_device_reset_handler = virtscsi_device_reset, | 763 | .eh_device_reset_handler = virtscsi_device_reset, |
| 715 | 764 | ||
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index c88e1468aad7..598f65efaaec 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c | |||
| @@ -1194,7 +1194,7 @@ static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, | |||
| 1194 | struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION }; | 1194 | struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION }; |
| 1195 | int ret; | 1195 | int ret; |
| 1196 | 1196 | ||
| 1197 | ret = pci_enable_msix(adapter->dev, &entry, 1); | 1197 | ret = pci_enable_msix_exact(adapter->dev, &entry, 1); |
| 1198 | if (ret) | 1198 | if (ret) |
| 1199 | return ret; | 1199 | return ret; |
| 1200 | 1200 | ||
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c index 41883a87931d..c0506de4f3b6 100644 --- a/drivers/scsi/wd33c93.c +++ b/drivers/scsi/wd33c93.c | |||
| @@ -502,7 +502,8 @@ wd33c93_execute(struct Scsi_Host *instance) | |||
| 502 | cmd = (struct scsi_cmnd *) hostdata->input_Q; | 502 | cmd = (struct scsi_cmnd *) hostdata->input_Q; |
| 503 | prev = NULL; | 503 | prev = NULL; |
| 504 | while (cmd) { | 504 | while (cmd) { |
| 505 | if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun))) | 505 | if (!(hostdata->busy[cmd->device->id] & |
| 506 | (1 << (cmd->device->lun & 0xff)))) | ||
| 506 | break; | 507 | break; |
| 507 | prev = cmd; | 508 | prev = cmd; |
| 508 | cmd = (struct scsi_cmnd *) cmd->host_scribble; | 509 | cmd = (struct scsi_cmnd *) cmd->host_scribble; |
| @@ -593,10 +594,10 @@ wd33c93_execute(struct Scsi_Host *instance) | |||
| 593 | 594 | ||
| 594 | write_wd33c93(regs, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0)); | 595 | write_wd33c93(regs, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0)); |
| 595 | 596 | ||
| 596 | write_wd33c93(regs, WD_TARGET_LUN, cmd->device->lun); | 597 | write_wd33c93(regs, WD_TARGET_LUN, (u8)cmd->device->lun); |
| 597 | write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER, | 598 | write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER, |
| 598 | hostdata->sync_xfer[cmd->device->id]); | 599 | hostdata->sync_xfer[cmd->device->id]); |
| 599 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); | 600 | hostdata->busy[cmd->device->id] |= (1 << (cmd->device->lun & 0xFF)); |
| 600 | 601 | ||
| 601 | if ((hostdata->level2 == L2_NONE) || | 602 | if ((hostdata->level2 == L2_NONE) || |
| 602 | (hostdata->sync_stat[cmd->device->id] == SS_UNSET)) { | 603 | (hostdata->sync_stat[cmd->device->id] == SS_UNSET)) { |
| @@ -862,7 +863,7 @@ wd33c93_intr(struct Scsi_Host *instance) | |||
| 862 | } | 863 | } |
| 863 | 864 | ||
| 864 | cmd->result = DID_NO_CONNECT << 16; | 865 | cmd->result = DID_NO_CONNECT << 16; |
| 865 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 866 | hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); |
| 866 | hostdata->state = S_UNCONNECTED; | 867 | hostdata->state = S_UNCONNECTED; |
| 867 | cmd->scsi_done(cmd); | 868 | cmd->scsi_done(cmd); |
| 868 | 869 | ||
| @@ -895,7 +896,7 @@ wd33c93_intr(struct Scsi_Host *instance) | |||
| 895 | 896 | ||
| 896 | /* construct an IDENTIFY message with correct disconnect bit */ | 897 | /* construct an IDENTIFY message with correct disconnect bit */ |
| 897 | 898 | ||
| 898 | hostdata->outgoing_msg[0] = (0x80 | 0x00 | cmd->device->lun); | 899 | hostdata->outgoing_msg[0] = IDENTIFY(0, cmd->device->lun); |
| 899 | if (cmd->SCp.phase) | 900 | if (cmd->SCp.phase) |
| 900 | hostdata->outgoing_msg[0] |= 0x40; | 901 | hostdata->outgoing_msg[0] |= 0x40; |
| 901 | 902 | ||
| @@ -1179,7 +1180,7 @@ wd33c93_intr(struct Scsi_Host *instance) | |||
| 1179 | lun = read_wd33c93(regs, WD_TARGET_LUN); | 1180 | lun = read_wd33c93(regs, WD_TARGET_LUN); |
| 1180 | DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun)) | 1181 | DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun)) |
| 1181 | hostdata->connected = NULL; | 1182 | hostdata->connected = NULL; |
| 1182 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 1183 | hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); |
| 1183 | hostdata->state = S_UNCONNECTED; | 1184 | hostdata->state = S_UNCONNECTED; |
| 1184 | if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE) | 1185 | if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE) |
| 1185 | cmd->SCp.Status = lun; | 1186 | cmd->SCp.Status = lun; |
| @@ -1268,7 +1269,7 @@ wd33c93_intr(struct Scsi_Host *instance) | |||
| 1268 | } | 1269 | } |
| 1269 | DB(DB_INTR, printk("UNEXP_DISC")) | 1270 | DB(DB_INTR, printk("UNEXP_DISC")) |
| 1270 | hostdata->connected = NULL; | 1271 | hostdata->connected = NULL; |
| 1271 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 1272 | hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); |
| 1272 | hostdata->state = S_UNCONNECTED; | 1273 | hostdata->state = S_UNCONNECTED; |
| 1273 | if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD) | 1274 | if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD) |
| 1274 | cmd->result = | 1275 | cmd->result = |
| @@ -1300,7 +1301,7 @@ wd33c93_intr(struct Scsi_Host *instance) | |||
| 1300 | switch (hostdata->state) { | 1301 | switch (hostdata->state) { |
| 1301 | case S_PRE_CMP_DISC: | 1302 | case S_PRE_CMP_DISC: |
| 1302 | hostdata->connected = NULL; | 1303 | hostdata->connected = NULL; |
| 1303 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 1304 | hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); |
| 1304 | hostdata->state = S_UNCONNECTED; | 1305 | hostdata->state = S_UNCONNECTED; |
| 1305 | DB(DB_INTR, printk(":%d", cmd->SCp.Status)) | 1306 | DB(DB_INTR, printk(":%d", cmd->SCp.Status)) |
| 1306 | if (cmd->cmnd[0] == REQUEST_SENSE | 1307 | if (cmd->cmnd[0] == REQUEST_SENSE |
| @@ -1353,7 +1354,7 @@ wd33c93_intr(struct Scsi_Host *instance) | |||
| 1353 | if (hostdata->selecting) { | 1354 | if (hostdata->selecting) { |
| 1354 | cmd = (struct scsi_cmnd *) hostdata->selecting; | 1355 | cmd = (struct scsi_cmnd *) hostdata->selecting; |
| 1355 | hostdata->selecting = NULL; | 1356 | hostdata->selecting = NULL; |
| 1356 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 1357 | hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); |
| 1357 | cmd->host_scribble = | 1358 | cmd->host_scribble = |
| 1358 | (uchar *) hostdata->input_Q; | 1359 | (uchar *) hostdata->input_Q; |
| 1359 | hostdata->input_Q = cmd; | 1360 | hostdata->input_Q = cmd; |
| @@ -1365,7 +1366,7 @@ wd33c93_intr(struct Scsi_Host *instance) | |||
| 1365 | if (cmd) { | 1366 | if (cmd) { |
| 1366 | if (phs == 0x00) { | 1367 | if (phs == 0x00) { |
| 1367 | hostdata->busy[cmd->device->id] &= | 1368 | hostdata->busy[cmd->device->id] &= |
| 1368 | ~(1 << cmd->device->lun); | 1369 | ~(1 << (cmd->device->lun & 0xff)); |
| 1369 | cmd->host_scribble = | 1370 | cmd->host_scribble = |
| 1370 | (uchar *) hostdata->input_Q; | 1371 | (uchar *) hostdata->input_Q; |
| 1371 | hostdata->input_Q = cmd; | 1372 | hostdata->input_Q = cmd; |
| @@ -1448,7 +1449,7 @@ wd33c93_intr(struct Scsi_Host *instance) | |||
| 1448 | cmd = (struct scsi_cmnd *) hostdata->disconnected_Q; | 1449 | cmd = (struct scsi_cmnd *) hostdata->disconnected_Q; |
| 1449 | patch = NULL; | 1450 | patch = NULL; |
| 1450 | while (cmd) { | 1451 | while (cmd) { |
| 1451 | if (id == cmd->device->id && lun == cmd->device->lun) | 1452 | if (id == cmd->device->id && lun == (u8)cmd->device->lun) |
| 1452 | break; | 1453 | break; |
| 1453 | patch = cmd; | 1454 | patch = cmd; |
| 1454 | cmd = (struct scsi_cmnd *) cmd->host_scribble; | 1455 | cmd = (struct scsi_cmnd *) cmd->host_scribble; |
| @@ -1459,7 +1460,7 @@ wd33c93_intr(struct Scsi_Host *instance) | |||
| 1459 | if (!cmd) { | 1460 | if (!cmd) { |
| 1460 | printk | 1461 | printk |
| 1461 | ("---TROUBLE: target %d.%d not in disconnect queue---", | 1462 | ("---TROUBLE: target %d.%d not in disconnect queue---", |
| 1462 | id, lun); | 1463 | id, (u8)lun); |
| 1463 | spin_unlock_irqrestore(&hostdata->lock, flags); | 1464 | spin_unlock_irqrestore(&hostdata->lock, flags); |
| 1464 | return; | 1465 | return; |
| 1465 | } | 1466 | } |
| @@ -1705,7 +1706,7 @@ wd33c93_abort(struct scsi_cmnd * cmd) | |||
| 1705 | sr = read_wd33c93(regs, WD_SCSI_STATUS); | 1706 | sr = read_wd33c93(regs, WD_SCSI_STATUS); |
| 1706 | printk("asr=%02x, sr=%02x.", asr, sr); | 1707 | printk("asr=%02x, sr=%02x.", asr, sr); |
| 1707 | 1708 | ||
| 1708 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 1709 | hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); |
| 1709 | hostdata->connected = NULL; | 1710 | hostdata->connected = NULL; |
| 1710 | hostdata->state = S_UNCONNECTED; | 1711 | hostdata->state = S_UNCONNECTED; |
| 1711 | cmd->result = DID_ABORT << 16; | 1712 | cmd->result = DID_ABORT << 16; |
| @@ -2169,7 +2170,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
| 2169 | seq_printf(m, "\nconnected: "); | 2170 | seq_printf(m, "\nconnected: "); |
| 2170 | if (hd->connected) { | 2171 | if (hd->connected) { |
| 2171 | cmd = (struct scsi_cmnd *) hd->connected; | 2172 | cmd = (struct scsi_cmnd *) hd->connected; |
| 2172 | seq_printf(m, " %d:%d(%02x)", | 2173 | seq_printf(m, " %d:%llu(%02x)", |
| 2173 | cmd->device->id, cmd->device->lun, cmd->cmnd[0]); | 2174 | cmd->device->id, cmd->device->lun, cmd->cmnd[0]); |
| 2174 | } | 2175 | } |
| 2175 | } | 2176 | } |
| @@ -2177,7 +2178,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
| 2177 | seq_printf(m, "\ninput_Q: "); | 2178 | seq_printf(m, "\ninput_Q: "); |
| 2178 | cmd = (struct scsi_cmnd *) hd->input_Q; | 2179 | cmd = (struct scsi_cmnd *) hd->input_Q; |
| 2179 | while (cmd) { | 2180 | while (cmd) { |
| 2180 | seq_printf(m, " %d:%d(%02x)", | 2181 | seq_printf(m, " %d:%llu(%02x)", |
| 2181 | cmd->device->id, cmd->device->lun, cmd->cmnd[0]); | 2182 | cmd->device->id, cmd->device->lun, cmd->cmnd[0]); |
| 2182 | cmd = (struct scsi_cmnd *) cmd->host_scribble; | 2183 | cmd = (struct scsi_cmnd *) cmd->host_scribble; |
| 2183 | } | 2184 | } |
| @@ -2186,7 +2187,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
| 2186 | seq_printf(m, "\ndisconnected_Q:"); | 2187 | seq_printf(m, "\ndisconnected_Q:"); |
| 2187 | cmd = (struct scsi_cmnd *) hd->disconnected_Q; | 2188 | cmd = (struct scsi_cmnd *) hd->disconnected_Q; |
| 2188 | while (cmd) { | 2189 | while (cmd) { |
| 2189 | seq_printf(m, " %d:%d(%02x)", | 2190 | seq_printf(m, " %d:%llu(%02x)", |
| 2190 | cmd->device->id, cmd->device->lun, cmd->cmnd[0]); | 2191 | cmd->device->id, cmd->device->lun, cmd->cmnd[0]); |
| 2191 | cmd = (struct scsi_cmnd *) cmd->host_scribble; | 2192 | cmd = (struct scsi_cmnd *) cmd->host_scribble; |
| 2192 | } | 2193 | } |
