aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/53c700.c7
-rw-r--r--drivers/scsi/BusLogic.c12
-rw-r--r--drivers/scsi/Kconfig59
-rw-r--r--drivers/scsi/Makefile7
-rw-r--r--drivers/scsi/NCR5380.c11
-rw-r--r--drivers/scsi/NCR5380.h4
-rw-r--r--drivers/scsi/NCR53c406a.c5
-rw-r--r--drivers/scsi/aacraid/aacraid.h4
-rw-r--r--drivers/scsi/aacraid/commsup.c23
-rw-r--r--drivers/scsi/aha152x.c4
-rw-r--r--drivers/scsi/aha1740.c10
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c1
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.c8
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.h1
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c9
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c121
-rw-r--r--drivers/scsi/fd_mcs.c2
-rw-r--r--drivers/scsi/hosts.c8
-rw-r--r--drivers/scsi/ibmvscsi/Makefile2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c960
-rw-r--r--drivers/scsi/imm.c12
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/ipr.c322
-rw-r--r--drivers/scsi/ipr.h83
-rw-r--r--drivers/scsi/ips.c28
-rw-r--r--drivers/scsi/ips.h9
-rw-r--r--drivers/scsi/libiscsi.c7
-rw-r--r--drivers/scsi/libsas/sas_discover.c22
-rw-r--r--drivers/scsi/libsas/sas_event.c14
-rw-r--r--drivers/scsi/libsas/sas_expander.c36
-rw-r--r--drivers/scsi/libsas/sas_init.c10
-rw-r--r--drivers/scsi/libsas/sas_internal.h12
-rw-r--r--drivers/scsi/libsas/sas_phy.c45
-rw-r--r--drivers/scsi/libsas/sas_port.c30
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c92
-rw-r--r--drivers/scsi/libsrp.c441
-rw-r--r--drivers/scsi/lpfc/lpfc.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c118
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c229
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h35
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c136
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c56
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c42
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid.c13
-rw-r--r--drivers/scsi/megaraid.h3
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c4
-rw-r--r--drivers/scsi/ncr53c8xx.c19
-rw-r--r--drivers/scsi/oktagon_esp.c6
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c7
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c5
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c6
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c11
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c12
-rw-r--r--drivers/scsi/ppa.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c94
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c66
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c8
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h105
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h7
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c47
-rw-r--r--drivers/scsi/qla4xxx/ql4_inline.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c6
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c1
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.c70
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c124
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h7
-rw-r--r--drivers/scsi/scsi.c45
-rw-r--r--drivers/scsi/scsi_error.c33
-rw-r--r--drivers/scsi/scsi_lib.c346
-rw-r--r--drivers/scsi/scsi_priv.h3
-rw-r--r--drivers/scsi/scsi_scan.c232
-rw-r--r--drivers/scsi/scsi_sysfs.c10
-rw-r--r--drivers/scsi/scsi_tgt_if.c352
-rw-r--r--drivers/scsi/scsi_tgt_lib.c745
-rw-r--r--drivers/scsi/scsi_tgt_priv.h25
-rw-r--r--drivers/scsi/scsi_transport_fc.c60
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c8
-rw-r--r--drivers/scsi/scsi_transport_spi.c7
-rw-r--r--drivers/scsi/scsi_wait_scan.c31
-rw-r--r--drivers/scsi/sd.c29
-rw-r--r--drivers/scsi/st.c16
-rw-r--r--drivers/scsi/stex.c130
-rw-r--r--drivers/scsi/t128.h39
93 files changed, 4640 insertions, 1239 deletions
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 562432d017b0..335a25540c08 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -622,8 +622,10 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
622 dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE); 622 dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
623 /* restore the old result if the request sense was 623 /* restore the old result if the request sense was
624 * successful */ 624 * successful */
625 if(result == 0) 625 if (result == 0)
626 result = cmnd[7]; 626 result = cmnd[7];
627 /* restore the original length */
628 SCp->cmd_len = cmnd[8];
627 } else 629 } else
628 NCR_700_unmap(hostdata, SCp, slot); 630 NCR_700_unmap(hostdata, SCp, slot);
629 631
@@ -1007,6 +1009,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
1007 * of the command */ 1009 * of the command */
1008 cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC; 1010 cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1009 cmnd[7] = hostdata->status[0]; 1011 cmnd[7] = hostdata->status[0];
1012 cmnd[8] = SCp->cmd_len;
1013 SCp->cmd_len = 6; /* command length for
1014 * REQUEST_SENSE */
1010 slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE); 1015 slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1011 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE); 1016 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1012 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer)); 1017 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index cdd033724786..3075204915c8 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2186,21 +2186,21 @@ static int __init BusLogic_init(void)
2186 2186
2187 if (BusLogic_ProbeOptions.NoProbe) 2187 if (BusLogic_ProbeOptions.NoProbe)
2188 return -ENODEV; 2188 return -ENODEV;
2189 BusLogic_ProbeInfoList = (struct BusLogic_ProbeInfo *) 2189 BusLogic_ProbeInfoList =
2190 kmalloc(BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo), GFP_ATOMIC); 2190 kzalloc(BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo), GFP_KERNEL);
2191 if (BusLogic_ProbeInfoList == NULL) { 2191 if (BusLogic_ProbeInfoList == NULL) {
2192 BusLogic_Error("BusLogic: Unable to allocate Probe Info List\n", NULL); 2192 BusLogic_Error("BusLogic: Unable to allocate Probe Info List\n", NULL);
2193 return -ENOMEM; 2193 return -ENOMEM;
2194 } 2194 }
2195 memset(BusLogic_ProbeInfoList, 0, BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo)); 2195
2196 PrototypeHostAdapter = (struct BusLogic_HostAdapter *) 2196 PrototypeHostAdapter =
2197 kmalloc(sizeof(struct BusLogic_HostAdapter), GFP_ATOMIC); 2197 kzalloc(sizeof(struct BusLogic_HostAdapter), GFP_KERNEL);
2198 if (PrototypeHostAdapter == NULL) { 2198 if (PrototypeHostAdapter == NULL) {
2199 kfree(BusLogic_ProbeInfoList); 2199 kfree(BusLogic_ProbeInfoList);
2200 BusLogic_Error("BusLogic: Unable to allocate Prototype " "Host Adapter\n", NULL); 2200 BusLogic_Error("BusLogic: Unable to allocate Prototype " "Host Adapter\n", NULL);
2201 return -ENOMEM; 2201 return -ENOMEM;
2202 } 2202 }
2203 memset(PrototypeHostAdapter, 0, sizeof(struct BusLogic_HostAdapter)); 2203
2204#ifdef MODULE 2204#ifdef MODULE
2205 if (BusLogic != NULL) 2205 if (BusLogic != NULL)
2206 BusLogic_Setup(BusLogic); 2206 BusLogic_Setup(BusLogic);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9540eb8efdcb..69569096dae5 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -29,6 +29,13 @@ config SCSI
29 However, do not compile this as a module if your root file system 29 However, do not compile this as a module if your root file system
30 (the one containing the directory /) is located on a SCSI device. 30 (the one containing the directory /) is located on a SCSI device.
31 31
32config SCSI_TGT
33 tristate "SCSI target support"
34 depends on SCSI && EXPERIMENTAL
35 ---help---
36 If you want to use SCSI target mode drivers enable this option.
37 If you choose M, the module will be called scsi_tgt.
38
32config SCSI_NETLINK 39config SCSI_NETLINK
33 bool 40 bool
34 default n 41 default n
@@ -216,6 +223,23 @@ config SCSI_LOGGING
216 there should be no noticeable performance impact as long as you have 223 there should be no noticeable performance impact as long as you have
217 logging turned off. 224 logging turned off.
218 225
226config SCSI_SCAN_ASYNC
227 bool "Asynchronous SCSI scanning"
228 depends on SCSI
229 help
230 The SCSI subsystem can probe for devices while the rest of the
231 system continues booting, and even probe devices on different
232 busses in parallel, leading to a significant speed-up.
233 If you have built SCSI as modules, enabling this option can
234 be a problem as the devices may not have been found by the
235 time your system expects them to have been. You can load the
236 scsi_wait_scan module to ensure that all scans have completed.
237 If you build your SCSI drivers into the kernel, then everything
238 will work fine if you say Y here.
239
240 You can override this choice by specifying scsi_mod.scan="sync"
241 or "async" on the kernel's command line.
242
219menu "SCSI Transports" 243menu "SCSI Transports"
220 depends on SCSI 244 depends on SCSI
221 245
@@ -797,6 +821,20 @@ config SCSI_IBMVSCSI
797 To compile this driver as a module, choose M here: the 821 To compile this driver as a module, choose M here: the
798 module will be called ibmvscsic. 822 module will be called ibmvscsic.
799 823
824config SCSI_IBMVSCSIS
825 tristate "IBM Virtual SCSI Server support"
826 depends on PPC_PSERIES && SCSI_TGT && SCSI_SRP
827 help
828 This is the SRP target driver for IBM pSeries virtual environments.
829
830 The userspace component needed to initialize the driver and
831 documentation can be found:
832
833 http://stgt.berlios.de/
834
835 To compile this driver as a module, choose M here: the
836 module will be called ibmvstgt.
837
800config SCSI_INITIO 838config SCSI_INITIO
801 tristate "Initio 9100U(W) support" 839 tristate "Initio 9100U(W) support"
802 depends on PCI && SCSI 840 depends on PCI && SCSI
@@ -944,8 +982,13 @@ config SCSI_STEX
944 tristate "Promise SuperTrak EX Series support" 982 tristate "Promise SuperTrak EX Series support"
945 depends on PCI && SCSI 983 depends on PCI && SCSI
946 ---help--- 984 ---help---
947 This driver supports Promise SuperTrak EX8350/8300/16350/16300 985 This driver supports Promise SuperTrak EX series storage controllers.
948 Storage controllers. 986
987 Promise provides Linux RAID configuration utility for these
988 controllers. Please visit <http://www.promise.com> to download.
989
990 To compile this driver as a module, choose M here: the
991 module will be called stex.
949 992
950config SCSI_SYM53C8XX_2 993config SCSI_SYM53C8XX_2
951 tristate "SYM53C8XX Version 2 SCSI support" 994 tristate "SYM53C8XX Version 2 SCSI support"
@@ -1026,6 +1069,7 @@ config SCSI_IPR
1026config SCSI_IPR_TRACE 1069config SCSI_IPR_TRACE
1027 bool "enable driver internal trace" 1070 bool "enable driver internal trace"
1028 depends on SCSI_IPR 1071 depends on SCSI_IPR
1072 default y
1029 help 1073 help
1030 If you say Y here, the driver will trace all commands issued 1074 If you say Y here, the driver will trace all commands issued
1031 to the adapter. Performance impact is minimal. Trace can be 1075 to the adapter. Performance impact is minimal. Trace can be
@@ -1034,6 +1078,7 @@ config SCSI_IPR_TRACE
1034config SCSI_IPR_DUMP 1078config SCSI_IPR_DUMP
1035 bool "enable adapter dump support" 1079 bool "enable adapter dump support"
1036 depends on SCSI_IPR 1080 depends on SCSI_IPR
1081 default y
1037 help 1082 help
1038 If you say Y here, the driver will support adapter crash dump. 1083 If you say Y here, the driver will support adapter crash dump.
1039 If you enable this support, the iprdump daemon can be used 1084 If you enable this support, the iprdump daemon can be used
@@ -1734,6 +1779,16 @@ config ZFCP
1734 called zfcp. If you want to compile it as a module, say M here 1779 called zfcp. If you want to compile it as a module, say M here
1735 and read <file:Documentation/modules.txt>. 1780 and read <file:Documentation/modules.txt>.
1736 1781
1782config SCSI_SRP
1783 tristate "SCSI RDMA Protocol helper library"
1784 depends on SCSI && PCI
1785 select SCSI_TGT
1786 help
1787 If you wish to use SRP target drivers, say Y.
1788
1789 To compile this driver as a module, choose M here: the
1790 module will be called libsrp.
1791
1737endmenu 1792endmenu
1738 1793
1739source "drivers/scsi/pcmcia/Kconfig" 1794source "drivers/scsi/pcmcia/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index bcca39c3bcbf..bd7c9888f7f4 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -21,6 +21,7 @@ CFLAGS_seagate.o = -DARBITRATE -DPARITY -DSEAGATE_USE_ASM
21subdir-$(CONFIG_PCMCIA) += pcmcia 21subdir-$(CONFIG_PCMCIA) += pcmcia
22 22
23obj-$(CONFIG_SCSI) += scsi_mod.o 23obj-$(CONFIG_SCSI) += scsi_mod.o
24obj-$(CONFIG_SCSI_TGT) += scsi_tgt.o
24 25
25obj-$(CONFIG_RAID_ATTRS) += raid_class.o 26obj-$(CONFIG_RAID_ATTRS) += raid_class.o
26 27
@@ -125,7 +126,9 @@ obj-$(CONFIG_SCSI_FCAL) += fcal.o
125obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o 126obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
126obj-$(CONFIG_SCSI_NSP32) += nsp32.o 127obj-$(CONFIG_SCSI_NSP32) += nsp32.o
127obj-$(CONFIG_SCSI_IPR) += ipr.o 128obj-$(CONFIG_SCSI_IPR) += ipr.o
129obj-$(CONFIG_SCSI_SRP) += libsrp.o
128obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/ 130obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
131obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
129obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o 132obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
130obj-$(CONFIG_SCSI_STEX) += stex.o 133obj-$(CONFIG_SCSI_STEX) += stex.o
131 134
@@ -141,6 +144,8 @@ obj-$(CONFIG_CHR_DEV_SCH) += ch.o
141# This goes last, so that "real" scsi devices probe earlier 144# This goes last, so that "real" scsi devices probe earlier
142obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o 145obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
143 146
147obj-$(CONFIG_SCSI) += scsi_wait_scan.o
148
144scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \ 149scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \
145 scsicam.o scsi_error.o scsi_lib.o \ 150 scsicam.o scsi_error.o scsi_lib.o \
146 scsi_scan.o scsi_sysfs.o \ 151 scsi_scan.o scsi_sysfs.o \
@@ -149,6 +154,8 @@ scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
149scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o 154scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
150scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o 155scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
151 156
157scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
158
152sd_mod-objs := sd.o 159sd_mod-objs := sd.o
153sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o 160sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o
154ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \ 161ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index a6aa91072880..bb3cb3360541 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -849,7 +849,7 @@ static int __devinit NCR5380_init(struct Scsi_Host *instance, int flags)
849 hostdata->issue_queue = NULL; 849 hostdata->issue_queue = NULL;
850 hostdata->disconnected_queue = NULL; 850 hostdata->disconnected_queue = NULL;
851 851
852 INIT_WORK(&hostdata->coroutine, NCR5380_main, hostdata); 852 INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main);
853 853
854#ifdef NCR5380_STATS 854#ifdef NCR5380_STATS
855 for (i = 0; i < 8; ++i) { 855 for (i = 0; i < 8; ++i) {
@@ -1016,7 +1016,7 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1016 1016
1017 /* Run the coroutine if it isn't already running. */ 1017 /* Run the coroutine if it isn't already running. */
1018 /* Kick off command processing */ 1018 /* Kick off command processing */
1019 schedule_work(&hostdata->coroutine); 1019 schedule_delayed_work(&hostdata->coroutine, 0);
1020 return 0; 1020 return 0;
1021} 1021}
1022 1022
@@ -1033,9 +1033,10 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1033 * host lock and called routines may take the isa dma lock. 1033 * host lock and called routines may take the isa dma lock.
1034 */ 1034 */
1035 1035
1036static void NCR5380_main(void *p) 1036static void NCR5380_main(struct work_struct *work)
1037{ 1037{
1038 struct NCR5380_hostdata *hostdata = p; 1038 struct NCR5380_hostdata *hostdata =
1039 container_of(work, struct NCR5380_hostdata, coroutine.work);
1039 struct Scsi_Host *instance = hostdata->host; 1040 struct Scsi_Host *instance = hostdata->host;
1040 Scsi_Cmnd *tmp, *prev; 1041 Scsi_Cmnd *tmp, *prev;
1041 int done; 1042 int done;
@@ -1221,7 +1222,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1221 } /* if BASR_IRQ */ 1222 } /* if BASR_IRQ */
1222 spin_unlock_irqrestore(instance->host_lock, flags); 1223 spin_unlock_irqrestore(instance->host_lock, flags);
1223 if(!done) 1224 if(!done)
1224 schedule_work(&hostdata->coroutine); 1225 schedule_delayed_work(&hostdata->coroutine, 0);
1225 } while (!done); 1226 } while (!done);
1226 return IRQ_HANDLED; 1227 return IRQ_HANDLED;
1227} 1228}
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 1bc73de496b0..713a108c02ef 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -271,7 +271,7 @@ struct NCR5380_hostdata {
271 unsigned long time_expires; /* in jiffies, set prior to sleeping */ 271 unsigned long time_expires; /* in jiffies, set prior to sleeping */
272 int select_time; /* timer in select for target response */ 272 int select_time; /* timer in select for target response */
273 volatile Scsi_Cmnd *selecting; 273 volatile Scsi_Cmnd *selecting;
274 struct work_struct coroutine; /* our co-routine */ 274 struct delayed_work coroutine; /* our co-routine */
275#ifdef NCR5380_STATS 275#ifdef NCR5380_STATS
276 unsigned timebase; /* Base for time calcs */ 276 unsigned timebase; /* Base for time calcs */
277 long time_read[8]; /* time to do reads */ 277 long time_read[8]; /* time to do reads */
@@ -298,7 +298,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance);
298#ifndef DONT_USE_INTR 298#ifndef DONT_USE_INTR
299static irqreturn_t NCR5380_intr(int irq, void *dev_id); 299static irqreturn_t NCR5380_intr(int irq, void *dev_id);
300#endif 300#endif
301static void NCR5380_main(void *ptr); 301static void NCR5380_main(struct work_struct *work);
302static void NCR5380_print_options(struct Scsi_Host *instance); 302static void NCR5380_print_options(struct Scsi_Host *instance);
303#ifdef NDEBUG 303#ifdef NDEBUG
304static void NCR5380_print_phase(struct Scsi_Host *instance); 304static void NCR5380_print_phase(struct Scsi_Host *instance);
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index d4613815f685..8578555d58fd 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -220,9 +220,11 @@ static void *addresses[] = {
220static unsigned short ports[] = { 0x230, 0x330, 0x280, 0x290, 0x330, 0x340, 0x300, 0x310, 0x348, 0x350 }; 220static unsigned short ports[] = { 0x230, 0x330, 0x280, 0x290, 0x330, 0x340, 0x300, 0x310, 0x348, 0x350 };
221#define PORT_COUNT ARRAY_SIZE(ports) 221#define PORT_COUNT ARRAY_SIZE(ports)
222 222
223#ifndef MODULE
223/* possible interrupt channels */ 224/* possible interrupt channels */
224static unsigned short intrs[] = { 10, 11, 12, 15 }; 225static unsigned short intrs[] = { 10, 11, 12, 15 };
225#define INTR_COUNT ARRAY_SIZE(intrs) 226#define INTR_COUNT ARRAY_SIZE(intrs)
227#endif /* !MODULE */
226 228
227/* signatures for NCR 53c406a based controllers */ 229/* signatures for NCR 53c406a based controllers */
228#if USE_BIOS 230#if USE_BIOS
@@ -605,6 +607,7 @@ static int NCR53c406a_release(struct Scsi_Host *shost)
605 return 0; 607 return 0;
606} 608}
607 609
610#ifndef MODULE
608/* called from init/main.c */ 611/* called from init/main.c */
609static int __init NCR53c406a_setup(char *str) 612static int __init NCR53c406a_setup(char *str)
610{ 613{
@@ -661,6 +664,8 @@ static int __init NCR53c406a_setup(char *str)
661 664
662__setup("ncr53c406a=", NCR53c406a_setup); 665__setup("ncr53c406a=", NCR53c406a_setup);
663 666
667#endif /* !MODULE */
668
664static const char *NCR53c406a_info(struct Scsi_Host *SChost) 669static const char *NCR53c406a_info(struct Scsi_Host *SChost)
665{ 670{
666 DEB(printk("NCR53c406a_info called\n")); 671 DEB(printk("NCR53c406a_info called\n"));
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index eb3ed91bac79..4f8b4c53d435 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -11,8 +11,8 @@
11 *----------------------------------------------------------------------------*/ 11 *----------------------------------------------------------------------------*/
12 12
13#ifndef AAC_DRIVER_BUILD 13#ifndef AAC_DRIVER_BUILD
14# define AAC_DRIVER_BUILD 2409 14# define AAC_DRIVER_BUILD 2423
15# define AAC_DRIVER_BRANCH "-mh2" 15# define AAC_DRIVER_BRANCH "-mh3"
16#endif 16#endif
17#define MAXIMUM_NUM_CONTAINERS 32 17#define MAXIMUM_NUM_CONTAINERS 32
18 18
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 19e42ac07cb2..4893a6d06a33 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -518,6 +518,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
518 */ 518 */
519 unsigned long count = 36000000L; /* 3 minutes */ 519 unsigned long count = 36000000L; /* 3 minutes */
520 while (down_trylock(&fibptr->event_wait)) { 520 while (down_trylock(&fibptr->event_wait)) {
521 int blink;
521 if (--count == 0) { 522 if (--count == 0) {
522 spin_lock_irqsave(q->lock, qflags); 523 spin_lock_irqsave(q->lock, qflags);
523 q->numpending--; 524 q->numpending--;
@@ -530,6 +531,14 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
530 } 531 }
531 return -ETIMEDOUT; 532 return -ETIMEDOUT;
532 } 533 }
534 if ((blink = aac_adapter_check_health(dev)) > 0) {
535 if (wait == -1) {
536 printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
537 "Usually a result of a serious unrecoverable hardware problem\n",
538 blink);
539 }
540 return -EFAULT;
541 }
533 udelay(5); 542 udelay(5);
534 } 543 }
535 } else if (down_interruptible(&fibptr->event_wait)) { 544 } else if (down_interruptible(&fibptr->event_wait)) {
@@ -1093,6 +1102,20 @@ static int _aac_reset_adapter(struct aac_dev *aac)
1093 goto out; 1102 goto out;
1094 } 1103 }
1095 1104
1105 /*
1106 * Loop through the fibs, close the synchronous FIBS
1107 */
1108 for (index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
1109 struct fib *fib = &aac->fibs[index];
1110 if (!(fib->hw_fib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1111 (fib->hw_fib->header.XferState & cpu_to_le32(ResponseExpected))) {
1112 unsigned long flagv;
1113 spin_lock_irqsave(&fib->event_lock, flagv);
1114 up(&fib->event_wait);
1115 spin_unlock_irqrestore(&fib->event_lock, flagv);
1116 schedule();
1117 }
1118 }
1096 index = aac->cardtype; 1119 index = aac->cardtype;
1097 1120
1098 /* 1121 /*
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 306f46b85a55..0cec742d12e9 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1443,7 +1443,7 @@ static struct work_struct aha152x_tq;
1443 * Run service completions on the card with interrupts enabled. 1443 * Run service completions on the card with interrupts enabled.
1444 * 1444 *
1445 */ 1445 */
1446static void run(void) 1446static void run(struct work_struct *work)
1447{ 1447{
1448 struct aha152x_hostdata *hd; 1448 struct aha152x_hostdata *hd;
1449 1449
@@ -1499,7 +1499,7 @@ static irqreturn_t intr(int irqno, void *dev_id)
1499 HOSTDATA(shpnt)->service=1; 1499 HOSTDATA(shpnt)->service=1;
1500 1500
1501 /* Poke the BH handler */ 1501 /* Poke the BH handler */
1502 INIT_WORK(&aha152x_tq, (void *) run, NULL); 1502 INIT_WORK(&aha152x_tq, run);
1503 schedule_work(&aha152x_tq); 1503 schedule_work(&aha152x_tq);
1504 } 1504 }
1505 DO_UNLOCK(flags); 1505 DO_UNLOCK(flags);
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index c3c38a7e8d32..d7af9c63a04d 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -586,7 +586,7 @@ static struct scsi_host_template aha1740_template = {
586 586
587static int aha1740_probe (struct device *dev) 587static int aha1740_probe (struct device *dev)
588{ 588{
589 int slotbase; 589 int slotbase, rc;
590 unsigned int irq_level, irq_type, translation; 590 unsigned int irq_level, irq_type, translation;
591 struct Scsi_Host *shpnt; 591 struct Scsi_Host *shpnt;
592 struct aha1740_hostdata *host; 592 struct aha1740_hostdata *host;
@@ -641,10 +641,16 @@ static int aha1740_probe (struct device *dev)
641 } 641 }
642 642
643 eisa_set_drvdata (edev, shpnt); 643 eisa_set_drvdata (edev, shpnt);
644 scsi_add_host (shpnt, dev); /* XXX handle failure */ 644
645 rc = scsi_add_host (shpnt, dev);
646 if (rc)
647 goto err_irq;
648
645 scsi_scan_host (shpnt); 649 scsi_scan_host (shpnt);
646 return 0; 650 return 0;
647 651
652 err_irq:
653 free_irq(irq_level, shpnt);
648 err_unmap: 654 err_unmap:
649 dma_unmap_single (&edev->dev, host->ecb_dma_addr, 655 dma_unmap_single (&edev->dev, host->ecb_dma_addr,
650 sizeof (host->ecb), DMA_BIDIRECTIONAL); 656 sizeof (host->ecb), DMA_BIDIRECTIONAL);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 2001fe890e71..1a3ab6aa856b 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -62,6 +62,7 @@ static struct pci_device_id ahd_linux_pci_id_table[] = {
62 /* aic7901 based controllers */ 62 /* aic7901 based controllers */
63 ID(ID_AHA_29320A), 63 ID(ID_AHA_29320A),
64 ID(ID_AHA_29320ALP), 64 ID(ID_AHA_29320ALP),
65 ID(ID_AHA_29320LPE),
65 /* aic7902 based controllers */ 66 /* aic7902 based controllers */
66 ID(ID_AHA_29320), 67 ID(ID_AHA_29320),
67 ID(ID_AHA_29320B), 68 ID(ID_AHA_29320B),
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index c07735819cd1..2cf7bb3123f0 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -109,7 +109,13 @@ static struct ahd_pci_identity ahd_pci_ident_table [] =
109 { 109 {
110 ID_AHA_29320ALP, 110 ID_AHA_29320ALP,
111 ID_ALL_MASK, 111 ID_ALL_MASK,
112 "Adaptec 29320ALP Ultra320 SCSI adapter", 112 "Adaptec 29320ALP PCIx Ultra320 SCSI adapter",
113 ahd_aic7901_setup
114 },
115 {
116 ID_AHA_29320LPE,
117 ID_ALL_MASK,
118 "Adaptec 29320LPE PCIe Ultra320 SCSI adapter",
113 ahd_aic7901_setup 119 ahd_aic7901_setup
114 }, 120 },
115 /* aic7901A based controllers */ 121 /* aic7901A based controllers */
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.h b/drivers/scsi/aic7xxx/aic79xx_pci.h
index da45153668c7..16b7c70a673c 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.h
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.h
@@ -51,6 +51,7 @@
51#define ID_AIC7901 0x800F9005FFFF9005ull 51#define ID_AIC7901 0x800F9005FFFF9005ull
52#define ID_AHA_29320A 0x8000900500609005ull 52#define ID_AHA_29320A 0x8000900500609005ull
53#define ID_AHA_29320ALP 0x8017900500449005ull 53#define ID_AHA_29320ALP 0x8017900500449005ull
54#define ID_AHA_29320LPE 0x8017900500459005ull
54 55
55#define ID_AIC7901A 0x801E9005FFFF9005ull 56#define ID_AIC7901A 0x801E9005FFFF9005ull
56#define ID_AHA_29320LP 0x8014900500449005ull 57#define ID_AHA_29320LP 0x8014900500449005ull
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 57c5ba4043f2..42302ef05ee5 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -724,6 +724,15 @@ static void asd_free_queues(struct asd_ha_struct *asd_ha)
724 724
725 list_for_each_safe(pos, n, &pending) { 725 list_for_each_safe(pos, n, &pending) {
726 struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list); 726 struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list);
727 /*
728 * Delete unexpired ascb timers. This may happen if we issue
729 * a CONTROL PHY scb to an adapter and rmmod before the scb
730 * times out. Apparently we don't wait for the CONTROL PHY
731 * to complete, so it doesn't matter if we kill the timer.
732 */
733 del_timer_sync(&ascb->timer);
734 WARN_ON(ascb->scb->header.opcode != CONTROL_PHY);
735
727 list_del_init(pos); 736 list_del_init(pos);
728 ASD_DPRINTK("freeing from pending\n"); 737 ASD_DPRINTK("freeing from pending\n");
729 asd_ascb_free(ascb); 738 asd_ascb_free(ascb);
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index b15caf1c8fa2..75ed6b0569d1 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -25,6 +25,7 @@
25 */ 25 */
26 26
27#include <linux/pci.h> 27#include <linux/pci.h>
28#include <scsi/scsi_host.h>
28 29
29#include "aic94xx.h" 30#include "aic94xx.h"
30#include "aic94xx_reg.h" 31#include "aic94xx_reg.h"
@@ -412,6 +413,40 @@ void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id)
412 } 413 }
413} 414}
414 415
416/* hard reset a phy later */
417static void do_phy_reset_later(struct work_struct *work)
418{
419 struct sas_phy *sas_phy =
420 container_of(work, struct sas_phy, reset_work);
421 int error;
422
423 ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__,
424 sas_phy->identify.phy_identifier);
425 /* Reset device port */
426 error = sas_phy_reset(sas_phy, 1);
427 if (error)
428 ASD_DPRINTK("%s: Hard reset of phy %d failed (%d).\n",
429 __FUNCTION__, sas_phy->identify.phy_identifier, error);
430}
431
432static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost)
433{
434 INIT_WORK(&sas_phy->reset_work, do_phy_reset_later);
435 queue_work(shost->work_q, &sas_phy->reset_work);
436}
437
438/* start up the ABORT TASK tmf... */
439static void task_kill_later(struct asd_ascb *ascb)
440{
441 struct asd_ha_struct *asd_ha = ascb->ha;
442 struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
443 struct Scsi_Host *shost = sas_ha->core.shost;
444 struct sas_task *task = ascb->uldd_task;
445
446 INIT_WORK(&task->abort_work, sas_task_abort);
447 queue_work(shost->work_q, &task->abort_work);
448}
449
415static void escb_tasklet_complete(struct asd_ascb *ascb, 450static void escb_tasklet_complete(struct asd_ascb *ascb,
416 struct done_list_struct *dl) 451 struct done_list_struct *dl)
417{ 452{
@@ -439,6 +474,74 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
439 ascb->scb->header.opcode); 474 ascb->scb->header.opcode);
440 } 475 }
441 476
477 /* Catch these before we mask off the sb_opcode bits */
478 switch (sb_opcode) {
479 case REQ_TASK_ABORT: {
480 struct asd_ascb *a, *b;
481 u16 tc_abort;
482
483 tc_abort = *((u16*)(&dl->status_block[1]));
484 tc_abort = le16_to_cpu(tc_abort);
485
486 ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
487 __FUNCTION__, dl->status_block[3]);
488
489 /* Find the pending task and abort it. */
490 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list)
491 if (a->tc_index == tc_abort) {
492 task_kill_later(a);
493 break;
494 }
495 goto out;
496 }
497 case REQ_DEVICE_RESET: {
498 struct Scsi_Host *shost = sas_ha->core.shost;
499 struct sas_phy *dev_phy;
500 struct asd_ascb *a;
501 u16 conn_handle;
502
503 conn_handle = *((u16*)(&dl->status_block[1]));
504 conn_handle = le16_to_cpu(conn_handle);
505
506 ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__,
507 dl->status_block[3]);
508
509 /* Kill all pending tasks and reset the device */
510 dev_phy = NULL;
511 list_for_each_entry(a, &asd_ha->seq.pend_q, list) {
512 struct sas_task *task;
513 struct domain_device *dev;
514 u16 x;
515
516 task = a->uldd_task;
517 if (!task)
518 continue;
519 dev = task->dev;
520
521 x = (unsigned long)dev->lldd_dev;
522 if (x == conn_handle) {
523 dev_phy = dev->port->phy;
524 task_kill_later(a);
525 }
526 }
527
528 /* Reset device port */
529 if (!dev_phy) {
530 ASD_DPRINTK("%s: No pending commands; can't reset.\n",
531 __FUNCTION__);
532 goto out;
533 }
534 phy_reset_later(dev_phy, shost);
535 goto out;
536 }
537 case SIGNAL_NCQ_ERROR:
538 ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __FUNCTION__);
539 goto out;
540 case CLEAR_NCQ_ERROR:
541 ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __FUNCTION__);
542 goto out;
543 }
544
442 sb_opcode &= ~DL_PHY_MASK; 545 sb_opcode &= ~DL_PHY_MASK;
443 546
444 switch (sb_opcode) { 547 switch (sb_opcode) {
@@ -469,22 +572,6 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
469 asd_deform_port(asd_ha, phy); 572 asd_deform_port(asd_ha, phy);
470 sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT); 573 sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
471 break; 574 break;
472 case REQ_TASK_ABORT:
473 ASD_DPRINTK("%s: phy%d: REQ_TASK_ABORT\n", __FUNCTION__,
474 phy_id);
475 break;
476 case REQ_DEVICE_RESET:
477 ASD_DPRINTK("%s: phy%d: REQ_DEVICE_RESET\n", __FUNCTION__,
478 phy_id);
479 break;
480 case SIGNAL_NCQ_ERROR:
481 ASD_DPRINTK("%s: phy%d: SIGNAL_NCQ_ERROR\n", __FUNCTION__,
482 phy_id);
483 break;
484 case CLEAR_NCQ_ERROR:
485 ASD_DPRINTK("%s: phy%d: CLEAR_NCQ_ERROR\n", __FUNCTION__,
486 phy_id);
487 break;
488 default: 575 default:
489 ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__, 576 ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__,
490 phy_id, sb_opcode); 577 phy_id, sb_opcode);
@@ -504,7 +591,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
504 591
505 break; 592 break;
506 } 593 }
507 594out:
508 asd_invalidate_edb(ascb, edb); 595 asd_invalidate_edb(ascb, edb);
509} 596}
510 597
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index ef8285c326e4..668569e8856b 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -294,6 +294,7 @@ static struct Scsi_Host *hosts[FD_MAX_HOSTS + 1] = { NULL };
294static int user_fifo_count = 0; 294static int user_fifo_count = 0;
295static int user_fifo_size = 0; 295static int user_fifo_size = 0;
296 296
297#ifndef MODULE
297static int __init fd_mcs_setup(char *str) 298static int __init fd_mcs_setup(char *str)
298{ 299{
299 static int done_setup = 0; 300 static int done_setup = 0;
@@ -311,6 +312,7 @@ static int __init fd_mcs_setup(char *str)
311} 312}
312 313
313__setup("fd_mcs=", fd_mcs_setup); 314__setup("fd_mcs=", fd_mcs_setup);
315#endif /* !MODULE */
314 316
315static void print_banner(struct Scsi_Host *shpnt) 317static void print_banner(struct Scsi_Host *shpnt)
316{ 318{
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 68ef1636678d..38c3a291efac 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -263,6 +263,10 @@ static void scsi_host_dev_release(struct device *dev)
263 kthread_stop(shost->ehandler); 263 kthread_stop(shost->ehandler);
264 if (shost->work_q) 264 if (shost->work_q)
265 destroy_workqueue(shost->work_q); 265 destroy_workqueue(shost->work_q);
266 if (shost->uspace_req_q) {
267 kfree(shost->uspace_req_q->queuedata);
268 scsi_free_queue(shost->uspace_req_q);
269 }
266 270
267 scsi_destroy_command_freelist(shost); 271 scsi_destroy_command_freelist(shost);
268 if (shost->bqt) 272 if (shost->bqt)
@@ -301,8 +305,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
301 if (!shost) 305 if (!shost)
302 return NULL; 306 return NULL;
303 307
304 spin_lock_init(&shost->default_lock); 308 shost->host_lock = &shost->default_lock;
305 scsi_assign_lock(shost, &shost->default_lock); 309 spin_lock_init(shost->host_lock);
306 shost->shost_state = SHOST_CREATED; 310 shost->shost_state = SHOST_CREATED;
307 INIT_LIST_HEAD(&shost->__devices); 311 INIT_LIST_HEAD(&shost->__devices);
308 INIT_LIST_HEAD(&shost->__targets); 312 INIT_LIST_HEAD(&shost->__targets);
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index 4e247b6b8700..6ac0633d5452 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -3,3 +3,5 @@ obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsic.o
3ibmvscsic-y += ibmvscsi.o 3ibmvscsic-y += ibmvscsi.o
4ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o 4ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o
5ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o 5ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o
6
7obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
new file mode 100644
index 000000000000..e28260f05d6b
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -0,0 +1,960 @@
1/*
2 * IBM eServer i/pSeries Virtual SCSI Target Driver
3 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
4 * Santiago Leon (santil@us.ibm.com) IBM Corp.
5 * Linda Xie (lxie@us.ibm.com) IBM Corp.
6 *
7 * Copyright (C) 2005-2006 FUJITA Tomonori <tomof@acm.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23 */
24#include <linux/interrupt.h>
25#include <linux/module.h>
26#include <scsi/scsi.h>
27#include <scsi/scsi_host.h>
28#include <scsi/scsi_tgt.h>
29#include <scsi/libsrp.h>
30#include <asm/hvcall.h>
31#include <asm/iommu.h>
32#include <asm/prom.h>
33#include <asm/vio.h>
34
35#include "ibmvscsi.h"
36
37#define INITIAL_SRP_LIMIT 16
38#define DEFAULT_MAX_SECTORS 512
39
40#define TGT_NAME "ibmvstgt"
41
42/*
43 * Hypervisor calls.
44 */
45#define h_copy_rdma(l, sa, sb, da, db) \
46 plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
47#define h_send_crq(ua, l, h) \
48 plpar_hcall_norets(H_SEND_CRQ, ua, l, h)
49#define h_reg_crq(ua, tok, sz)\
50 plpar_hcall_norets(H_REG_CRQ, ua, tok, sz);
51#define h_free_crq(ua) \
52 plpar_hcall_norets(H_FREE_CRQ, ua);
53
54/* tmp - will replace with SCSI logging stuff */
55#define eprintk(fmt, args...) \
56do { \
57 printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
58} while (0)
59/* #define dprintk eprintk */
60#define dprintk(fmt, args...)
61
62struct vio_port {
63 struct vio_dev *dma_dev;
64
65 struct crq_queue crq_queue;
66 struct work_struct crq_work;
67
68 unsigned long liobn;
69 unsigned long riobn;
70 struct srp_target *target;
71};
72
73static struct workqueue_struct *vtgtd;
74
75/*
76 * These are fixed for the system and come from the Open Firmware device tree.
77 * We just store them here to save getting them every time.
78 */
79static char system_id[64] = "";
80static char partition_name[97] = "UNKNOWN";
81static unsigned int partition_number = -1;
82
83static struct vio_port *target_to_port(struct srp_target *target)
84{
85 return (struct vio_port *) target->ldata;
86}
87
88static inline union viosrp_iu *vio_iu(struct iu_entry *iue)
89{
90 return (union viosrp_iu *) (iue->sbuf->buf);
91}
92
/*
 * Copy a response IU back to the initiator via RDMA, then post a CRQ
 * entry announcing it.
 *
 * @iue:    entry whose sbuf holds the outgoing IU
 * @length: number of bytes of the IU to transfer
 * @format: viosrp format tag (VIOSRP_SRP_FORMAT or VIOSRP_MAD_FORMAT)
 *
 * Returns 0 on success, otherwise the failing hcall's return code.
 */
static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format)
{
	struct srp_target *target = iue->target;
	struct vio_port *vport = target_to_port(target);
	long rc, rc1;
	union {
		struct viosrp_crq cooked;
		uint64_t raw[2];
	} crq;

	/* First copy the SRP */
	rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma,
			 vport->riobn, iue->remote_token);

	if (rc)
		eprintk("Error %ld transferring data\n", rc);

	crq.cooked.valid = 0x80;	/* "valid command/response" element */
	crq.cooked.format = format;
	crq.cooked.reserved = 0x00;
	crq.cooked.timeout = 0x00;
	crq.cooked.IU_length = length;
	crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag;

	/*
	 * NOTE(review): status is non-zero on SUCCESS and zero on failure,
	 * which reads inverted but appears intentional per the comment
	 * below -- confirm against the VIOSRP protocol definition.
	 */
	if (rc == 0)
		crq.cooked.status = 0x99;	/* Just needs to be non-zero */
	else
		crq.cooked.status = 0x00;

	rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]);

	if (rc1) {
		eprintk("%ld sending response\n", rc1);
		return rc1;
	}

	return rc;
}
131
132#define SRP_RSP_SENSE_DATA_LEN 18
133
/*
 * Build an SRP_RSP IU in place over the request and send it back.
 *
 * @iue:    request being answered
 * @sc:     completed SCSI command, or NULL when there is no command
 *          context (e.g. task-management responses)
 * @status: SAM status (interpreted as a sense key when @sc is NULL)
 * @asc:    additional sense code used when sense is fabricated
 *
 * When @sc is NULL and @status is non-zero, an 18-byte fixed-format
 * sense buffer is fabricated from @status/@asc.
 */
static int send_rsp(struct iu_entry *iue, struct scsi_cmnd *sc,
		    unsigned char status, unsigned char asc)
{
	union viosrp_iu *iu = vio_iu(iue);
	/* save the tag: the memset below wipes the request IU */
	uint64_t tag = iu->srp.rsp.tag;

	/* If the linked bit is on and status is good */
	if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE))
		status = 0x10;	/* 0x10 == SAM INTERMEDIATE status */

	memset(iu, 0, sizeof(struct srp_rsp));
	iu->srp.rsp.opcode = SRP_RSP;
	iu->srp.rsp.req_lim_delta = 1;	/* hand one request credit back */
	iu->srp.rsp.tag = tag;

	if (test_bit(V_DIOVER, &iue->flags))
		iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;

	iu->srp.rsp.data_in_res_cnt = 0;
	iu->srp.rsp.data_out_res_cnt = 0;

	iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID;

	iu->srp.rsp.resp_data_len = 0;
	iu->srp.rsp.status = status;
	if (status) {
		uint8_t *sense = iu->srp.rsp.data;

		if (sc) {
			/* real sense data from the completed command */
			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
			iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE;
			memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE);
		} else {
			/* fabricate fixed-format sense from status/asc */
			iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION;
			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
			iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN;

			/* Valid bit and 'current errors' */
			sense[0] = (0x1 << 7 | 0x70);
			/* Sense key */
			sense[2] = status;
			/* Additional sense length */
			sense[7] = 0xa;	/* 10 bytes */
			/* Additional sense code */
			sense[12] = asc;
		}
	}

	/* always send room for the largest sense payload we may attach */
	send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN,
		VIOSRP_SRP_FORMAT);

	return 0;
}
187
/*
 * Push every not-yet-submitted command on the target's cmd_queue into
 * the scsi-tgt layer.
 *
 * The V_FLYING bit marks entries already handed over.  The lock is
 * dropped before calling srp_cmd_queue(), so the list is rescanned
 * from the head after every submission ("retry"); this is safe
 * because claimed entries are skipped via the flag.
 */
static void handle_cmd_queue(struct srp_target *target)
{
	struct Scsi_Host *shost = target->shost;
	struct iu_entry *iue;
	struct srp_cmd *cmd;
	unsigned long flags;
	int err;

retry:
	spin_lock_irqsave(&target->lock, flags);

	list_for_each_entry(iue, &target->cmd_queue, ilist) {
		if (!test_and_set_bit(V_FLYING, &iue->flags)) {
			spin_unlock_irqrestore(&target->lock, flags);
			cmd = iue->sbuf->buf;
			err = srp_cmd_queue(shost, cmd, iue, 0);
			if (err) {
				eprintk("cannot queue cmd %p %d\n", cmd, err);
				srp_iu_put(iue);
			}
			goto retry;
		}
	}

	spin_unlock_irqrestore(&target->lock, flags);
}
214
215static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
216 struct srp_direct_buf *md, int nmd,
217 enum dma_data_direction dir, unsigned int rest)
218{
219 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
220 struct srp_target *target = iue->target;
221 struct vio_port *vport = target_to_port(target);
222 dma_addr_t token;
223 long err;
224 unsigned int done = 0;
225 int i, sidx, soff;
226
227 sidx = soff = 0;
228 token = sg_dma_address(sg + sidx);
229
230 for (i = 0; i < nmd && rest; i++) {
231 unsigned int mdone, mlen;
232
233 mlen = min(rest, md[i].len);
234 for (mdone = 0; mlen;) {
235 int slen = min(sg_dma_len(sg + sidx) - soff, mlen);
236
237 if (dir == DMA_TO_DEVICE)
238 err = h_copy_rdma(slen,
239 vport->riobn,
240 md[i].va + mdone,
241 vport->liobn,
242 token + soff);
243 else
244 err = h_copy_rdma(slen,
245 vport->liobn,
246 token + soff,
247 vport->riobn,
248 md[i].va + mdone);
249
250 if (err != H_SUCCESS) {
251 eprintk("rdma error %d %d\n", dir, slen);
252 goto out;
253 }
254
255 mlen -= slen;
256 mdone += slen;
257 soff += slen;
258 done += slen;
259
260 if (soff == sg_dma_len(sg + sidx)) {
261 sidx++;
262 soff = 0;
263 token = sg_dma_address(sg + sidx);
264
265 if (sidx > nsg) {
266 eprintk("out of sg %p %d %d\n",
267 iue, sidx, nsg);
268 goto out;
269 }
270 }
271 };
272
273 rest -= mlen;
274 }
275out:
276
277 return 0;
278}
279
280static int ibmvstgt_transfer_data(struct scsi_cmnd *sc,
281 void (*done)(struct scsi_cmnd *))
282{
283 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
284 int err;
285
286 err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
287
288 done(sc);
289
290 return err;
291}
292
/*
 * scsi-tgt "transfer_response" hook: the command is finished; unlink
 * its entry from the target list, send the SRP_RSP to the initiator,
 * complete the midlayer command and drop the IU reference.
 */
static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
			     void (*done)(struct scsi_cmnd *))
{
	unsigned long flags;
	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
	struct srp_target *target = iue->target;

	dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);

	spin_lock_irqsave(&target->lock, flags);
	list_del(&iue->ilist);
	spin_unlock_irqrestore(&target->lock, flags);

	if (sc->result != SAM_STAT_GOOD) {
		eprintk("operation failed %p %d %x\n",
			iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
		/* sc is non-NULL, so send_rsp() copies sc's sense buffer */
		send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
	} else
		send_rsp(iue, sc, NO_SENSE, 0x00);

	done(sc);
	srp_iu_put(iue);
	return 0;
}
317
318int send_adapter_info(struct iu_entry *iue,
319 dma_addr_t remote_buffer, uint16_t length)
320{
321 struct srp_target *target = iue->target;
322 struct vio_port *vport = target_to_port(target);
323 struct Scsi_Host *shost = target->shost;
324 dma_addr_t data_token;
325 struct mad_adapter_info_data *info;
326 int err;
327
328 info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token,
329 GFP_KERNEL);
330 if (!info) {
331 eprintk("bad dma_alloc_coherent %p\n", target);
332 return 1;
333 }
334
335 /* Get remote info */
336 err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer,
337 vport->liobn, data_token);
338 if (err == H_SUCCESS) {
339 dprintk("Client connect: %s (%d)\n",
340 info->partition_name, info->partition_number);
341 }
342
343 memset(info, 0, sizeof(*info));
344
345 strcpy(info->srp_version, "16.a");
346 strncpy(info->partition_name, partition_name,
347 sizeof(info->partition_name));
348 info->partition_number = partition_number;
349 info->mad_version = 1;
350 info->os_type = 2;
351 info->port_max_txu[0] = shost->hostt->max_sectors << 9;
352
353 /* Send our info to remote */
354 err = h_copy_rdma(sizeof(*info), vport->liobn, data_token,
355 vport->riobn, remote_buffer);
356
357 dma_free_coherent(target->dev, sizeof(*info), info, data_token);
358
359 if (err != H_SUCCESS) {
360 eprintk("Error sending adapter info %d\n", err);
361 return 1;
362 }
363
364 return 0;
365}
366
367static void process_login(struct iu_entry *iue)
368{
369 union viosrp_iu *iu = vio_iu(iue);
370 struct srp_login_rsp *rsp = &iu->srp.login_rsp;
371 uint64_t tag = iu->srp.rsp.tag;
372
373 /* TODO handle case that requested size is wrong and
374 * buffer format is wrong
375 */
376 memset(iu, 0, sizeof(struct srp_login_rsp));
377 rsp->opcode = SRP_LOGIN_RSP;
378 rsp->req_lim_delta = INITIAL_SRP_LIMIT;
379 rsp->tag = tag;
380 rsp->max_it_iu_len = sizeof(union srp_iu);
381 rsp->max_ti_iu_len = sizeof(union srp_iu);
382 /* direct and indirect */
383 rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
384
385 send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT);
386}
387
388static inline void queue_cmd(struct iu_entry *iue)
389{
390 struct srp_target *target = iue->target;
391 unsigned long flags;
392
393 spin_lock_irqsave(&target->lock, flags);
394 list_add_tail(&iue->ilist, &target->cmd_queue);
395 spin_unlock_irqrestore(&target->lock, flags);
396}
397
398static int process_tsk_mgmt(struct iu_entry *iue)
399{
400 union viosrp_iu *iu = vio_iu(iue);
401 int fn;
402
403 dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func);
404
405 switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
406 case SRP_TSK_ABORT_TASK:
407 fn = ABORT_TASK;
408 break;
409 case SRP_TSK_ABORT_TASK_SET:
410 fn = ABORT_TASK_SET;
411 break;
412 case SRP_TSK_CLEAR_TASK_SET:
413 fn = CLEAR_TASK_SET;
414 break;
415 case SRP_TSK_LUN_RESET:
416 fn = LOGICAL_UNIT_RESET;
417 break;
418 case SRP_TSK_CLEAR_ACA:
419 fn = CLEAR_ACA;
420 break;
421 default:
422 fn = 0;
423 }
424 if (fn)
425 scsi_tgt_tsk_mgmt_request(iue->target->shost, fn,
426 iu->srp.tsk_mgmt.task_tag,
427 (struct scsi_lun *) &iu->srp.tsk_mgmt.lun,
428 iue);
429 else
430 send_rsp(iue, NULL, ILLEGAL_REQUEST, 0x20);
431
432 return !fn;
433}
434
435static int process_mad_iu(struct iu_entry *iue)
436{
437 union viosrp_iu *iu = vio_iu(iue);
438 struct viosrp_adapter_info *info;
439 struct viosrp_host_config *conf;
440
441 switch (iu->mad.empty_iu.common.type) {
442 case VIOSRP_EMPTY_IU_TYPE:
443 eprintk("%s\n", "Unsupported EMPTY MAD IU");
444 break;
445 case VIOSRP_ERROR_LOG_TYPE:
446 eprintk("%s\n", "Unsupported ERROR LOG MAD IU");
447 iu->mad.error_log.common.status = 1;
448 send_iu(iue, sizeof(iu->mad.error_log), VIOSRP_MAD_FORMAT);
449 break;
450 case VIOSRP_ADAPTER_INFO_TYPE:
451 info = &iu->mad.adapter_info;
452 info->common.status = send_adapter_info(iue, info->buffer,
453 info->common.length);
454 send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT);
455 break;
456 case VIOSRP_HOST_CONFIG_TYPE:
457 conf = &iu->mad.host_config;
458 conf->common.status = 1;
459 send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT);
460 break;
461 default:
462 eprintk("Unknown type %u\n", iu->srp.rsp.opcode);
463 }
464
465 return 1;
466}
467
468static int process_srp_iu(struct iu_entry *iue)
469{
470 union viosrp_iu *iu = vio_iu(iue);
471 int done = 1;
472 u8 opcode = iu->srp.rsp.opcode;
473
474 switch (opcode) {
475 case SRP_LOGIN_REQ:
476 process_login(iue);
477 break;
478 case SRP_TSK_MGMT:
479 done = process_tsk_mgmt(iue);
480 break;
481 case SRP_CMD:
482 queue_cmd(iue);
483 done = 0;
484 break;
485 case SRP_LOGIN_RSP:
486 case SRP_I_LOGOUT:
487 case SRP_T_LOGOUT:
488 case SRP_RSP:
489 case SRP_CRED_REQ:
490 case SRP_CRED_RSP:
491 case SRP_AER_REQ:
492 case SRP_AER_RSP:
493 eprintk("Unsupported type %u\n", opcode);
494 break;
495 default:
496 eprintk("Unknown type %u\n", opcode);
497 }
498
499 return done;
500}
501
/*
 * Pull one request IU from the initiator: take an iu_entry from the
 * pool, DMA the IU across, and hand it to the MAD or SRP dispatcher.
 * The entry is released here unless the dispatcher kept ownership
 * (returned 0).
 */
static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	struct iu_entry *iue;
	long err, done;

	iue = srp_iu_get(target);
	if (!iue) {
		eprintk("Error getting IU from pool, %p\n", target);
		return;
	}

	iue->remote_token = crq->IU_data_ptr;

	/* copy the IU from the initiator into our local buffer */
	err = h_copy_rdma(crq->IU_length, vport->riobn,
			  iue->remote_token, vport->liobn, iue->sbuf->dma);

	if (err != H_SUCCESS) {
		eprintk("%ld transferring data error %p\n", err, iue);
		done = 1;
		goto out;
	}

	if (crq->format == VIOSRP_MAD_FORMAT)
		done = process_mad_iu(iue);
	else
		done = process_srp_iu(iue);
out:
	if (done)
		srp_iu_put(iue);
}
533
534static irqreturn_t ibmvstgt_interrupt(int irq, void *data)
535{
536 struct srp_target *target = (struct srp_target *) data;
537 struct vio_port *vport = target_to_port(target);
538
539 vio_disable_interrupts(vport->dma_dev);
540 queue_work(vtgtd, &vport->crq_work);
541
542 return IRQ_HANDLED;
543}
544
/*
 * Allocate the CRQ page, register it with the hypervisor, and hook up
 * the device interrupt.
 *
 * Returns 0 on success.  Every failure path unwinds what was set up;
 * -ENOMEM is returned regardless of the actual cause (callers only
 * test for non-zero).
 */
static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
{
	int err;
	struct vio_port *vport = target_to_port(target);

	queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);
	if (!queue->msgs)
		goto malloc_failed;
	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(target->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(queue->msg_token))
		goto map_failed;

	err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
			PAGE_SIZE);

	/* If the adapter was left active for some reason (like kexec)
	 * try freeing and re-registering
	 */
	if (err == H_RESOURCE) {
		do {
			err = h_free_crq(vport->dma_dev->unit_address);
		} while (err == H_BUSY || H_IS_LONG_BUSY(err));

		err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
				PAGE_SIZE);
	}

	/* NOTE(review): the magic 2 is presumably H_CLOSED ("partner not
	 * yet connected"), acceptable at registration time -- confirm */
	if (err != H_SUCCESS && err != 2) {
		eprintk("Error 0x%x opening virtual adapter\n", err);
		goto reg_crq_failed;
	}

	err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
			  SA_INTERRUPT, "ibmvstgt", target);
	if (err)
		goto req_irq_failed;

	vio_enable_interrupts(vport->dma_dev);

	/* announce ourselves to the partner (CRQ init message) */
	h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0);

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	return 0;

req_irq_failed:
	do {
		err = h_free_crq(vport->dma_dev->unit_address);
	} while (err == H_BUSY || H_IS_LONG_BUSY(err));

reg_crq_failed:
	dma_unmap_single(target->dev, queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long) queue->msgs);

malloc_failed:
	return -ENOMEM;
}
610
/*
 * Tear down the CRQ: free the irq first so no new work is queued,
 * then release the hypervisor registration, the DMA mapping, and the
 * queue page -- strict reverse of crq_queue_create().
 */
static void crq_queue_destroy(struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	struct crq_queue *queue = &vport->crq_queue;
	int err;

	free_irq(vport->dma_dev->irq, target);
	do {
		err = h_free_crq(vport->dma_dev->unit_address);
	} while (err == H_BUSY || H_IS_LONG_BUSY(err));

	dma_unmap_single(target->dev, queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);

	free_page((unsigned long) queue->msgs);
}
627
/*
 * Handle one CRQ element.  The 'valid' byte selects the class:
 * 0xC0 initialization handshake, 0xFF transport event, 0x80 payload.
 */
static void process_crq(struct viosrp_crq *crq, struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	dprintk("%x %x\n", crq->valid, crq->format);

	switch (crq->valid) {
	case 0xC0:
		/* initialization */
		switch (crq->format) {
		case 0x01:
			/* partner sent INIT: answer with INIT_COMPLETE */
			h_send_crq(vport->dma_dev->unit_address,
				   0xC002000000000000, 0);
			break;
		case 0x02:
			/* partner acked our INIT; nothing to do */
			break;
		default:
			eprintk("Unknown format %u\n", crq->format);
		}
		break;
	case 0xFF:
		/* transport event */
		break;
	case 0x80:
		/* real payload */
		switch (crq->format) {
		case VIOSRP_SRP_FORMAT:
		case VIOSRP_MAD_FORMAT:
			process_iu(crq, target);
			break;
		case VIOSRP_OS400_FORMAT:
		case VIOSRP_AIX_FORMAT:
		case VIOSRP_LINUX_FORMAT:
		case VIOSRP_INLINE_FORMAT:
			eprintk("Unsupported format %u\n", crq->format);
			break;
		default:
			eprintk("Unknown format %u\n", crq->format);
		}
		break;
	default:
		eprintk("unknown message type 0x%02x!?\n", crq->valid);
	}
}
671
672static inline struct viosrp_crq *next_crq(struct crq_queue *queue)
673{
674 struct viosrp_crq *crq;
675 unsigned long flags;
676
677 spin_lock_irqsave(&queue->lock, flags);
678 crq = &queue->msgs[queue->cur];
679 if (crq->valid & 0x80) {
680 if (++queue->cur == queue->size)
681 queue->cur = 0;
682 } else
683 crq = NULL;
684 spin_unlock_irqrestore(&queue->lock, flags);
685
686 return crq;
687}
688
/*
 * Workqueue handler: drain the CRQ, then kick queued SRP commands.
 *
 * After draining, interrupts are re-enabled and the queue is polled
 * once more; this closes the window where an element arrives between
 * the last poll and vio_enable_interrupts() (its interrupt may have
 * fired while interrupts were disabled and thus been lost).
 */
static void handle_crq(struct work_struct *work)
{
	struct vio_port *vport = container_of(work, struct vio_port, crq_work);
	struct srp_target *target = vport->target;
	struct viosrp_crq *crq;
	int done = 0;

	while (!done) {
		while ((crq = next_crq(&vport->crq_queue)) != NULL) {
			process_crq(crq, target);
			crq->valid = 0x00;	/* hand the slot back */
		}

		vio_enable_interrupts(vport->dma_dev);

		/* recheck to catch a race with interrupt enabling */
		crq = next_crq(&vport->crq_queue);
		if (crq) {
			vio_disable_interrupts(vport->dma_dev);
			process_crq(crq, target);
			crq->valid = 0x00;
		} else
			done = 1;
	}

	handle_cmd_queue(target);
}
715
716
717static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *sc)
718{
719 unsigned long flags;
720 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
721 struct srp_target *target = iue->target;
722
723 dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
724
725 spin_lock_irqsave(&target->lock, flags);
726 list_del(&iue->ilist);
727 spin_unlock_irqrestore(&target->lock, flags);
728
729 srp_iu_put(iue);
730
731 return 0;
732}
733
734static int ibmvstgt_tsk_mgmt_response(u64 mid, int result)
735{
736 struct iu_entry *iue = (struct iu_entry *) ((void *) mid);
737 union viosrp_iu *iu = vio_iu(iue);
738 unsigned char status, asc;
739
740 eprintk("%p %d\n", iue, result);
741 status = NO_SENSE;
742 asc = 0;
743
744 switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
745 case SRP_TSK_ABORT_TASK:
746 asc = 0x14;
747 if (result)
748 status = ABORTED_COMMAND;
749 break;
750 default:
751 break;
752 }
753
754 send_rsp(iue, NULL, status, asc);
755 srp_iu_put(iue);
756
757 return 0;
758}
759
760static ssize_t system_id_show(struct class_device *cdev, char *buf)
761{
762 return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
763}
764
765static ssize_t partition_number_show(struct class_device *cdev, char *buf)
766{
767 return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
768}
769
770static ssize_t unit_address_show(struct class_device *cdev, char *buf)
771{
772 struct Scsi_Host *shost = class_to_shost(cdev);
773 struct srp_target *target = host_to_srp_target(shost);
774 struct vio_port *vport = target_to_port(target);
775 return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address);
776}
777
778static CLASS_DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL);
779static CLASS_DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
780static CLASS_DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
781
782static struct class_device_attribute *ibmvstgt_attrs[] = {
783 &class_device_attr_system_id,
784 &class_device_attr_partition_number,
785 &class_device_attr_unit_address,
786 NULL,
787};
788
789static struct scsi_host_template ibmvstgt_sht = {
790 .name = TGT_NAME,
791 .module = THIS_MODULE,
792 .can_queue = INITIAL_SRP_LIMIT,
793 .sg_tablesize = SG_ALL,
794 .use_clustering = DISABLE_CLUSTERING,
795 .max_sectors = DEFAULT_MAX_SECTORS,
796 .transfer_response = ibmvstgt_cmd_done,
797 .transfer_data = ibmvstgt_transfer_data,
798 .eh_abort_handler = ibmvstgt_eh_abort_handler,
799 .tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
800 .shost_attrs = ibmvstgt_attrs,
801 .proc_name = TGT_NAME,
802};
803
/*
 * vio bus probe: allocate the port/host/target triple, read the DMA
 * window from the device tree, bring up the CRQ and register the SCSI
 * host.  Unwinds in strict reverse order on failure.
 */
static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct Scsi_Host *shost;
	struct srp_target *target;
	struct vio_port *vport;
	unsigned int *dma, dma_size;
	int err = -ENOMEM;

	vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL);
	if (!vport)
		return err;
	shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target));
	if (!shost)
		goto free_vport;
	err = scsi_tgt_alloc_queue(shost);
	if (err)
		goto put_host;

	target = host_to_srp_target(shost);
	target->shost = shost;
	vport->dma_dev = dev;
	target->ldata = vport;
	vport->target = target;
	err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
			       SRP_MAX_IU_LEN);
	if (err)
		goto put_host;

	/* presumably local IOBN at word 0, remote IOBN at word 5 of the
	 * 40-byte window property -- confirm against PAPR */
	dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window",
						 &dma_size);
	if (!dma || dma_size != 40) {
		eprintk("Couldn't get window property %d\n", dma_size);
		err = -EIO;
		goto free_srp_target;
	}
	vport->liobn = dma[0];
	vport->riobn = dma[5];

	INIT_WORK(&vport->crq_work, handle_crq);

	err = crq_queue_create(&vport->crq_queue, target);
	if (err)
		goto free_srp_target;

	err = scsi_add_host(shost, target->dev);
	if (err)
		goto destroy_queue;
	return 0;

destroy_queue:
	crq_queue_destroy(target);
free_srp_target:
	srp_target_free(target);
put_host:
	scsi_host_put(shost);
free_vport:
	kfree(vport);
	return err;
}
863
/*
 * vio bus remove: tear down in reverse of probe.  The target is
 * recovered from driver_data (presumably set by srp_target_alloc --
 * TODO confirm in libsrp).
 */
static int ibmvstgt_remove(struct vio_dev *dev)
{
	struct srp_target *target = (struct srp_target *) dev->dev.driver_data;
	struct Scsi_Host *shost = target->shost;
	struct vio_port *vport = target->ldata;

	crq_queue_destroy(target);
	scsi_remove_host(shost);
	scsi_tgt_free_queue(shost);
	srp_target_free(target);
	kfree(vport);
	scsi_host_put(shost);
	return 0;
}
878
879static struct vio_device_id ibmvstgt_device_table[] __devinitdata = {
880 {"v-scsi-host", "IBM,v-scsi-host"},
881 {"",""}
882};
883
884MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table);
885
886static struct vio_driver ibmvstgt_driver = {
887 .id_table = ibmvstgt_device_table,
888 .probe = ibmvstgt_probe,
889 .remove = ibmvstgt_remove,
890 .driver = {
891 .name = "ibmvscsis",
892 .owner = THIS_MODULE,
893 }
894};
895
896static int get_system_info(void)
897{
898 struct device_node *rootdn;
899 const char *id, *model, *name;
900 unsigned int *num;
901
902 rootdn = find_path_device("/");
903 if (!rootdn)
904 return -ENOENT;
905
906 model = get_property(rootdn, "model", NULL);
907 id = get_property(rootdn, "system-id", NULL);
908 if (model && id)
909 snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
910
911 name = get_property(rootdn, "ibm,partition-name", NULL);
912 if (name)
913 strncpy(partition_name, name, sizeof(partition_name));
914
915 num = (unsigned int *) get_property(rootdn, "ibm,partition-no", NULL);
916 if (num)
917 partition_number = *num;
918
919 return 0;
920}
921
922static int ibmvstgt_init(void)
923{
924 int err = -ENOMEM;
925
926 printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n");
927
928 vtgtd = create_workqueue("ibmvtgtd");
929 if (!vtgtd)
930 return err;
931
932 err = get_system_info();
933 if (err)
934 goto destroy_wq;
935
936 err = vio_register_driver(&ibmvstgt_driver);
937 if (err)
938 goto destroy_wq;
939
940 return 0;
941
942destroy_wq:
943 destroy_workqueue(vtgtd);
944 return err;
945}
946
947static void ibmvstgt_exit(void)
948{
949 printk("Unregister IBM virtual SCSI driver\n");
950
951 destroy_workqueue(vtgtd);
952 vio_unregister_driver(&ibmvstgt_driver);
953}
954
955MODULE_DESCRIPTION("IBM Virtual SCSI Target");
956MODULE_AUTHOR("Santiago Leon");
957MODULE_LICENSE("GPL");
958
959module_init(ibmvstgt_init);
960module_exit(ibmvstgt_exit);
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index e31f6122106f..0464c182c577 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -36,7 +36,7 @@ typedef struct {
36 int base_hi; /* Hi Base address for ECP-ISA chipset */ 36 int base_hi; /* Hi Base address for ECP-ISA chipset */
37 int mode; /* Transfer mode */ 37 int mode; /* Transfer mode */
38 struct scsi_cmnd *cur_cmd; /* Current queued command */ 38 struct scsi_cmnd *cur_cmd; /* Current queued command */
39 struct work_struct imm_tq; /* Polling interrupt stuff */ 39 struct delayed_work imm_tq; /* Polling interrupt stuff */
40 unsigned long jstart; /* Jiffies at start */ 40 unsigned long jstart; /* Jiffies at start */
41 unsigned failed:1; /* Failure flag */ 41 unsigned failed:1; /* Failure flag */
42 unsigned dp:1; /* Data phase present */ 42 unsigned dp:1; /* Data phase present */
@@ -733,9 +733,9 @@ static int imm_completion(struct scsi_cmnd *cmd)
733 * the scheduler's task queue to generate a stream of call-backs and 733 * the scheduler's task queue to generate a stream of call-backs and
734 * complete the request when the drive is ready. 734 * complete the request when the drive is ready.
735 */ 735 */
736static void imm_interrupt(void *data) 736static void imm_interrupt(struct work_struct *work)
737{ 737{
738 imm_struct *dev = (imm_struct *) data; 738 imm_struct *dev = container_of(work, imm_struct, imm_tq.work);
739 struct scsi_cmnd *cmd = dev->cur_cmd; 739 struct scsi_cmnd *cmd = dev->cur_cmd;
740 struct Scsi_Host *host = cmd->device->host; 740 struct Scsi_Host *host = cmd->device->host;
741 unsigned long flags; 741 unsigned long flags;
@@ -745,7 +745,6 @@ static void imm_interrupt(void *data)
745 return; 745 return;
746 } 746 }
747 if (imm_engine(dev, cmd)) { 747 if (imm_engine(dev, cmd)) {
748 INIT_WORK(&dev->imm_tq, imm_interrupt, (void *) dev);
749 schedule_delayed_work(&dev->imm_tq, 1); 748 schedule_delayed_work(&dev->imm_tq, 1);
750 return; 749 return;
751 } 750 }
@@ -953,8 +952,7 @@ static int imm_queuecommand(struct scsi_cmnd *cmd,
953 cmd->result = DID_ERROR << 16; /* default return code */ 952 cmd->result = DID_ERROR << 16; /* default return code */
954 cmd->SCp.phase = 0; /* bus free */ 953 cmd->SCp.phase = 0; /* bus free */
955 954
956 INIT_WORK(&dev->imm_tq, imm_interrupt, dev); 955 schedule_delayed_work(&dev->imm_tq, 0);
957 schedule_work(&dev->imm_tq);
958 956
959 imm_pb_claim(dev); 957 imm_pb_claim(dev);
960 958
@@ -1225,7 +1223,7 @@ static int __imm_attach(struct parport *pb)
1225 else 1223 else
1226 ports = 8; 1224 ports = 8;
1227 1225
1228 INIT_WORK(&dev->imm_tq, imm_interrupt, dev); 1226 INIT_DELAYED_WORK(&dev->imm_tq, imm_interrupt);
1229 1227
1230 err = -ENOMEM; 1228 err = -ENOMEM;
1231 host = scsi_host_alloc(&imm_template, sizeof(imm_struct *)); 1229 host = scsi_host_alloc(&imm_template, sizeof(imm_struct *));
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index afed293dd7b9..f160357e37a6 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -170,7 +170,7 @@ static int setup_debug = 0;
170static void i91uSCBPost(BYTE * pHcb, BYTE * pScb); 170static void i91uSCBPost(BYTE * pHcb, BYTE * pScb);
171 171
172/* PCI Devices supported by this driver */ 172/* PCI Devices supported by this driver */
173static struct pci_device_id i91u_pci_devices[] __devinitdata = { 173static struct pci_device_id i91u_pci_devices[] = {
174 { PCI_VENDOR_ID_INIT, I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 174 { PCI_VENDOR_ID_INIT, I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
175 { PCI_VENDOR_ID_INIT, I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 175 { PCI_VENDOR_ID_INIT, I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
176 { PCI_VENDOR_ID_INIT, I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 176 { PCI_VENDOR_ID_INIT, I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2dde821025f3..ccd4dafce8e2 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -79,7 +79,6 @@
79#include <scsi/scsi_tcq.h> 79#include <scsi/scsi_tcq.h>
80#include <scsi/scsi_eh.h> 80#include <scsi/scsi_eh.h>
81#include <scsi/scsi_cmnd.h> 81#include <scsi/scsi_cmnd.h>
82#include <scsi/scsi_transport.h>
83#include "ipr.h" 82#include "ipr.h"
84 83
85/* 84/*
@@ -98,7 +97,7 @@ static DEFINE_SPINLOCK(ipr_driver_lock);
98 97
99/* This table describes the differences between DMA controller chips */ 98/* This table describes the differences between DMA controller chips */
100static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { 99static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
101 { /* Gemstone, Citrine, and Obsidian */ 100 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
102 .mailbox = 0x0042C, 101 .mailbox = 0x0042C,
103 .cache_line_size = 0x20, 102 .cache_line_size = 0x20,
104 { 103 {
@@ -135,6 +134,7 @@ static const struct ipr_chip_t ipr_chip[] = {
135 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] }, 134 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
136 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] }, 135 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
137 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] }, 136 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
137 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
138 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] }, 138 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
139 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] } 139 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
140}; 140};
@@ -1249,19 +1249,23 @@ static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1249 1249
1250/** 1250/**
1251 * ipr_log_hex_data - Log additional hex IOA error data. 1251 * ipr_log_hex_data - Log additional hex IOA error data.
1252 * @ioa_cfg: ioa config struct
1252 * @data: IOA error data 1253 * @data: IOA error data
1253 * @len: data length 1254 * @len: data length
1254 * 1255 *
1255 * Return value: 1256 * Return value:
1256 * none 1257 * none
1257 **/ 1258 **/
1258static void ipr_log_hex_data(u32 *data, int len) 1259static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1259{ 1260{
1260 int i; 1261 int i;
1261 1262
1262 if (len == 0) 1263 if (len == 0)
1263 return; 1264 return;
1264 1265
1266 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1267 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1268
1265 for (i = 0; i < len / 4; i += 4) { 1269 for (i = 0; i < len / 4; i += 4) {
1266 ipr_err("%08X: %08X %08X %08X %08X\n", i*4, 1270 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1267 be32_to_cpu(data[i]), 1271 be32_to_cpu(data[i]),
@@ -1290,7 +1294,7 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1290 ipr_err("%s\n", error->failure_reason); 1294 ipr_err("%s\n", error->failure_reason);
1291 ipr_err("Remote Adapter VPD:\n"); 1295 ipr_err("Remote Adapter VPD:\n");
1292 ipr_log_ext_vpd(&error->vpd); 1296 ipr_log_ext_vpd(&error->vpd);
1293 ipr_log_hex_data(error->data, 1297 ipr_log_hex_data(ioa_cfg, error->data,
1294 be32_to_cpu(hostrcb->hcam.length) - 1298 be32_to_cpu(hostrcb->hcam.length) -
1295 (offsetof(struct ipr_hostrcb_error, u) + 1299 (offsetof(struct ipr_hostrcb_error, u) +
1296 offsetof(struct ipr_hostrcb_type_17_error, data))); 1300 offsetof(struct ipr_hostrcb_type_17_error, data)));
@@ -1315,12 +1319,225 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1315 ipr_err("%s\n", error->failure_reason); 1319 ipr_err("%s\n", error->failure_reason);
1316 ipr_err("Remote Adapter VPD:\n"); 1320 ipr_err("Remote Adapter VPD:\n");
1317 ipr_log_vpd(&error->vpd); 1321 ipr_log_vpd(&error->vpd);
1318 ipr_log_hex_data(error->data, 1322 ipr_log_hex_data(ioa_cfg, error->data,
1319 be32_to_cpu(hostrcb->hcam.length) - 1323 be32_to_cpu(hostrcb->hcam.length) -
1320 (offsetof(struct ipr_hostrcb_error, u) + 1324 (offsetof(struct ipr_hostrcb_error, u) +
1321 offsetof(struct ipr_hostrcb_type_07_error, data))); 1325 offsetof(struct ipr_hostrcb_type_07_error, data)));
1322} 1326}
1323 1327
1328static const struct {
1329 u8 active;
1330 char *desc;
1331} path_active_desc[] = {
1332 { IPR_PATH_NO_INFO, "Path" },
1333 { IPR_PATH_ACTIVE, "Active path" },
1334 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1335};
1336
1337static const struct {
1338 u8 state;
1339 char *desc;
1340} path_state_desc[] = {
1341 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1342 { IPR_PATH_HEALTHY, "is healthy" },
1343 { IPR_PATH_DEGRADED, "is degraded" },
1344 { IPR_PATH_FAILED, "is failed" }
1345};
1346
1347/**
1348 * ipr_log_fabric_path - Log a fabric path error
1349 * @hostrcb: hostrcb struct
1350 * @fabric: fabric descriptor
1351 *
1352 * Return value:
1353 * none
1354 **/
1355static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1356 struct ipr_hostrcb_fabric_desc *fabric)
1357{
1358 int i, j;
1359 u8 path_state = fabric->path_state;
1360 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1361 u8 state = path_state & IPR_PATH_STATE_MASK;
1362
1363 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1364 if (path_active_desc[i].active != active)
1365 continue;
1366
1367 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1368 if (path_state_desc[j].state != state)
1369 continue;
1370
1371 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1372 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1373 path_active_desc[i].desc, path_state_desc[j].desc,
1374 fabric->ioa_port);
1375 } else if (fabric->cascaded_expander == 0xff) {
1376 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1377 path_active_desc[i].desc, path_state_desc[j].desc,
1378 fabric->ioa_port, fabric->phy);
1379 } else if (fabric->phy == 0xff) {
1380 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1381 path_active_desc[i].desc, path_state_desc[j].desc,
1382 fabric->ioa_port, fabric->cascaded_expander);
1383 } else {
1384 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1385 path_active_desc[i].desc, path_state_desc[j].desc,
1386 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1387 }
1388 return;
1389 }
1390 }
1391
1392 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1393 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1394}
1395
1396static const struct {
1397 u8 type;
1398 char *desc;
1399} path_type_desc[] = {
1400 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1401 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1402 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1403 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1404};
1405
1406static const struct {
1407 u8 status;
1408 char *desc;
1409} path_status_desc[] = {
1410 { IPR_PATH_CFG_NO_PROB, "Functional" },
1411 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1412 { IPR_PATH_CFG_FAILED, "Failed" },
1413 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1414 { IPR_PATH_NOT_DETECTED, "Missing" },
1415 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1416};
1417
1418static const char *link_rate[] = {
1419 "unknown",
1420 "disabled",
1421 "phy reset problem",
1422 "spinup hold",
1423 "port selector",
1424 "unknown",
1425 "unknown",
1426 "unknown",
1427 "1.5Gbps",
1428 "3.0Gbps",
1429 "unknown",
1430 "unknown",
1431 "unknown",
1432 "unknown",
1433 "unknown",
1434 "unknown"
1435};
1436
1437/**
1438 * ipr_log_path_elem - Log a fabric path element.
1439 * @hostrcb: hostrcb struct
1440 * @cfg: fabric path element struct
1441 *
1442 * Return value:
1443 * none
1444 **/
1445static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1446 struct ipr_hostrcb_config_element *cfg)
1447{
1448 int i, j;
1449 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1450 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1451
1452 if (type == IPR_PATH_CFG_NOT_EXIST)
1453 return;
1454
1455 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1456 if (path_type_desc[i].type != type)
1457 continue;
1458
1459 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1460 if (path_status_desc[j].status != status)
1461 continue;
1462
1463 if (type == IPR_PATH_CFG_IOA_PORT) {
1464 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1465 path_status_desc[j].desc, path_type_desc[i].desc,
1466 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1467 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1468 } else {
1469 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
1470 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
1471 path_status_desc[j].desc, path_type_desc[i].desc,
1472 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1473 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1474 } else if (cfg->cascaded_expander == 0xff) {
1475 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
1476 "WWN=%08X%08X\n", path_status_desc[j].desc,
1477 path_type_desc[i].desc, cfg->phy,
1478 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1479 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1480 } else if (cfg->phy == 0xff) {
1481 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
1482 "WWN=%08X%08X\n", path_status_desc[j].desc,
1483 path_type_desc[i].desc, cfg->cascaded_expander,
1484 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1485 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1486 } else {
1487 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
1488 "WWN=%08X%08X\n", path_status_desc[j].desc,
1489 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
1490 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1491 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1492 }
1493 }
1494 return;
1495 }
1496 }
1497
1498 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
1499 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
1500 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1501 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1502}
1503
1504/**
1505 * ipr_log_fabric_error - Log a fabric error.
1506 * @ioa_cfg: ioa config struct
1507 * @hostrcb: hostrcb struct
1508 *
1509 * Return value:
1510 * none
1511 **/
1512static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
1513 struct ipr_hostrcb *hostrcb)
1514{
1515 struct ipr_hostrcb_type_20_error *error;
1516 struct ipr_hostrcb_fabric_desc *fabric;
1517 struct ipr_hostrcb_config_element *cfg;
1518 int i, add_len;
1519
1520 error = &hostrcb->hcam.u.error.u.type_20_error;
1521 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1522 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
1523
1524 add_len = be32_to_cpu(hostrcb->hcam.length) -
1525 (offsetof(struct ipr_hostrcb_error, u) +
1526 offsetof(struct ipr_hostrcb_type_20_error, desc));
1527
1528 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
1529 ipr_log_fabric_path(hostrcb, fabric);
1530 for_each_fabric_cfg(fabric, cfg)
1531 ipr_log_path_elem(hostrcb, cfg);
1532
1533 add_len -= be16_to_cpu(fabric->length);
1534 fabric = (struct ipr_hostrcb_fabric_desc *)
1535 ((unsigned long)fabric + be16_to_cpu(fabric->length));
1536 }
1537
1538 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
1539}
1540
1324/** 1541/**
1325 * ipr_log_generic_error - Log an adapter error. 1542 * ipr_log_generic_error - Log an adapter error.
1326 * @ioa_cfg: ioa config struct 1543 * @ioa_cfg: ioa config struct
@@ -1332,7 +1549,7 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1332static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg, 1549static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1333 struct ipr_hostrcb *hostrcb) 1550 struct ipr_hostrcb *hostrcb)
1334{ 1551{
1335 ipr_log_hex_data(hostrcb->hcam.u.raw.data, 1552 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
1336 be32_to_cpu(hostrcb->hcam.length)); 1553 be32_to_cpu(hostrcb->hcam.length));
1337} 1554}
1338 1555
@@ -1394,13 +1611,7 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1394 if (!ipr_error_table[error_index].log_hcam) 1611 if (!ipr_error_table[error_index].log_hcam)
1395 return; 1612 return;
1396 1613
1397 if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) { 1614 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1398 ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1399 "%s\n", ipr_error_table[error_index].error);
1400 } else {
1401 dev_err(&ioa_cfg->pdev->dev, "%s\n",
1402 ipr_error_table[error_index].error);
1403 }
1404 1615
1405 /* Set indication we have logged an error */ 1616 /* Set indication we have logged an error */
1406 ioa_cfg->errors_logged++; 1617 ioa_cfg->errors_logged++;
@@ -1437,6 +1648,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1437 case IPR_HOST_RCB_OVERLAY_ID_17: 1648 case IPR_HOST_RCB_OVERLAY_ID_17:
1438 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb); 1649 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1439 break; 1650 break;
1651 case IPR_HOST_RCB_OVERLAY_ID_20:
1652 ipr_log_fabric_error(ioa_cfg, hostrcb);
1653 break;
1440 case IPR_HOST_RCB_OVERLAY_ID_1: 1654 case IPR_HOST_RCB_OVERLAY_ID_1:
1441 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: 1655 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1442 default: 1656 default:
@@ -2093,7 +2307,7 @@ static void ipr_release_dump(struct kref *kref)
2093 2307
2094/** 2308/**
2095 * ipr_worker_thread - Worker thread 2309 * ipr_worker_thread - Worker thread
2096 * @data: ioa config struct 2310 * @work: ioa config struct
2097 * 2311 *
2098 * Called at task level from a work thread. This function takes care 2312 * Called at task level from a work thread. This function takes care
2099 * of adding and removing device from the mid-layer as configuration 2313 * of adding and removing device from the mid-layer as configuration
@@ -2102,13 +2316,14 @@ static void ipr_release_dump(struct kref *kref)
2102 * Return value: 2316 * Return value:
2103 * nothing 2317 * nothing
2104 **/ 2318 **/
2105static void ipr_worker_thread(void *data) 2319static void ipr_worker_thread(struct work_struct *work)
2106{ 2320{
2107 unsigned long lock_flags; 2321 unsigned long lock_flags;
2108 struct ipr_resource_entry *res; 2322 struct ipr_resource_entry *res;
2109 struct scsi_device *sdev; 2323 struct scsi_device *sdev;
2110 struct ipr_dump *dump; 2324 struct ipr_dump *dump;
2111 struct ipr_ioa_cfg *ioa_cfg = data; 2325 struct ipr_ioa_cfg *ioa_cfg =
2326 container_of(work, struct ipr_ioa_cfg, work_q);
2112 u8 bus, target, lun; 2327 u8 bus, target, lun;
2113 int did_work; 2328 int did_work;
2114 2329
@@ -2969,7 +3184,6 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2969 struct ipr_dump *dump; 3184 struct ipr_dump *dump;
2970 unsigned long lock_flags = 0; 3185 unsigned long lock_flags = 0;
2971 3186
2972 ENTER;
2973 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL); 3187 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2974 3188
2975 if (!dump) { 3189 if (!dump) {
@@ -2996,7 +3210,6 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2996 } 3210 }
2997 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3211 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2998 3212
2999 LEAVE;
3000 return 0; 3213 return 0;
3001} 3214}
3002 3215
@@ -3573,6 +3786,12 @@ static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes)
3573 3786
3574 ENTER; 3787 ENTER;
3575 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3788 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3789 while(ioa_cfg->in_reset_reload) {
3790 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3791 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3792 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3793 }
3794
3576 res = sata_port->res; 3795 res = sata_port->res;
3577 if (res) { 3796 if (res) {
3578 rc = ipr_device_reset(ioa_cfg, res); 3797 rc = ipr_device_reset(ioa_cfg, res);
@@ -3636,6 +3855,10 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3636 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { 3855 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3637 if (ipr_cmd->scsi_cmd) 3856 if (ipr_cmd->scsi_cmd)
3638 ipr_cmd->done = ipr_scsi_eh_done; 3857 ipr_cmd->done = ipr_scsi_eh_done;
3858 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3859 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3860 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
3861 }
3639 } 3862 }
3640 } 3863 }
3641 3864
@@ -3770,7 +3993,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3770 */ 3993 */
3771 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead) 3994 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3772 return FAILED; 3995 return FAILED;
3773 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res))) 3996 if (!res || !ipr_is_gscsi(res))
3774 return FAILED; 3997 return FAILED;
3775 3998
3776 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 3999 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
@@ -4615,7 +4838,7 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4615 * Return value: 4838 * Return value:
4616 * 0 on success / other on failure 4839 * 0 on success / other on failure
4617 **/ 4840 **/
4618int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 4841static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4619{ 4842{
4620 struct ipr_resource_entry *res; 4843 struct ipr_resource_entry *res;
4621 4844
@@ -4648,40 +4871,6 @@ static const char * ipr_ioa_info(struct Scsi_Host *host)
4648 return buffer; 4871 return buffer;
4649} 4872}
4650 4873
4651/**
4652 * ipr_scsi_timed_out - Handle scsi command timeout
4653 * @scsi_cmd: scsi command struct
4654 *
4655 * Return value:
4656 * EH_NOT_HANDLED
4657 **/
4658enum scsi_eh_timer_return ipr_scsi_timed_out(struct scsi_cmnd *scsi_cmd)
4659{
4660 struct ipr_ioa_cfg *ioa_cfg;
4661 struct ipr_cmnd *ipr_cmd;
4662 unsigned long flags;
4663
4664 ENTER;
4665 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4666 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4667
4668 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4669 if (ipr_cmd->qc && ipr_cmd->qc->scsicmd == scsi_cmd) {
4670 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4671 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4672 break;
4673 }
4674 }
4675
4676 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4677 LEAVE;
4678 return EH_NOT_HANDLED;
4679}
4680
4681static struct scsi_transport_template ipr_transport_template = {
4682 .eh_timed_out = ipr_scsi_timed_out
4683};
4684
4685static struct scsi_host_template driver_template = { 4874static struct scsi_host_template driver_template = {
4686 .module = THIS_MODULE, 4875 .module = THIS_MODULE,
4687 .name = "IPR", 4876 .name = "IPR",
@@ -4776,6 +4965,12 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
4776 unsigned long flags; 4965 unsigned long flags;
4777 4966
4778 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 4967 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4968 while(ioa_cfg->in_reset_reload) {
4969 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
4970 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4971 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4972 }
4973
4779 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 4974 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4780 if (ipr_cmd->qc == qc) { 4975 if (ipr_cmd->qc == qc) {
4781 ipr_device_reset(ioa_cfg, sata_port->res); 4976 ipr_device_reset(ioa_cfg, sata_port->res);
@@ -6832,6 +7027,7 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
6832 7027
6833 ioa_cfg->hostrcb[i]->hostrcb_dma = 7028 ioa_cfg->hostrcb[i]->hostrcb_dma =
6834 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); 7029 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
7030 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
6835 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); 7031 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
6836 } 7032 }
6837 7033
@@ -6926,7 +7122,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
6926 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); 7122 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
6927 INIT_LIST_HEAD(&ioa_cfg->free_res_q); 7123 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
6928 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 7124 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
6929 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg); 7125 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
6930 init_waitqueue_head(&ioa_cfg->reset_wait_q); 7126 init_waitqueue_head(&ioa_cfg->reset_wait_q);
6931 ioa_cfg->sdt_state = INACTIVE; 7127 ioa_cfg->sdt_state = INACTIVE;
6932 if (ipr_enable_cache) 7128 if (ipr_enable_cache)
@@ -7017,7 +7213,6 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7017 7213
7018 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; 7214 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
7019 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg)); 7215 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
7020 host->transportt = &ipr_transport_template;
7021 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, 7216 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
7022 sata_port_info.flags, &ipr_sata_ops); 7217 sata_port_info.flags, &ipr_sata_ops);
7023 7218
@@ -7351,12 +7546,24 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
7351 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 7546 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7352 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 7547 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
7353 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, 7548 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7549 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7550 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
7551 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7354 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7552 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7355 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 7553 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
7356 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, 7554 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7357 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7555 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7358 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 7556 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
7359 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, 7557 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7558 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7559 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
7560 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7561 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7562 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B8,
7563 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7564 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7565 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7,
7566 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
7360 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, 7567 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
7361 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 7568 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
7362 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] }, 7569 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
@@ -7366,6 +7573,9 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
7366 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 7573 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7367 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 7574 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
7368 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] }, 7575 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
7576 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7577 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F,
7578 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
7369 { } 7579 { }
7370}; 7580};
7371MODULE_DEVICE_TABLE(pci, ipr_pci_table); 7581MODULE_DEVICE_TABLE(pci, ipr_pci_table);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 6d035283af08..9f62a1d4d511 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -37,8 +37,8 @@
37/* 37/*
38 * Literals 38 * Literals
39 */ 39 */
40#define IPR_DRIVER_VERSION "2.2.0" 40#define IPR_DRIVER_VERSION "2.3.0"
41#define IPR_DRIVER_DATE "(September 25, 2006)" 41#define IPR_DRIVER_DATE "(November 8, 2006)"
42 42
43/* 43/*
44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -54,6 +54,8 @@
54 */ 54 */
55#define IPR_NUM_BASE_CMD_BLKS 100 55#define IPR_NUM_BASE_CMD_BLKS 100
56 56
57#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339
58
57#define IPR_SUBS_DEV_ID_2780 0x0264 59#define IPR_SUBS_DEV_ID_2780 0x0264
58#define IPR_SUBS_DEV_ID_5702 0x0266 60#define IPR_SUBS_DEV_ID_5702 0x0266
59#define IPR_SUBS_DEV_ID_5703 0x0278 61#define IPR_SUBS_DEV_ID_5703 0x0278
@@ -66,7 +68,11 @@
66#define IPR_SUBS_DEV_ID_571F 0x02D5 68#define IPR_SUBS_DEV_ID_571F 0x02D5
67#define IPR_SUBS_DEV_ID_572A 0x02C1 69#define IPR_SUBS_DEV_ID_572A 0x02C1
68#define IPR_SUBS_DEV_ID_572B 0x02C2 70#define IPR_SUBS_DEV_ID_572B 0x02C2
71#define IPR_SUBS_DEV_ID_572F 0x02C3
69#define IPR_SUBS_DEV_ID_575B 0x030D 72#define IPR_SUBS_DEV_ID_575B 0x030D
73#define IPR_SUBS_DEV_ID_575C 0x0338
74#define IPR_SUBS_DEV_ID_57B7 0x0360
75#define IPR_SUBS_DEV_ID_57B8 0x02C2
70 76
71#define IPR_NAME "ipr" 77#define IPR_NAME "ipr"
72 78
@@ -98,6 +104,7 @@
98#define IPR_IOASC_IOA_WAS_RESET 0x10000001 104#define IPR_IOASC_IOA_WAS_RESET 0x10000001
99#define IPR_IOASC_PCI_ACCESS_ERROR 0x10000002 105#define IPR_IOASC_PCI_ACCESS_ERROR 0x10000002
100 106
107#define IPR_DEFAULT_MAX_ERROR_DUMP 984
101#define IPR_NUM_LOG_HCAMS 2 108#define IPR_NUM_LOG_HCAMS 2
102#define IPR_NUM_CFG_CHG_HCAMS 2 109#define IPR_NUM_CFG_CHG_HCAMS 2
103#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS) 110#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
@@ -731,6 +738,64 @@ struct ipr_hostrcb_type_17_error {
731 u32 data[476]; 738 u32 data[476];
732}__attribute__((packed, aligned (4))); 739}__attribute__((packed, aligned (4)));
733 740
741struct ipr_hostrcb_config_element {
742 u8 type_status;
743#define IPR_PATH_CFG_TYPE_MASK 0xF0
744#define IPR_PATH_CFG_NOT_EXIST 0x00
745#define IPR_PATH_CFG_IOA_PORT 0x10
746#define IPR_PATH_CFG_EXP_PORT 0x20
747#define IPR_PATH_CFG_DEVICE_PORT 0x30
748#define IPR_PATH_CFG_DEVICE_LUN 0x40
749
750#define IPR_PATH_CFG_STATUS_MASK 0x0F
751#define IPR_PATH_CFG_NO_PROB 0x00
752#define IPR_PATH_CFG_DEGRADED 0x01
753#define IPR_PATH_CFG_FAILED 0x02
754#define IPR_PATH_CFG_SUSPECT 0x03
755#define IPR_PATH_NOT_DETECTED 0x04
756#define IPR_PATH_INCORRECT_CONN 0x05
757
758 u8 cascaded_expander;
759 u8 phy;
760 u8 link_rate;
761#define IPR_PHY_LINK_RATE_MASK 0x0F
762
763 __be32 wwid[2];
764}__attribute__((packed, aligned (4)));
765
766struct ipr_hostrcb_fabric_desc {
767 __be16 length;
768 u8 ioa_port;
769 u8 cascaded_expander;
770 u8 phy;
771 u8 path_state;
772#define IPR_PATH_ACTIVE_MASK 0xC0
773#define IPR_PATH_NO_INFO 0x00
774#define IPR_PATH_ACTIVE 0x40
775#define IPR_PATH_NOT_ACTIVE 0x80
776
777#define IPR_PATH_STATE_MASK 0x0F
778#define IPR_PATH_STATE_NO_INFO 0x00
779#define IPR_PATH_HEALTHY 0x01
780#define IPR_PATH_DEGRADED 0x02
781#define IPR_PATH_FAILED 0x03
782
783 __be16 num_entries;
784 struct ipr_hostrcb_config_element elem[1];
785}__attribute__((packed, aligned (4)));
786
787#define for_each_fabric_cfg(fabric, cfg) \
788 for (cfg = (fabric)->elem; \
789 cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
790 cfg++)
791
792struct ipr_hostrcb_type_20_error {
793 u8 failure_reason[64];
794 u8 reserved[3];
795 u8 num_entries;
796 struct ipr_hostrcb_fabric_desc desc[1];
797}__attribute__((packed, aligned (4)));
798
734struct ipr_hostrcb_error { 799struct ipr_hostrcb_error {
735 __be32 failing_dev_ioasc; 800 __be32 failing_dev_ioasc;
736 struct ipr_res_addr failing_dev_res_addr; 801 struct ipr_res_addr failing_dev_res_addr;
@@ -747,6 +812,7 @@ struct ipr_hostrcb_error {
747 struct ipr_hostrcb_type_13_error type_13_error; 812 struct ipr_hostrcb_type_13_error type_13_error;
748 struct ipr_hostrcb_type_14_error type_14_error; 813 struct ipr_hostrcb_type_14_error type_14_error;
749 struct ipr_hostrcb_type_17_error type_17_error; 814 struct ipr_hostrcb_type_17_error type_17_error;
815 struct ipr_hostrcb_type_20_error type_20_error;
750 } u; 816 } u;
751}__attribute__((packed, aligned (4))); 817}__attribute__((packed, aligned (4)));
752 818
@@ -786,6 +852,7 @@ struct ipr_hcam {
786#define IPR_HOST_RCB_OVERLAY_ID_14 0x14 852#define IPR_HOST_RCB_OVERLAY_ID_14 0x14
787#define IPR_HOST_RCB_OVERLAY_ID_16 0x16 853#define IPR_HOST_RCB_OVERLAY_ID_16 0x16
788#define IPR_HOST_RCB_OVERLAY_ID_17 0x17 854#define IPR_HOST_RCB_OVERLAY_ID_17 0x17
855#define IPR_HOST_RCB_OVERLAY_ID_20 0x20
789#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF 856#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF
790 857
791 u8 reserved1[3]; 858 u8 reserved1[3];
@@ -805,6 +872,7 @@ struct ipr_hostrcb {
805 struct ipr_hcam hcam; 872 struct ipr_hcam hcam;
806 dma_addr_t hostrcb_dma; 873 dma_addr_t hostrcb_dma;
807 struct list_head queue; 874 struct list_head queue;
875 struct ipr_ioa_cfg *ioa_cfg;
808}; 876};
809 877
810/* IPR smart dump table structures */ 878/* IPR smart dump table structures */
@@ -1283,6 +1351,17 @@ struct ipr_ucode_image_header {
1283 } \ 1351 } \
1284} 1352}
1285 1353
1354#define ipr_hcam_err(hostrcb, fmt, ...) \
1355{ \
1356 if (ipr_is_device(&(hostrcb)->hcam.u.error.failing_dev_res_addr)) { \
1357 ipr_ra_err((hostrcb)->ioa_cfg, \
1358 (hostrcb)->hcam.u.error.failing_dev_res_addr, \
1359 fmt, ##__VA_ARGS__); \
1360 } else { \
1361 dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, ##__VA_ARGS__); \
1362 } \
1363}
1364
1286#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\ 1365#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
1287 __FILE__, __FUNCTION__, __LINE__) 1366 __FILE__, __FUNCTION__, __LINE__)
1288 1367
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index f06a06ae6092..8b704f73055a 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -5001,7 +5001,7 @@ ips_init_copperhead(ips_ha_t * ha)
5001 break; 5001 break;
5002 5002
5003 /* Delay for 1 Second */ 5003 /* Delay for 1 Second */
5004 msleep(IPS_ONE_SEC); 5004 MDELAY(IPS_ONE_SEC);
5005 } 5005 }
5006 5006
5007 if (j >= 45) 5007 if (j >= 45)
@@ -5027,7 +5027,7 @@ ips_init_copperhead(ips_ha_t * ha)
5027 break; 5027 break;
5028 5028
5029 /* Delay for 1 Second */ 5029 /* Delay for 1 Second */
5030 msleep(IPS_ONE_SEC); 5030 MDELAY(IPS_ONE_SEC);
5031 } 5031 }
5032 5032
5033 if (j >= 240) 5033 if (j >= 240)
@@ -5045,7 +5045,7 @@ ips_init_copperhead(ips_ha_t * ha)
5045 break; 5045 break;
5046 5046
5047 /* Delay for 1 Second */ 5047 /* Delay for 1 Second */
5048 msleep(IPS_ONE_SEC); 5048 MDELAY(IPS_ONE_SEC);
5049 } 5049 }
5050 5050
5051 if (i >= 240) 5051 if (i >= 240)
@@ -5095,7 +5095,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
5095 break; 5095 break;
5096 5096
5097 /* Delay for 1 Second */ 5097 /* Delay for 1 Second */
5098 msleep(IPS_ONE_SEC); 5098 MDELAY(IPS_ONE_SEC);
5099 } 5099 }
5100 5100
5101 if (j >= 45) 5101 if (j >= 45)
@@ -5121,7 +5121,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
5121 break; 5121 break;
5122 5122
5123 /* Delay for 1 Second */ 5123 /* Delay for 1 Second */
5124 msleep(IPS_ONE_SEC); 5124 MDELAY(IPS_ONE_SEC);
5125 } 5125 }
5126 5126
5127 if (j >= 240) 5127 if (j >= 240)
@@ -5139,7 +5139,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
5139 break; 5139 break;
5140 5140
5141 /* Delay for 1 Second */ 5141 /* Delay for 1 Second */
5142 msleep(IPS_ONE_SEC); 5142 MDELAY(IPS_ONE_SEC);
5143 } 5143 }
5144 5144
5145 if (i >= 240) 5145 if (i >= 240)
@@ -5191,7 +5191,7 @@ ips_init_morpheus(ips_ha_t * ha)
5191 break; 5191 break;
5192 5192
5193 /* Delay for 1 Second */ 5193 /* Delay for 1 Second */
5194 msleep(IPS_ONE_SEC); 5194 MDELAY(IPS_ONE_SEC);
5195 } 5195 }
5196 5196
5197 if (i >= 45) { 5197 if (i >= 45) {
@@ -5217,7 +5217,7 @@ ips_init_morpheus(ips_ha_t * ha)
5217 if (Post != 0x4F00) 5217 if (Post != 0x4F00)
5218 break; 5218 break;
5219 /* Delay for 1 Second */ 5219 /* Delay for 1 Second */
5220 msleep(IPS_ONE_SEC); 5220 MDELAY(IPS_ONE_SEC);
5221 } 5221 }
5222 5222
5223 if (i >= 120) { 5223 if (i >= 120) {
@@ -5247,7 +5247,7 @@ ips_init_morpheus(ips_ha_t * ha)
5247 break; 5247 break;
5248 5248
5249 /* Delay for 1 Second */ 5249 /* Delay for 1 Second */
5250 msleep(IPS_ONE_SEC); 5250 MDELAY(IPS_ONE_SEC);
5251 } 5251 }
5252 5252
5253 if (i >= 240) { 5253 if (i >= 240) {
@@ -5307,12 +5307,12 @@ ips_reset_copperhead(ips_ha_t * ha)
5307 outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR); 5307 outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR);
5308 5308
5309 /* Delay for 1 Second */ 5309 /* Delay for 1 Second */
5310 msleep(IPS_ONE_SEC); 5310 MDELAY(IPS_ONE_SEC);
5311 5311
5312 outb(0, ha->io_addr + IPS_REG_SCPR); 5312 outb(0, ha->io_addr + IPS_REG_SCPR);
5313 5313
5314 /* Delay for 1 Second */ 5314 /* Delay for 1 Second */
5315 msleep(IPS_ONE_SEC); 5315 MDELAY(IPS_ONE_SEC);
5316 5316
5317 if ((*ha->func.init) (ha)) 5317 if ((*ha->func.init) (ha))
5318 break; 5318 break;
@@ -5352,12 +5352,12 @@ ips_reset_copperhead_memio(ips_ha_t * ha)
5352 writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR); 5352 writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR);
5353 5353
5354 /* Delay for 1 Second */ 5354 /* Delay for 1 Second */
5355 msleep(IPS_ONE_SEC); 5355 MDELAY(IPS_ONE_SEC);
5356 5356
5357 writeb(0, ha->mem_ptr + IPS_REG_SCPR); 5357 writeb(0, ha->mem_ptr + IPS_REG_SCPR);
5358 5358
5359 /* Delay for 1 Second */ 5359 /* Delay for 1 Second */
5360 msleep(IPS_ONE_SEC); 5360 MDELAY(IPS_ONE_SEC);
5361 5361
5362 if ((*ha->func.init) (ha)) 5362 if ((*ha->func.init) (ha))
5363 break; 5363 break;
@@ -5398,7 +5398,7 @@ ips_reset_morpheus(ips_ha_t * ha)
5398 writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR); 5398 writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR);
5399 5399
5400 /* Delay for 5 Seconds */ 5400 /* Delay for 5 Seconds */
5401 msleep(5 * IPS_ONE_SEC); 5401 MDELAY(5 * IPS_ONE_SEC);
5402 5402
5403 /* Do a PCI config read to wait for adapter */ 5403 /* Do a PCI config read to wait for adapter */
5404 pci_read_config_byte(ha->pcidev, 4, &junk); 5404 pci_read_config_byte(ha->pcidev, 4, &junk);
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 34680f3dd452..b726dcc424b1 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -51,6 +51,7 @@
51 #define _IPS_H_ 51 #define _IPS_H_
52 52
53#include <linux/version.h> 53#include <linux/version.h>
54#include <linux/nmi.h>
54 #include <asm/uaccess.h> 55 #include <asm/uaccess.h>
55 #include <asm/io.h> 56 #include <asm/io.h>
56 57
@@ -116,9 +117,11 @@
116 dev_printk(level , &((pcidev)->dev) , format , ## arg) 117 dev_printk(level , &((pcidev)->dev) , format , ## arg)
117 #endif 118 #endif
118 119
119 #ifndef MDELAY 120 #define MDELAY(n) \
120 #define MDELAY mdelay 121 do { \
121 #endif 122 mdelay(n); \
123 touch_nmi_watchdog(); \
124 } while (0)
122 125
123 #ifndef min 126 #ifndef min
124 #define min(x,y) ((x) < (y) ? x : y) 127 #define min(x,y) ((x) < (y) ? x : y)
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 5d8862189485..e11b23c641e2 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -719,9 +719,10 @@ again:
719 return rc; 719 return rc;
720} 720}
721 721
722static void iscsi_xmitworker(void *data) 722static void iscsi_xmitworker(struct work_struct *work)
723{ 723{
724 struct iscsi_conn *conn = data; 724 struct iscsi_conn *conn =
725 container_of(work, struct iscsi_conn, xmitwork);
725 int rc; 726 int rc;
726 /* 727 /*
727 * serialize Xmit worker on a per-connection basis. 728 * serialize Xmit worker on a per-connection basis.
@@ -1512,7 +1513,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1512 if (conn->mgmtqueue == ERR_PTR(-ENOMEM)) 1513 if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
1513 goto mgmtqueue_alloc_fail; 1514 goto mgmtqueue_alloc_fail;
1514 1515
1515 INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn); 1516 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
1516 1517
1517 /* allocate login_mtask used for the login/text sequences */ 1518 /* allocate login_mtask used for the login/text sequences */
1518 spin_lock_bh(&session->lock); 1519 spin_lock_bh(&session->lock);
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index d977bd492d8d..fb7df7b75811 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -647,10 +647,12 @@ void sas_unregister_domain_devices(struct asd_sas_port *port)
647 * Discover process only interrogates devices in order to discover the 647 * Discover process only interrogates devices in order to discover the
648 * domain. 648 * domain.
649 */ 649 */
650static void sas_discover_domain(void *data) 650static void sas_discover_domain(struct work_struct *work)
651{ 651{
652 int error = 0; 652 int error = 0;
653 struct asd_sas_port *port = data; 653 struct sas_discovery_event *ev =
654 container_of(work, struct sas_discovery_event, work);
655 struct asd_sas_port *port = ev->port;
654 656
655 sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock, 657 sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock,
656 &port->disc.pending); 658 &port->disc.pending);
@@ -692,10 +694,12 @@ static void sas_discover_domain(void *data)
692 current->pid, error); 694 current->pid, error);
693} 695}
694 696
695static void sas_revalidate_domain(void *data) 697static void sas_revalidate_domain(struct work_struct *work)
696{ 698{
697 int res = 0; 699 int res = 0;
698 struct asd_sas_port *port = data; 700 struct sas_discovery_event *ev =
701 container_of(work, struct sas_discovery_event, work);
702 struct asd_sas_port *port = ev->port;
699 703
700 sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock, 704 sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock,
701 &port->disc.pending); 705 &port->disc.pending);
@@ -722,7 +726,7 @@ int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
722 BUG_ON(ev >= DISC_NUM_EVENTS); 726 BUG_ON(ev >= DISC_NUM_EVENTS);
723 727
724 sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, 728 sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
725 &disc->disc_work[ev], port->ha->core.shost); 729 &disc->disc_work[ev].work, port->ha->core.shost);
726 730
727 return 0; 731 return 0;
728} 732}
@@ -737,13 +741,15 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
737{ 741{
738 int i; 742 int i;
739 743
740 static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = { 744 static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
741 [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, 745 [DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
742 [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, 746 [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
743 }; 747 };
744 748
745 spin_lock_init(&disc->disc_event_lock); 749 spin_lock_init(&disc->disc_event_lock);
746 disc->pending = 0; 750 disc->pending = 0;
747 for (i = 0; i < DISC_NUM_EVENTS; i++) 751 for (i = 0; i < DISC_NUM_EVENTS; i++) {
748 INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port); 752 INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
753 disc->disc_work[i].port = port;
754 }
749} 755}
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 19110ed1c89c..d83392ee6823 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -31,7 +31,7 @@ static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
31 BUG_ON(event >= HA_NUM_EVENTS); 31 BUG_ON(event >= HA_NUM_EVENTS);
32 32
33 sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending, 33 sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
34 &sas_ha->ha_events[event], sas_ha->core.shost); 34 &sas_ha->ha_events[event].work, sas_ha->core.shost);
35} 35}
36 36
37static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) 37static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
@@ -41,7 +41,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
41 BUG_ON(event >= PORT_NUM_EVENTS); 41 BUG_ON(event >= PORT_NUM_EVENTS);
42 42
43 sas_queue_event(event, &ha->event_lock, &phy->port_events_pending, 43 sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
44 &phy->port_events[event], ha->core.shost); 44 &phy->port_events[event].work, ha->core.shost);
45} 45}
46 46
47static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) 47static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
@@ -51,12 +51,12 @@ static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
51 BUG_ON(event >= PHY_NUM_EVENTS); 51 BUG_ON(event >= PHY_NUM_EVENTS);
52 52
53 sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending, 53 sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
54 &phy->phy_events[event], ha->core.shost); 54 &phy->phy_events[event].work, ha->core.shost);
55} 55}
56 56
57int sas_init_events(struct sas_ha_struct *sas_ha) 57int sas_init_events(struct sas_ha_struct *sas_ha)
58{ 58{
59 static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = { 59 static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = {
60 [HAE_RESET] = sas_hae_reset, 60 [HAE_RESET] = sas_hae_reset,
61 }; 61 };
62 62
@@ -64,8 +64,10 @@ int sas_init_events(struct sas_ha_struct *sas_ha)
64 64
65 spin_lock_init(&sas_ha->event_lock); 65 spin_lock_init(&sas_ha->event_lock);
66 66
67 for (i = 0; i < HA_NUM_EVENTS; i++) 67 for (i = 0; i < HA_NUM_EVENTS; i++) {
68 INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha); 68 INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
69 sas_ha->ha_events[i].ha = sas_ha;
70 }
69 71
70 sas_ha->notify_ha_event = notify_ha_event; 72 sas_ha->notify_ha_event = notify_ha_event;
71 sas_ha->notify_port_event = notify_port_event; 73 sas_ha->notify_port_event = notify_port_event;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index e34a93435497..d31e6fa466f7 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -597,10 +597,15 @@ static struct domain_device *sas_ex_discover_end_dev(
597 child->iproto = phy->attached_iproto; 597 child->iproto = phy->attached_iproto;
598 memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); 598 memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
599 sas_hash_addr(child->hashed_sas_addr, child->sas_addr); 599 sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
600 phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); 600 if (!phy->port) {
601 BUG_ON(!phy->port); 601 phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
602 /* FIXME: better error handling*/ 602 if (unlikely(!phy->port))
603 BUG_ON(sas_port_add(phy->port) != 0); 603 goto out_err;
604 if (unlikely(sas_port_add(phy->port) != 0)) {
605 sas_port_free(phy->port);
606 goto out_err;
607 }
608 }
604 sas_ex_get_linkrate(parent, child, phy); 609 sas_ex_get_linkrate(parent, child, phy);
605 610
606 if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) { 611 if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) {
@@ -615,8 +620,7 @@ static struct domain_device *sas_ex_discover_end_dev(
615 SAS_DPRINTK("report phy sata to %016llx:0x%x returned " 620 SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
616 "0x%x\n", SAS_ADDR(parent->sas_addr), 621 "0x%x\n", SAS_ADDR(parent->sas_addr),
617 phy_id, res); 622 phy_id, res);
618 kfree(child); 623 goto out_free;
619 return NULL;
620 } 624 }
621 memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis, 625 memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
622 sizeof(struct dev_to_host_fis)); 626 sizeof(struct dev_to_host_fis));
@@ -627,14 +631,14 @@ static struct domain_device *sas_ex_discover_end_dev(
627 "%016llx:0x%x returned 0x%x\n", 631 "%016llx:0x%x returned 0x%x\n",
628 SAS_ADDR(child->sas_addr), 632 SAS_ADDR(child->sas_addr),
629 SAS_ADDR(parent->sas_addr), phy_id, res); 633 SAS_ADDR(parent->sas_addr), phy_id, res);
630 kfree(child); 634 goto out_free;
631 return NULL;
632 } 635 }
633 } else if (phy->attached_tproto & SAS_PROTO_SSP) { 636 } else if (phy->attached_tproto & SAS_PROTO_SSP) {
634 child->dev_type = SAS_END_DEV; 637 child->dev_type = SAS_END_DEV;
635 rphy = sas_end_device_alloc(phy->port); 638 rphy = sas_end_device_alloc(phy->port);
636 /* FIXME: error handling */ 639 /* FIXME: error handling */
637 BUG_ON(!rphy); 640 if (unlikely(!rphy))
641 goto out_free;
638 child->tproto = phy->attached_tproto; 642 child->tproto = phy->attached_tproto;
639 sas_init_dev(child); 643 sas_init_dev(child);
640 644
@@ -651,9 +655,7 @@ static struct domain_device *sas_ex_discover_end_dev(
651 "at %016llx:0x%x returned 0x%x\n", 655 "at %016llx:0x%x returned 0x%x\n",
652 SAS_ADDR(child->sas_addr), 656 SAS_ADDR(child->sas_addr),
653 SAS_ADDR(parent->sas_addr), phy_id, res); 657 SAS_ADDR(parent->sas_addr), phy_id, res);
654 /* FIXME: this kfrees list elements without removing them */ 658 goto out_list_del;
655 //kfree(child);
656 return NULL;
657 } 659 }
658 } else { 660 } else {
659 SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n", 661 SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n",
@@ -663,6 +665,16 @@ static struct domain_device *sas_ex_discover_end_dev(
663 665
664 list_add_tail(&child->siblings, &parent_ex->children); 666 list_add_tail(&child->siblings, &parent_ex->children);
665 return child; 667 return child;
668
669 out_list_del:
670 list_del(&child->dev_list_node);
671 sas_rphy_free(rphy);
672 out_free:
673 sas_port_delete(phy->port);
674 out_err:
675 phy->port = NULL;
676 kfree(child);
677 return NULL;
666} 678}
667 679
668static struct domain_device *sas_ex_discover_expander( 680static struct domain_device *sas_ex_discover_expander(
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index c836a237fb79..d65bc4e0f214 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -65,9 +65,11 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
65 65
66/* ---------- HA events ---------- */ 66/* ---------- HA events ---------- */
67 67
68void sas_hae_reset(void *data) 68void sas_hae_reset(struct work_struct *work)
69{ 69{
70 struct sas_ha_struct *ha = data; 70 struct sas_ha_event *ev =
71 container_of(work, struct sas_ha_event, work);
72 struct sas_ha_struct *ha = ev->ha;
71 73
72 sas_begin_event(HAE_RESET, &ha->event_lock, 74 sas_begin_event(HAE_RESET, &ha->event_lock,
73 &ha->pending); 75 &ha->pending);
@@ -112,6 +114,8 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
112 } 114 }
113 } 115 }
114 116
117 INIT_LIST_HEAD(&sas_ha->eh_done_q);
118
115 return 0; 119 return 0;
116 120
117Undo_ports: 121Undo_ports:
@@ -142,7 +146,7 @@ static int sas_get_linkerrors(struct sas_phy *phy)
142 return sas_smp_get_phy_events(phy); 146 return sas_smp_get_phy_events(phy);
143} 147}
144 148
145static int sas_phy_reset(struct sas_phy *phy, int hard_reset) 149int sas_phy_reset(struct sas_phy *phy, int hard_reset)
146{ 150{
147 int ret; 151 int ret;
148 enum phy_func reset_type; 152 enum phy_func reset_type;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index bffcee474921..137d7e496b6d 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -60,11 +60,11 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
60 60
61void sas_deform_port(struct asd_sas_phy *phy); 61void sas_deform_port(struct asd_sas_phy *phy);
62 62
63void sas_porte_bytes_dmaed(void *); 63void sas_porte_bytes_dmaed(struct work_struct *work);
64void sas_porte_broadcast_rcvd(void *); 64void sas_porte_broadcast_rcvd(struct work_struct *work);
65void sas_porte_link_reset_err(void *); 65void sas_porte_link_reset_err(struct work_struct *work);
66void sas_porte_timer_event(void *); 66void sas_porte_timer_event(struct work_struct *work);
67void sas_porte_hard_reset(void *); 67void sas_porte_hard_reset(struct work_struct *work);
68 68
69int sas_notify_lldd_dev_found(struct domain_device *); 69int sas_notify_lldd_dev_found(struct domain_device *);
70void sas_notify_lldd_dev_gone(struct domain_device *); 70void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -75,7 +75,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy);
75 75
76struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); 76struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
77 77
78void sas_hae_reset(void *); 78void sas_hae_reset(struct work_struct *work);
79 79
80static inline void sas_queue_event(int event, spinlock_t *lock, 80static inline void sas_queue_event(int event, spinlock_t *lock,
81 unsigned long *pending, 81 unsigned long *pending,
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index 9340cdbae4a3..b459c4b635b1 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -30,9 +30,11 @@
30 30
31/* ---------- Phy events ---------- */ 31/* ---------- Phy events ---------- */
32 32
33static void sas_phye_loss_of_signal(void *data) 33static void sas_phye_loss_of_signal(struct work_struct *work)
34{ 34{
35 struct asd_sas_phy *phy = data; 35 struct asd_sas_event *ev =
36 container_of(work, struct asd_sas_event, work);
37 struct asd_sas_phy *phy = ev->phy;
36 38
37 sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock, 39 sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
38 &phy->phy_events_pending); 40 &phy->phy_events_pending);
@@ -40,18 +42,22 @@ static void sas_phye_loss_of_signal(void *data)
40 sas_deform_port(phy); 42 sas_deform_port(phy);
41} 43}
42 44
43static void sas_phye_oob_done(void *data) 45static void sas_phye_oob_done(struct work_struct *work)
44{ 46{
45 struct asd_sas_phy *phy = data; 47 struct asd_sas_event *ev =
48 container_of(work, struct asd_sas_event, work);
49 struct asd_sas_phy *phy = ev->phy;
46 50
47 sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock, 51 sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock,
48 &phy->phy_events_pending); 52 &phy->phy_events_pending);
49 phy->error = 0; 53 phy->error = 0;
50} 54}
51 55
52static void sas_phye_oob_error(void *data) 56static void sas_phye_oob_error(struct work_struct *work)
53{ 57{
54 struct asd_sas_phy *phy = data; 58 struct asd_sas_event *ev =
59 container_of(work, struct asd_sas_event, work);
60 struct asd_sas_phy *phy = ev->phy;
55 struct sas_ha_struct *sas_ha = phy->ha; 61 struct sas_ha_struct *sas_ha = phy->ha;
56 struct asd_sas_port *port = phy->port; 62 struct asd_sas_port *port = phy->port;
57 struct sas_internal *i = 63 struct sas_internal *i =
@@ -80,9 +86,11 @@ static void sas_phye_oob_error(void *data)
80 } 86 }
81} 87}
82 88
83static void sas_phye_spinup_hold(void *data) 89static void sas_phye_spinup_hold(struct work_struct *work)
84{ 90{
85 struct asd_sas_phy *phy = data; 91 struct asd_sas_event *ev =
92 container_of(work, struct asd_sas_event, work);
93 struct asd_sas_phy *phy = ev->phy;
86 struct sas_ha_struct *sas_ha = phy->ha; 94 struct sas_ha_struct *sas_ha = phy->ha;
87 struct sas_internal *i = 95 struct sas_internal *i =
88 to_sas_internal(sas_ha->core.shost->transportt); 96 to_sas_internal(sas_ha->core.shost->transportt);
@@ -100,14 +108,14 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
100{ 108{
101 int i; 109 int i;
102 110
103 static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = { 111 static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
104 [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal, 112 [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
105 [PHYE_OOB_DONE] = sas_phye_oob_done, 113 [PHYE_OOB_DONE] = sas_phye_oob_done,
106 [PHYE_OOB_ERROR] = sas_phye_oob_error, 114 [PHYE_OOB_ERROR] = sas_phye_oob_error,
107 [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold, 115 [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
108 }; 116 };
109 117
110 static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = { 118 static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
111 [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed, 119 [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
112 [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd, 120 [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
113 [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err, 121 [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
@@ -122,13 +130,18 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
122 130
123 phy->error = 0; 131 phy->error = 0;
124 INIT_LIST_HEAD(&phy->port_phy_el); 132 INIT_LIST_HEAD(&phy->port_phy_el);
125 for (k = 0; k < PORT_NUM_EVENTS; k++) 133 for (k = 0; k < PORT_NUM_EVENTS; k++) {
126 INIT_WORK(&phy->port_events[k], sas_port_event_fns[k], 134 INIT_WORK(&phy->port_events[k].work,
127 phy); 135 sas_port_event_fns[k]);
136 phy->port_events[k].phy = phy;
137 }
138
139 for (k = 0; k < PHY_NUM_EVENTS; k++) {
140 INIT_WORK(&phy->phy_events[k].work,
141 sas_phy_event_fns[k]);
142 phy->phy_events[k].phy = phy;
143 }
128 144
129 for (k = 0; k < PHY_NUM_EVENTS; k++)
130 INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k],
131 phy);
132 phy->port = NULL; 145 phy->port = NULL;
133 phy->ha = sas_ha; 146 phy->ha = sas_ha;
134 spin_lock_init(&phy->frame_rcvd_lock); 147 spin_lock_init(&phy->frame_rcvd_lock);
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 253cdcf306a2..971c37ceecb4 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -181,9 +181,11 @@ void sas_deform_port(struct asd_sas_phy *phy)
181 181
182/* ---------- SAS port events ---------- */ 182/* ---------- SAS port events ---------- */
183 183
184void sas_porte_bytes_dmaed(void *data) 184void sas_porte_bytes_dmaed(struct work_struct *work)
185{ 185{
186 struct asd_sas_phy *phy = data; 186 struct asd_sas_event *ev =
187 container_of(work, struct asd_sas_event, work);
188 struct asd_sas_phy *phy = ev->phy;
187 189
188 sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock, 190 sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock,
189 &phy->port_events_pending); 191 &phy->port_events_pending);
@@ -191,11 +193,13 @@ void sas_porte_bytes_dmaed(void *data)
191 sas_form_port(phy); 193 sas_form_port(phy);
192} 194}
193 195
194void sas_porte_broadcast_rcvd(void *data) 196void sas_porte_broadcast_rcvd(struct work_struct *work)
195{ 197{
198 struct asd_sas_event *ev =
199 container_of(work, struct asd_sas_event, work);
200 struct asd_sas_phy *phy = ev->phy;
196 unsigned long flags; 201 unsigned long flags;
197 u32 prim; 202 u32 prim;
198 struct asd_sas_phy *phy = data;
199 203
200 sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock, 204 sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock,
201 &phy->port_events_pending); 205 &phy->port_events_pending);
@@ -208,9 +212,11 @@ void sas_porte_broadcast_rcvd(void *data)
208 sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN); 212 sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
209} 213}
210 214
211void sas_porte_link_reset_err(void *data) 215void sas_porte_link_reset_err(struct work_struct *work)
212{ 216{
213 struct asd_sas_phy *phy = data; 217 struct asd_sas_event *ev =
218 container_of(work, struct asd_sas_event, work);
219 struct asd_sas_phy *phy = ev->phy;
214 220
215 sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock, 221 sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
216 &phy->port_events_pending); 222 &phy->port_events_pending);
@@ -218,9 +224,11 @@ void sas_porte_link_reset_err(void *data)
218 sas_deform_port(phy); 224 sas_deform_port(phy);
219} 225}
220 226
221void sas_porte_timer_event(void *data) 227void sas_porte_timer_event(struct work_struct *work)
222{ 228{
223 struct asd_sas_phy *phy = data; 229 struct asd_sas_event *ev =
230 container_of(work, struct asd_sas_event, work);
231 struct asd_sas_phy *phy = ev->phy;
224 232
225 sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock, 233 sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
226 &phy->port_events_pending); 234 &phy->port_events_pending);
@@ -228,9 +236,11 @@ void sas_porte_timer_event(void *data)
228 sas_deform_port(phy); 236 sas_deform_port(phy);
229} 237}
230 238
231void sas_porte_hard_reset(void *data) 239void sas_porte_hard_reset(struct work_struct *work)
232{ 240{
233 struct asd_sas_phy *phy = data; 241 struct asd_sas_event *ev =
242 container_of(work, struct asd_sas_event, work);
243 struct asd_sas_phy *phy = ev->phy;
234 244
235 sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock, 245 sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
236 &phy->port_events_pending); 246 &phy->port_events_pending);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index e46e79355b77..22672d54aa27 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -29,9 +29,11 @@
29#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
30#include <scsi/scsi_tcq.h> 30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi.h> 31#include <scsi/scsi.h>
32#include <scsi/scsi_eh.h>
32#include <scsi/scsi_transport.h> 33#include <scsi/scsi_transport.h>
33#include <scsi/scsi_transport_sas.h> 34#include <scsi/scsi_transport_sas.h>
34#include "../scsi_sas_internal.h" 35#include "../scsi_sas_internal.h"
36#include "../scsi_transport_api.h"
35 37
36#include <linux/err.h> 38#include <linux/err.h>
37#include <linux/blkdev.h> 39#include <linux/blkdev.h>
@@ -46,6 +48,7 @@ static void sas_scsi_task_done(struct sas_task *task)
46{ 48{
47 struct task_status_struct *ts = &task->task_status; 49 struct task_status_struct *ts = &task->task_status;
48 struct scsi_cmnd *sc = task->uldd_task; 50 struct scsi_cmnd *sc = task->uldd_task;
51 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(sc->device->host);
49 unsigned ts_flags = task->task_state_flags; 52 unsigned ts_flags = task->task_state_flags;
50 int hs = 0, stat = 0; 53 int hs = 0, stat = 0;
51 54
@@ -116,7 +119,7 @@ static void sas_scsi_task_done(struct sas_task *task)
116 sas_free_task(task); 119 sas_free_task(task);
117 /* This is very ugly but this is how SCSI Core works. */ 120 /* This is very ugly but this is how SCSI Core works. */
118 if (ts_flags & SAS_TASK_STATE_ABORTED) 121 if (ts_flags & SAS_TASK_STATE_ABORTED)
119 scsi_finish_command(sc); 122 scsi_eh_finish_cmd(sc, &sas_ha->eh_done_q);
120 else 123 else
121 sc->scsi_done(sc); 124 sc->scsi_done(sc);
122} 125}
@@ -307,6 +310,15 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
307 spin_unlock_irqrestore(&core->task_queue_lock, flags); 310 spin_unlock_irqrestore(&core->task_queue_lock, flags);
308 } 311 }
309 312
313 spin_lock_irqsave(&task->task_state_lock, flags);
314 if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) {
315 spin_unlock_irqrestore(&task->task_state_lock, flags);
316 SAS_DPRINTK("%s: task 0x%p already aborted\n",
317 __FUNCTION__, task);
318 return TASK_IS_ABORTED;
319 }
320 spin_unlock_irqrestore(&task->task_state_lock, flags);
321
310 for (i = 0; i < 5; i++) { 322 for (i = 0; i < 5; i++) {
311 SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task); 323 SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task);
312 res = si->dft->lldd_abort_task(task); 324 res = si->dft->lldd_abort_task(task);
@@ -409,13 +421,16 @@ Again:
409 SAS_DPRINTK("going over list...\n"); 421 SAS_DPRINTK("going over list...\n");
410 list_for_each_entry_safe(cmd, n, &error_q, eh_entry) { 422 list_for_each_entry_safe(cmd, n, &error_q, eh_entry) {
411 struct sas_task *task = TO_SAS_TASK(cmd); 423 struct sas_task *task = TO_SAS_TASK(cmd);
424 list_del_init(&cmd->eh_entry);
412 425
426 if (!task) {
427 SAS_DPRINTK("%s: taskless cmd?!\n", __FUNCTION__);
428 continue;
429 }
413 SAS_DPRINTK("trying to find task 0x%p\n", task); 430 SAS_DPRINTK("trying to find task 0x%p\n", task);
414 list_del_init(&cmd->eh_entry);
415 res = sas_scsi_find_task(task); 431 res = sas_scsi_find_task(task);
416 432
417 cmd->eh_eflags = 0; 433 cmd->eh_eflags = 0;
418 shost->host_failed--;
419 434
420 switch (res) { 435 switch (res) {
421 case TASK_IS_DONE: 436 case TASK_IS_DONE:
@@ -491,6 +506,7 @@ Again:
491 } 506 }
492 } 507 }
493out: 508out:
509 scsi_eh_flush_done_q(&ha->eh_done_q);
494 SAS_DPRINTK("--- Exit %s\n", __FUNCTION__); 510 SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
495 return; 511 return;
496clear_q: 512clear_q:
@@ -508,12 +524,18 @@ enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
508 unsigned long flags; 524 unsigned long flags;
509 525
510 if (!task) { 526 if (!task) {
511 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", 527 SAS_DPRINTK("command 0x%p, task 0x%p, gone: EH_HANDLED\n",
512 cmd, task); 528 cmd, task);
513 return EH_HANDLED; 529 return EH_HANDLED;
514 } 530 }
515 531
516 spin_lock_irqsave(&task->task_state_lock, flags); 532 spin_lock_irqsave(&task->task_state_lock, flags);
533 if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) {
534 spin_unlock_irqrestore(&task->task_state_lock, flags);
535 SAS_DPRINTK("command 0x%p, task 0x%p, aborted by initiator: "
536 "EH_NOT_HANDLED\n", cmd, task);
537 return EH_NOT_HANDLED;
538 }
517 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 539 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
518 spin_unlock_irqrestore(&task->task_state_lock, flags); 540 spin_unlock_irqrestore(&task->task_state_lock, flags);
519 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", 541 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
@@ -777,6 +799,66 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
777 spin_unlock_irqrestore(&core->task_queue_lock, flags); 799 spin_unlock_irqrestore(&core->task_queue_lock, flags);
778} 800}
779 801
802static int do_sas_task_abort(struct sas_task *task)
803{
804 struct scsi_cmnd *sc = task->uldd_task;
805 struct sas_internal *si =
806 to_sas_internal(task->dev->port->ha->core.shost->transportt);
807 unsigned long flags;
808 int res;
809
810 spin_lock_irqsave(&task->task_state_lock, flags);
811 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
812 spin_unlock_irqrestore(&task->task_state_lock, flags);
813 SAS_DPRINTK("%s: Task %p already aborted.\n", __FUNCTION__,
814 task);
815 return 0;
816 }
817
818 task->task_state_flags |= SAS_TASK_INITIATOR_ABORTED;
819 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
820 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
821 spin_unlock_irqrestore(&task->task_state_lock, flags);
822
823 if (!si->dft->lldd_abort_task)
824 return -ENODEV;
825
826 res = si->dft->lldd_abort_task(task);
827 if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
828 (res == TMF_RESP_FUNC_COMPLETE))
829 {
830 /* SMP commands don't have scsi_cmds(?) */
831 if (!sc) {
832 task->task_done(task);
833 return 0;
834 }
835 scsi_req_abort_cmd(sc);
836 scsi_schedule_eh(sc->device->host);
837 return 0;
838 }
839
840 spin_lock_irqsave(&task->task_state_lock, flags);
841 task->task_state_flags &= ~SAS_TASK_INITIATOR_ABORTED;
842 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
843 task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
844 spin_unlock_irqrestore(&task->task_state_lock, flags);
845
846 return -EAGAIN;
847}
848
849void sas_task_abort(struct work_struct *work)
850{
851 struct sas_task *task =
852 container_of(work, struct sas_task, abort_work);
853 int i;
854
855 for (i = 0; i < 5; i++)
856 if (!do_sas_task_abort(task))
857 return;
858
859 SAS_DPRINTK("%s: Could not kill task!\n", __FUNCTION__);
860}
861
780EXPORT_SYMBOL_GPL(sas_queuecommand); 862EXPORT_SYMBOL_GPL(sas_queuecommand);
781EXPORT_SYMBOL_GPL(sas_target_alloc); 863EXPORT_SYMBOL_GPL(sas_target_alloc);
782EXPORT_SYMBOL_GPL(sas_slave_configure); 864EXPORT_SYMBOL_GPL(sas_slave_configure);
@@ -784,3 +866,5 @@ EXPORT_SYMBOL_GPL(sas_slave_destroy);
784EXPORT_SYMBOL_GPL(sas_change_queue_depth); 866EXPORT_SYMBOL_GPL(sas_change_queue_depth);
785EXPORT_SYMBOL_GPL(sas_change_queue_type); 867EXPORT_SYMBOL_GPL(sas_change_queue_type);
786EXPORT_SYMBOL_GPL(sas_bios_param); 868EXPORT_SYMBOL_GPL(sas_bios_param);
869EXPORT_SYMBOL_GPL(sas_task_abort);
870EXPORT_SYMBOL_GPL(sas_phy_reset);
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
new file mode 100644
index 000000000000..89403b00e042
--- /dev/null
+++ b/drivers/scsi/libsrp.c
@@ -0,0 +1,441 @@
1/*
2 * SCSI RDMA Protocol lib functions
3 *
4 * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2 of the
9 * License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 */
21#include <linux/err.h>
22#include <linux/kfifo.h>
23#include <linux/scatterlist.h>
24#include <linux/dma-mapping.h>
25#include <linux/pci.h>
26#include <scsi/scsi.h>
27#include <scsi/scsi_cmnd.h>
28#include <scsi/scsi_tcq.h>
29#include <scsi/scsi_tgt.h>
30#include <scsi/srp.h>
31#include <scsi/libsrp.h>
32
33enum srp_task_attributes {
34 SRP_SIMPLE_TASK = 0,
35 SRP_HEAD_TASK = 1,
36 SRP_ORDERED_TASK = 2,
37 SRP_ACA_TASK = 4
38};
39
40/* tmp - will replace with SCSI logging stuff */
41#define eprintk(fmt, args...) \
42do { \
43 printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
44} while (0)
45/* #define dprintk eprintk */
46#define dprintk(fmt, args...)
47
48static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
49 struct srp_buf **ring)
50{
51 int i;
52 struct iu_entry *iue;
53
54 q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
55 if (!q->pool)
56 return -ENOMEM;
57 q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
58 if (!q->items)
59 goto free_pool;
60
61 spin_lock_init(&q->lock);
62 q->queue = kfifo_init((void *) q->pool, max * sizeof(void *),
63 GFP_KERNEL, &q->lock);
64 if (IS_ERR(q->queue))
65 goto free_item;
66
67 for (i = 0, iue = q->items; i < max; i++) {
68 __kfifo_put(q->queue, (void *) &iue, sizeof(void *));
69 iue->sbuf = ring[i];
70 iue++;
71 }
72 return 0;
73
74free_item:
75 kfree(q->items);
76free_pool:
77 kfree(q->pool);
78 return -ENOMEM;
79}
80
81static void srp_iu_pool_free(struct srp_queue *q)
82{
83 kfree(q->items);
84 kfree(q->pool);
85}
86
87static struct srp_buf **srp_ring_alloc(struct device *dev,
88 size_t max, size_t size)
89{
90 int i;
91 struct srp_buf **ring;
92
93 ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
94 if (!ring)
95 return NULL;
96
97 for (i = 0; i < max; i++) {
98 ring[i] = kzalloc(sizeof(struct srp_buf), GFP_KERNEL);
99 if (!ring[i])
100 goto out;
101 ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
102 GFP_KERNEL);
103 if (!ring[i]->buf)
104 goto out;
105 }
106 return ring;
107
108out:
109 for (i = 0; i < max && ring[i]; i++) {
110 if (ring[i]->buf)
111 dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
112 kfree(ring[i]);
113 }
114 kfree(ring);
115
116 return NULL;
117}
118
119static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max,
120 size_t size)
121{
122 int i;
123
124 for (i = 0; i < max; i++) {
125 dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
126 kfree(ring[i]);
127 }
128}
129
130int srp_target_alloc(struct srp_target *target, struct device *dev,
131 size_t nr, size_t iu_size)
132{
133 int err;
134
135 spin_lock_init(&target->lock);
136 INIT_LIST_HEAD(&target->cmd_queue);
137
138 target->dev = dev;
139 target->dev->driver_data = target;
140
141 target->srp_iu_size = iu_size;
142 target->rx_ring_size = nr;
143 target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
144 if (!target->rx_ring)
145 return -ENOMEM;
146 err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
147 if (err)
148 goto free_ring;
149
150 return 0;
151
152free_ring:
153 srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
154 return -ENOMEM;
155}
156EXPORT_SYMBOL_GPL(srp_target_alloc);
157
158void srp_target_free(struct srp_target *target)
159{
160 srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
161 target->srp_iu_size);
162 srp_iu_pool_free(&target->iu_queue);
163}
164EXPORT_SYMBOL_GPL(srp_target_free);
165
166struct iu_entry *srp_iu_get(struct srp_target *target)
167{
168 struct iu_entry *iue = NULL;
169
170 kfifo_get(target->iu_queue.queue, (void *) &iue, sizeof(void *));
171 if (!iue)
172 return iue;
173 iue->target = target;
174 INIT_LIST_HEAD(&iue->ilist);
175 iue->flags = 0;
176 return iue;
177}
178EXPORT_SYMBOL_GPL(srp_iu_get);
179
180void srp_iu_put(struct iu_entry *iue)
181{
182 kfifo_put(iue->target->iu_queue.queue, (void *) &iue, sizeof(void *));
183}
184EXPORT_SYMBOL_GPL(srp_iu_put);
185
186static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md,
187 enum dma_data_direction dir, srp_rdma_t rdma_io,
188 int dma_map, int ext_desc)
189{
190 struct iu_entry *iue = NULL;
191 struct scatterlist *sg = NULL;
192 int err, nsg = 0, len;
193
194 if (dma_map) {
195 iue = (struct iu_entry *) sc->SCp.ptr;
196 sg = sc->request_buffer;
197
198 dprintk("%p %u %u %d\n", iue, sc->request_bufflen,
199 md->len, sc->use_sg);
200
201 nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg,
202 DMA_BIDIRECTIONAL);
203 if (!nsg) {
204 printk("fail to map %p %d\n", iue, sc->use_sg);
205 return 0;
206 }
207 len = min(sc->request_bufflen, md->len);
208 } else
209 len = md->len;
210
211 err = rdma_io(sc, sg, nsg, md, 1, dir, len);
212
213 if (dma_map)
214 dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
215
216 return err;
217}
218
219static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
220 struct srp_indirect_buf *id,
221 enum dma_data_direction dir, srp_rdma_t rdma_io,
222 int dma_map, int ext_desc)
223{
224 struct iu_entry *iue = NULL;
225 struct srp_direct_buf *md = NULL;
226 struct scatterlist dummy, *sg = NULL;
227 dma_addr_t token = 0;
228 long err;
229 unsigned int done = 0;
230 int nmd, nsg = 0, len;
231
232 if (dma_map || ext_desc) {
233 iue = (struct iu_entry *) sc->SCp.ptr;
234 sg = sc->request_buffer;
235
236 dprintk("%p %u %u %d %d\n",
237 iue, sc->request_bufflen, id->len,
238 cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
239 }
240
241 nmd = id->table_desc.len / sizeof(struct srp_direct_buf);
242
243 if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
244 (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
245 md = &id->desc_list[0];
246 goto rdma;
247 }
248
249 if (ext_desc && dma_map) {
250 md = dma_alloc_coherent(iue->target->dev, id->table_desc.len,
251 &token, GFP_KERNEL);
252 if (!md) {
253 eprintk("Can't get dma memory %u\n", id->table_desc.len);
254 return -ENOMEM;
255 }
256
257 sg_init_one(&dummy, md, id->table_desc.len);
258 sg_dma_address(&dummy) = token;
259 err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
260 id->table_desc.len);
261 if (err < 0) {
262 eprintk("Error copying indirect table %ld\n", err);
263 goto free_mem;
264 }
265 } else {
266 eprintk("This command uses external indirect buffer\n");
267 return -EINVAL;
268 }
269
270rdma:
271 if (dma_map) {
272 nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, DMA_BIDIRECTIONAL);
273 if (!nsg) {
274 eprintk("fail to map %p %d\n", iue, sc->use_sg);
275 goto free_mem;
276 }
277 len = min(sc->request_bufflen, id->len);
278 } else
279 len = id->len;
280
281 err = rdma_io(sc, sg, nsg, md, nmd, dir, len);
282
283 if (dma_map)
284 dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
285
286free_mem:
287 if (token && dma_map)
288 dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);
289
290 return done;
291}
292
293static int data_out_desc_size(struct srp_cmd *cmd)
294{
295 int size = 0;
296 u8 fmt = cmd->buf_fmt >> 4;
297
298 switch (fmt) {
299 case SRP_NO_DATA_DESC:
300 break;
301 case SRP_DATA_DESC_DIRECT:
302 size = sizeof(struct srp_direct_buf);
303 break;
304 case SRP_DATA_DESC_INDIRECT:
305 size = sizeof(struct srp_indirect_buf) +
306 sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
307 break;
308 default:
309 eprintk("client error. Invalid data_out_format %x\n", fmt);
310 break;
311 }
312 return size;
313}
314
315/*
316 * TODO: this can be called multiple times for a single command if it
317 * has very long data.
318 */
319int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
320 srp_rdma_t rdma_io, int dma_map, int ext_desc)
321{
322 struct srp_direct_buf *md;
323 struct srp_indirect_buf *id;
324 enum dma_data_direction dir;
325 int offset, err = 0;
326 u8 format;
327
328 offset = cmd->add_cdb_len * 4;
329
330 dir = srp_cmd_direction(cmd);
331 if (dir == DMA_FROM_DEVICE)
332 offset += data_out_desc_size(cmd);
333
334 if (dir == DMA_TO_DEVICE)
335 format = cmd->buf_fmt >> 4;
336 else
337 format = cmd->buf_fmt & ((1U << 4) - 1);
338
339 switch (format) {
340 case SRP_NO_DATA_DESC:
341 break;
342 case SRP_DATA_DESC_DIRECT:
343 md = (struct srp_direct_buf *)
344 (cmd->add_data + offset);
345 err = srp_direct_data(sc, md, dir, rdma_io, dma_map, ext_desc);
346 break;
347 case SRP_DATA_DESC_INDIRECT:
348 id = (struct srp_indirect_buf *)
349 (cmd->add_data + offset);
350 err = srp_indirect_data(sc, cmd, id, dir, rdma_io, dma_map,
351 ext_desc);
352 break;
353 default:
354 eprintk("Unknown format %d %x\n", dir, format);
355 break;
356 }
357
358 return err;
359}
360EXPORT_SYMBOL_GPL(srp_transfer_data);
361
362static int vscsis_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
363{
364 struct srp_direct_buf *md;
365 struct srp_indirect_buf *id;
366 int len = 0, offset = cmd->add_cdb_len * 4;
367 u8 fmt;
368
369 if (dir == DMA_TO_DEVICE)
370 fmt = cmd->buf_fmt >> 4;
371 else {
372 fmt = cmd->buf_fmt & ((1U << 4) - 1);
373 offset += data_out_desc_size(cmd);
374 }
375
376 switch (fmt) {
377 case SRP_NO_DATA_DESC:
378 break;
379 case SRP_DATA_DESC_DIRECT:
380 md = (struct srp_direct_buf *) (cmd->add_data + offset);
381 len = md->len;
382 break;
383 case SRP_DATA_DESC_INDIRECT:
384 id = (struct srp_indirect_buf *) (cmd->add_data + offset);
385 len = id->len;
386 break;
387 default:
388 eprintk("invalid data format %x\n", fmt);
389 break;
390 }
391 return len;
392}
393
394int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info,
395 u64 addr)
396{
397 enum dma_data_direction dir;
398 struct scsi_cmnd *sc;
399 int tag, len, err;
400
401 switch (cmd->task_attr) {
402 case SRP_SIMPLE_TASK:
403 tag = MSG_SIMPLE_TAG;
404 break;
405 case SRP_ORDERED_TASK:
406 tag = MSG_ORDERED_TAG;
407 break;
408 case SRP_HEAD_TASK:
409 tag = MSG_HEAD_TAG;
410 break;
411 default:
412 eprintk("Task attribute %d not supported\n", cmd->task_attr);
413 tag = MSG_ORDERED_TAG;
414 }
415
416 dir = srp_cmd_direction(cmd);
417 len = vscsis_data_length(cmd, dir);
418
419 dprintk("%p %x %lx %d %d %d %llx\n", info, cmd->cdb[0],
420 cmd->lun, dir, len, tag, (unsigned long long) cmd->tag);
421
422 sc = scsi_host_get_command(shost, dir, GFP_KERNEL);
423 if (!sc)
424 return -ENOMEM;
425
426 sc->SCp.ptr = info;
427 memcpy(sc->cmnd, cmd->cdb, MAX_COMMAND_SIZE);
428 sc->request_bufflen = len;
429 sc->request_buffer = (void *) (unsigned long) addr;
430 sc->tag = tag;
431 err = scsi_tgt_queue_command(sc, (struct scsi_lun *) &cmd->lun, cmd->tag);
432 if (err)
433 scsi_host_put_command(shost, sc);
434
435 return err;
436}
437EXPORT_SYMBOL_GPL(srp_cmd_queue);
438
439MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
440MODULE_AUTHOR("FUJITA Tomonori");
441MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 3f7f5f8abd75..a7de0bca5bdd 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -296,13 +296,17 @@ struct lpfc_hba {
296 uint32_t cfg_cr_delay; 296 uint32_t cfg_cr_delay;
297 uint32_t cfg_cr_count; 297 uint32_t cfg_cr_count;
298 uint32_t cfg_multi_ring_support; 298 uint32_t cfg_multi_ring_support;
299 uint32_t cfg_multi_ring_rctl;
300 uint32_t cfg_multi_ring_type;
299 uint32_t cfg_fdmi_on; 301 uint32_t cfg_fdmi_on;
300 uint32_t cfg_discovery_threads; 302 uint32_t cfg_discovery_threads;
301 uint32_t cfg_max_luns; 303 uint32_t cfg_max_luns;
302 uint32_t cfg_poll; 304 uint32_t cfg_poll;
303 uint32_t cfg_poll_tmo; 305 uint32_t cfg_poll_tmo;
306 uint32_t cfg_use_msi;
304 uint32_t cfg_sg_seg_cnt; 307 uint32_t cfg_sg_seg_cnt;
305 uint32_t cfg_sg_dma_buf_size; 308 uint32_t cfg_sg_dma_buf_size;
309 uint64_t cfg_soft_wwnn;
306 uint64_t cfg_soft_wwpn; 310 uint64_t cfg_soft_wwpn;
307 311
308 uint32_t dev_loss_tmo_changed; 312 uint32_t dev_loss_tmo_changed;
@@ -355,7 +359,7 @@ struct lpfc_hba {
355#define VPD_PORT 0x8 /* valid vpd port data */ 359#define VPD_PORT 0x8 /* valid vpd port data */
356#define VPD_MASK 0xf /* mask for any vpd data */ 360#define VPD_MASK 0xf /* mask for any vpd data */
357 361
358 uint8_t soft_wwpn_enable; 362 uint8_t soft_wwn_enable;
359 363
360 struct timer_list fcp_poll_timer; 364 struct timer_list fcp_poll_timer;
361 struct timer_list els_tmofunc; 365 struct timer_list els_tmofunc;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 2a4e02e7a392..f247e786af99 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -552,10 +552,10 @@ static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
552static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); 552static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
553 553
554 554
555static char *lpfc_soft_wwpn_key = "C99G71SL8032A"; 555static char *lpfc_soft_wwn_key = "C99G71SL8032A";
556 556
557static ssize_t 557static ssize_t
558lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf, 558lpfc_soft_wwn_enable_store(struct class_device *cdev, const char *buf,
559 size_t count) 559 size_t count)
560{ 560{
561 struct Scsi_Host *host = class_to_shost(cdev); 561 struct Scsi_Host *host = class_to_shost(cdev);
@@ -579,15 +579,15 @@ lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf,
579 if (buf[cnt-1] == '\n') 579 if (buf[cnt-1] == '\n')
580 cnt--; 580 cnt--;
581 581
582 if ((cnt != strlen(lpfc_soft_wwpn_key)) || 582 if ((cnt != strlen(lpfc_soft_wwn_key)) ||
583 (strncmp(buf, lpfc_soft_wwpn_key, strlen(lpfc_soft_wwpn_key)) != 0)) 583 (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
584 return -EINVAL; 584 return -EINVAL;
585 585
586 phba->soft_wwpn_enable = 1; 586 phba->soft_wwn_enable = 1;
587 return count; 587 return count;
588} 588}
589static CLASS_DEVICE_ATTR(lpfc_soft_wwpn_enable, S_IWUSR, NULL, 589static CLASS_DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
590 lpfc_soft_wwpn_enable_store); 590 lpfc_soft_wwn_enable_store);
591 591
592static ssize_t 592static ssize_t
593lpfc_soft_wwpn_show(struct class_device *cdev, char *buf) 593lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
@@ -613,12 +613,12 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
613 if (buf[cnt-1] == '\n') 613 if (buf[cnt-1] == '\n')
614 cnt--; 614 cnt--;
615 615
616 if (!phba->soft_wwpn_enable || (cnt < 16) || (cnt > 18) || 616 if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
617 ((cnt == 17) && (*buf++ != 'x')) || 617 ((cnt == 17) && (*buf++ != 'x')) ||
618 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) 618 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
619 return -EINVAL; 619 return -EINVAL;
620 620
621 phba->soft_wwpn_enable = 0; 621 phba->soft_wwn_enable = 0;
622 622
623 memset(wwpn, 0, sizeof(wwpn)); 623 memset(wwpn, 0, sizeof(wwpn));
624 624
@@ -639,6 +639,8 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
639 } 639 }
640 phba->cfg_soft_wwpn = wwn_to_u64(wwpn); 640 phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
641 fc_host_port_name(host) = phba->cfg_soft_wwpn; 641 fc_host_port_name(host) = phba->cfg_soft_wwpn;
642 if (phba->cfg_soft_wwnn)
643 fc_host_node_name(host) = phba->cfg_soft_wwnn;
642 644
643 dev_printk(KERN_NOTICE, &phba->pcidev->dev, 645 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
644 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no); 646 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
@@ -664,6 +666,66 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
664static CLASS_DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\ 666static CLASS_DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
665 lpfc_soft_wwpn_show, lpfc_soft_wwpn_store); 667 lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
666 668
669static ssize_t
670lpfc_soft_wwnn_show(struct class_device *cdev, char *buf)
671{
672 struct Scsi_Host *host = class_to_shost(cdev);
673 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
674 return snprintf(buf, PAGE_SIZE, "0x%llx\n",
675 (unsigned long long)phba->cfg_soft_wwnn);
676}
677
678
679static ssize_t
680lpfc_soft_wwnn_store(struct class_device *cdev, const char *buf, size_t count)
681{
682 struct Scsi_Host *host = class_to_shost(cdev);
683 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
684 unsigned int i, j, cnt=count;
685 u8 wwnn[8];
686
687 /* count may include a LF at end of string */
688 if (buf[cnt-1] == '\n')
689 cnt--;
690
691 if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
692 ((cnt == 17) && (*buf++ != 'x')) ||
693 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
694 return -EINVAL;
695
696 /*
697 * Allow wwnn to be set many times, as long as the enable is set.
698 * However, once the wwpn is set, everything locks.
699 */
700
701 memset(wwnn, 0, sizeof(wwnn));
702
703 /* Validate and store the new name */
704 for (i=0, j=0; i < 16; i++) {
705 if ((*buf >= 'a') && (*buf <= 'f'))
706 j = ((j << 4) | ((*buf++ -'a') + 10));
707 else if ((*buf >= 'A') && (*buf <= 'F'))
708 j = ((j << 4) | ((*buf++ -'A') + 10));
709 else if ((*buf >= '0') && (*buf <= '9'))
710 j = ((j << 4) | (*buf++ -'0'));
711 else
712 return -EINVAL;
713 if (i % 2) {
714 wwnn[i/2] = j & 0xff;
715 j = 0;
716 }
717 }
718 phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
719
720 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
721 "lpfc%d: soft_wwnn set. Value will take effect upon "
722 "setting of the soft_wwpn\n", phba->brd_no);
723
724 return count;
725}
726static CLASS_DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
727 lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
728
667 729
668static int lpfc_poll = 0; 730static int lpfc_poll = 0;
669module_param(lpfc_poll, int, 0); 731module_param(lpfc_poll, int, 0);
@@ -802,12 +864,11 @@ static CLASS_DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
802# LOG_MBOX 0x4 Mailbox events 864# LOG_MBOX 0x4 Mailbox events
803# LOG_INIT 0x8 Initialization events 865# LOG_INIT 0x8 Initialization events
804# LOG_LINK_EVENT 0x10 Link events 866# LOG_LINK_EVENT 0x10 Link events
805# LOG_IP 0x20 IP traffic history
806# LOG_FCP 0x40 FCP traffic history 867# LOG_FCP 0x40 FCP traffic history
807# LOG_NODE 0x80 Node table events 868# LOG_NODE 0x80 Node table events
808# LOG_MISC 0x400 Miscellaneous events 869# LOG_MISC 0x400 Miscellaneous events
809# LOG_SLI 0x800 SLI events 870# LOG_SLI 0x800 SLI events
810# LOG_CHK_COND 0x1000 FCP Check condition flag 871# LOG_FCP_ERROR 0x1000 Only log FCP errors
811# LOG_LIBDFC 0x2000 LIBDFC events 872# LOG_LIBDFC 0x2000 LIBDFC events
812# LOG_ALL_MSG 0xffff LOG all messages 873# LOG_ALL_MSG 0xffff LOG all messages
813*/ 874*/
@@ -916,6 +977,22 @@ LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
916 "SLI rings to spread IOCB entries across"); 977 "SLI rings to spread IOCB entries across");
917 978
918/* 979/*
980# lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this
981# identifies what rctl value to configure the additional ring for.
982# Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
983*/
984LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1,
985 255, "Identifies RCTL for additional ring configuration");
986
987/*
988# lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this
989# identifies what type value to configure the additional ring for.
990# Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
991*/
992LPFC_ATTR_R(multi_ring_type, FC_LLC_SNAP, 1,
993 255, "Identifies TYPE for additional ring configuration");
994
995/*
919# lpfc_fdmi_on: controls FDMI support. 996# lpfc_fdmi_on: controls FDMI support.
920# 0 = no FDMI support 997# 0 = no FDMI support
921# 1 = support FDMI without attribute of hostname 998# 1 = support FDMI without attribute of hostname
@@ -946,6 +1023,15 @@ LPFC_ATTR_R(max_luns, 255, 0, 65535,
946LPFC_ATTR_RW(poll_tmo, 10, 1, 255, 1023LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
947 "Milliseconds driver will wait between polling FCP ring"); 1024 "Milliseconds driver will wait between polling FCP ring");
948 1025
1026/*
1027# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
1028# support this feature
1029# 0 = MSI disabled (default)
1030# 1 = MSI enabled
1031# Value range is [0,1]. Default value is 0.
1032*/
1033LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible");
1034
949 1035
950struct class_device_attribute *lpfc_host_attrs[] = { 1036struct class_device_attribute *lpfc_host_attrs[] = {
951 &class_device_attr_info, 1037 &class_device_attr_info,
@@ -974,6 +1060,8 @@ struct class_device_attribute *lpfc_host_attrs[] = {
974 &class_device_attr_lpfc_cr_delay, 1060 &class_device_attr_lpfc_cr_delay,
975 &class_device_attr_lpfc_cr_count, 1061 &class_device_attr_lpfc_cr_count,
976 &class_device_attr_lpfc_multi_ring_support, 1062 &class_device_attr_lpfc_multi_ring_support,
1063 &class_device_attr_lpfc_multi_ring_rctl,
1064 &class_device_attr_lpfc_multi_ring_type,
977 &class_device_attr_lpfc_fdmi_on, 1065 &class_device_attr_lpfc_fdmi_on,
978 &class_device_attr_lpfc_max_luns, 1066 &class_device_attr_lpfc_max_luns,
979 &class_device_attr_nport_evt_cnt, 1067 &class_device_attr_nport_evt_cnt,
@@ -982,8 +1070,10 @@ struct class_device_attribute *lpfc_host_attrs[] = {
982 &class_device_attr_issue_reset, 1070 &class_device_attr_issue_reset,
983 &class_device_attr_lpfc_poll, 1071 &class_device_attr_lpfc_poll,
984 &class_device_attr_lpfc_poll_tmo, 1072 &class_device_attr_lpfc_poll_tmo,
1073 &class_device_attr_lpfc_use_msi,
1074 &class_device_attr_lpfc_soft_wwnn,
985 &class_device_attr_lpfc_soft_wwpn, 1075 &class_device_attr_lpfc_soft_wwpn,
986 &class_device_attr_lpfc_soft_wwpn_enable, 1076 &class_device_attr_lpfc_soft_wwn_enable,
987 NULL, 1077 NULL,
988}; 1078};
989 1079
@@ -1771,6 +1861,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
1771 lpfc_cr_delay_init(phba, lpfc_cr_delay); 1861 lpfc_cr_delay_init(phba, lpfc_cr_delay);
1772 lpfc_cr_count_init(phba, lpfc_cr_count); 1862 lpfc_cr_count_init(phba, lpfc_cr_count);
1773 lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support); 1863 lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
1864 lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
1865 lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
1774 lpfc_lun_queue_depth_init(phba, lpfc_lun_queue_depth); 1866 lpfc_lun_queue_depth_init(phba, lpfc_lun_queue_depth);
1775 lpfc_fcp_class_init(phba, lpfc_fcp_class); 1867 lpfc_fcp_class_init(phba, lpfc_fcp_class);
1776 lpfc_use_adisc_init(phba, lpfc_use_adisc); 1868 lpfc_use_adisc_init(phba, lpfc_use_adisc);
@@ -1782,9 +1874,11 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
1782 lpfc_discovery_threads_init(phba, lpfc_discovery_threads); 1874 lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
1783 lpfc_max_luns_init(phba, lpfc_max_luns); 1875 lpfc_max_luns_init(phba, lpfc_max_luns);
1784 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 1876 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
1877 lpfc_use_msi_init(phba, lpfc_use_msi);
1785 lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo); 1878 lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
1786 lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo); 1879 lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
1787 phba->cfg_poll = lpfc_poll; 1880 phba->cfg_poll = lpfc_poll;
1881 phba->cfg_soft_wwnn = 0L;
1788 phba->cfg_soft_wwpn = 0L; 1882 phba->cfg_soft_wwpn = 0L;
1789 1883
1790 /* 1884 /*
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 3add7c237859..a51a41b7f15d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -558,6 +558,14 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
558 return; 558 return;
559} 559}
560 560
561static void
562lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
563 struct lpfc_iocbq * rspiocb)
564{
565 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
566 return;
567}
568
561void 569void
562lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp) 570lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
563{ 571{
@@ -629,6 +637,8 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
629 bpl->tus.f.bdeSize = RNN_REQUEST_SZ; 637 bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
630 else if (cmdcode == SLI_CTNS_RSNN_NN) 638 else if (cmdcode == SLI_CTNS_RSNN_NN)
631 bpl->tus.f.bdeSize = RSNN_REQUEST_SZ; 639 bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
640 else if (cmdcode == SLI_CTNS_RFF_ID)
641 bpl->tus.f.bdeSize = RFF_REQUEST_SZ;
632 else 642 else
633 bpl->tus.f.bdeSize = 0; 643 bpl->tus.f.bdeSize = 0;
634 bpl->tus.w = le32_to_cpu(bpl->tus.w); 644 bpl->tus.w = le32_to_cpu(bpl->tus.w);
@@ -660,6 +670,17 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
660 cmpl = lpfc_cmpl_ct_cmd_rft_id; 670 cmpl = lpfc_cmpl_ct_cmd_rft_id;
661 break; 671 break;
662 672
673 case SLI_CTNS_RFF_ID:
674 CtReq->CommandResponse.bits.CmdRsp =
675 be16_to_cpu(SLI_CTNS_RFF_ID);
676 CtReq->un.rff.PortId = be32_to_cpu(phba->fc_myDID);
677 CtReq->un.rff.feature_res = 0;
678 CtReq->un.rff.feature_tgt = 0;
679 CtReq->un.rff.type_code = FC_FCP_DATA;
680 CtReq->un.rff.feature_init = 1;
681 cmpl = lpfc_cmpl_ct_cmd_rff_id;
682 break;
683
663 case SLI_CTNS_RNN_ID: 684 case SLI_CTNS_RNN_ID:
664 CtReq->CommandResponse.bits.CmdRsp = 685 CtReq->CommandResponse.bits.CmdRsp =
665 be16_to_cpu(SLI_CTNS_RNN_ID); 686 be16_to_cpu(SLI_CTNS_RNN_ID);
@@ -934,7 +955,8 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
934 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); 955 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
935 ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION); 956 ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION);
936 sprintf(ae->un.OsNameVersion, "%s %s %s", 957 sprintf(ae->un.OsNameVersion, "%s %s %s",
937 init_utsname()->sysname, init_utsname()->release, 958 init_utsname()->sysname,
959 init_utsname()->release,
938 init_utsname()->version); 960 init_utsname()->version);
939 len = strlen(ae->un.OsNameVersion); 961 len = strlen(ae->un.OsNameVersion);
940 len += (len & 3) ? (4 - (len & 3)) : 4; 962 len += (len & 3) ? (4 - (len & 3)) : 4;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 71864cdc6c71..a5f33a0dd4e7 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -243,6 +243,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
243 struct serv_parm *sp, IOCB_t *irsp) 243 struct serv_parm *sp, IOCB_t *irsp)
244{ 244{
245 LPFC_MBOXQ_t *mbox; 245 LPFC_MBOXQ_t *mbox;
246 struct lpfc_dmabuf *mp;
246 int rc; 247 int rc;
247 248
248 spin_lock_irq(phba->host->host_lock); 249 spin_lock_irq(phba->host->host_lock);
@@ -307,10 +308,14 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
307 308
308 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); 309 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
309 if (rc == MBX_NOT_FINISHED) 310 if (rc == MBX_NOT_FINISHED)
310 goto fail_free_mbox; 311 goto fail_issue_reg_login;
311 312
312 return 0; 313 return 0;
313 314
315 fail_issue_reg_login:
316 mp = (struct lpfc_dmabuf *) mbox->context1;
317 lpfc_mbuf_free(phba, mp->virt, mp->phys);
318 kfree(mp);
314 fail_free_mbox: 319 fail_free_mbox:
315 mempool_free(mbox, phba->mbox_mem_pool); 320 mempool_free(mbox, phba->mbox_mem_pool);
316 fail: 321 fail:
@@ -657,6 +662,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp,
657 uint8_t name[sizeof (struct lpfc_name)]; 662 uint8_t name[sizeof (struct lpfc_name)];
658 uint32_t rc; 663 uint32_t rc;
659 664
665 /* Fabric nodes can have the same WWPN so we don't bother searching
666 * by WWPN. Just return the ndlp that was given to us.
667 */
668 if (ndlp->nlp_type & NLP_FABRIC)
669 return ndlp;
670
660 lp = (uint32_t *) prsp->virt; 671 lp = (uint32_t *) prsp->virt;
661 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 672 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
662 memset(name, 0, sizeof (struct lpfc_name)); 673 memset(name, 0, sizeof (struct lpfc_name));
@@ -1122,7 +1133,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1122 mempool_free(mbox, 1133 mempool_free(mbox,
1123 phba->mbox_mem_pool); 1134 phba->mbox_mem_pool);
1124 lpfc_disc_flush_list(phba); 1135 lpfc_disc_flush_list(phba);
1125 psli->ring[(psli->ip_ring)]. 1136 psli->ring[(psli->extra_ring)].
1126 flag &= 1137 flag &=
1127 ~LPFC_STOP_IOCB_EVENT; 1138 ~LPFC_STOP_IOCB_EVENT;
1128 psli->ring[(psli->fcp_ring)]. 1139 psli->ring[(psli->fcp_ring)].
@@ -1851,6 +1862,7 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1851 IOCB_t *irsp; 1862 IOCB_t *irsp;
1852 struct lpfc_nodelist *ndlp; 1863 struct lpfc_nodelist *ndlp;
1853 LPFC_MBOXQ_t *mbox = NULL; 1864 LPFC_MBOXQ_t *mbox = NULL;
1865 struct lpfc_dmabuf *mp;
1854 1866
1855 irsp = &rspiocb->iocb; 1867 irsp = &rspiocb->iocb;
1856 1868
@@ -1862,6 +1874,11 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1862 /* Check to see if link went down during discovery */ 1874 /* Check to see if link went down during discovery */
1863 if ((lpfc_els_chk_latt(phba)) || !ndlp) { 1875 if ((lpfc_els_chk_latt(phba)) || !ndlp) {
1864 if (mbox) { 1876 if (mbox) {
1877 mp = (struct lpfc_dmabuf *) mbox->context1;
1878 if (mp) {
1879 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1880 kfree(mp);
1881 }
1865 mempool_free( mbox, phba->mbox_mem_pool); 1882 mempool_free( mbox, phba->mbox_mem_pool);
1866 } 1883 }
1867 goto out; 1884 goto out;
@@ -1893,9 +1910,7 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1893 } 1910 }
1894 /* NOTE: we should have messages for unsuccessful 1911 /* NOTE: we should have messages for unsuccessful
1895 reglogin */ 1912 reglogin */
1896 mempool_free( mbox, phba->mbox_mem_pool);
1897 } else { 1913 } else {
1898 mempool_free( mbox, phba->mbox_mem_pool);
1899 /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */ 1914 /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */
1900 if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 1915 if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1901 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || 1916 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
@@ -1907,6 +1922,12 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1907 } 1922 }
1908 } 1923 }
1909 } 1924 }
1925 mp = (struct lpfc_dmabuf *) mbox->context1;
1926 if (mp) {
1927 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1928 kfree(mp);
1929 }
1930 mempool_free(mbox, phba->mbox_mem_pool);
1910 } 1931 }
1911out: 1932out:
1912 if (ndlp) { 1933 if (ndlp) {
@@ -2644,6 +2665,7 @@ lpfc_els_handle_rscn(struct lpfc_hba * phba)
2644 ndlp->nlp_type |= NLP_FABRIC; 2665 ndlp->nlp_type |= NLP_FABRIC;
2645 ndlp->nlp_prev_state = ndlp->nlp_state; 2666 ndlp->nlp_prev_state = ndlp->nlp_state;
2646 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 2667 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
2668 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
2647 lpfc_issue_els_plogi(phba, NameServer_DID, 0); 2669 lpfc_issue_els_plogi(phba, NameServer_DID, 0);
2648 /* Wait for NameServer login cmpl before we can 2670 /* Wait for NameServer login cmpl before we can
2649 continue */ 2671 continue */
@@ -3039,7 +3061,7 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
3039 /* FARP-REQ received from DID <did> */ 3061 /* FARP-REQ received from DID <did> */
3040 lpfc_printf_log(phba, 3062 lpfc_printf_log(phba,
3041 KERN_INFO, 3063 KERN_INFO,
3042 LOG_IP, 3064 LOG_ELS,
3043 "%d:0601 FARP-REQ received from DID x%x\n", 3065 "%d:0601 FARP-REQ received from DID x%x\n",
3044 phba->brd_no, did); 3066 phba->brd_no, did);
3045 3067
@@ -3101,7 +3123,7 @@ lpfc_els_rcv_farpr(struct lpfc_hba * phba,
3101 /* FARP-RSP received from DID <did> */ 3123 /* FARP-RSP received from DID <did> */
3102 lpfc_printf_log(phba, 3124 lpfc_printf_log(phba,
3103 KERN_INFO, 3125 KERN_INFO,
3104 LOG_IP, 3126 LOG_ELS,
3105 "%d:0600 FARP-RSP received from DID x%x\n", 3127 "%d:0600 FARP-RSP received from DID x%x\n",
3106 phba->brd_no, did); 3128 phba->brd_no, did);
3107 3129
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 19c79a0549a7..c39564e85e94 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -525,7 +525,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
525 psli = &phba->sli; 525 psli = &phba->sli;
526 mb = &pmb->mb; 526 mb = &pmb->mb;
527 /* Since we don't do discovery right now, turn these off here */ 527 /* Since we don't do discovery right now, turn these off here */
528 psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT; 528 psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
529 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT; 529 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
530 psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT; 530 psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
531 531
@@ -641,7 +641,7 @@ out:
641 if (rc == MBX_NOT_FINISHED) { 641 if (rc == MBX_NOT_FINISHED) {
642 mempool_free(pmb, phba->mbox_mem_pool); 642 mempool_free(pmb, phba->mbox_mem_pool);
643 lpfc_disc_flush_list(phba); 643 lpfc_disc_flush_list(phba);
644 psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 644 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
645 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 645 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
646 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 646 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
647 phba->hba_state = LPFC_HBA_READY; 647 phba->hba_state = LPFC_HBA_READY;
@@ -672,6 +672,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
672 672
673 memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt, 673 memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
674 sizeof (struct serv_parm)); 674 sizeof (struct serv_parm));
675 if (phba->cfg_soft_wwnn)
676 u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
675 if (phba->cfg_soft_wwpn) 677 if (phba->cfg_soft_wwpn)
676 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn); 678 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
677 memcpy((uint8_t *) & phba->fc_nodename, 679 memcpy((uint8_t *) & phba->fc_nodename,
@@ -696,7 +698,7 @@ out:
696 == MBX_NOT_FINISHED) { 698 == MBX_NOT_FINISHED) {
697 mempool_free( pmb, phba->mbox_mem_pool); 699 mempool_free( pmb, phba->mbox_mem_pool);
698 lpfc_disc_flush_list(phba); 700 lpfc_disc_flush_list(phba);
699 psli->ring[(psli->ip_ring)].flag &= 701 psli->ring[(psli->extra_ring)].flag &=
700 ~LPFC_STOP_IOCB_EVENT; 702 ~LPFC_STOP_IOCB_EVENT;
701 psli->ring[(psli->fcp_ring)].flag &= 703 psli->ring[(psli->fcp_ring)].flag &=
702 ~LPFC_STOP_IOCB_EVENT; 704 ~LPFC_STOP_IOCB_EVENT;
@@ -715,6 +717,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
715{ 717{
716 int i; 718 int i;
717 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; 719 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
720 struct lpfc_dmabuf *mp;
721 int rc;
722
718 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 723 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
719 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 724 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
720 725
@@ -793,16 +798,27 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
793 if (sparam_mbox) { 798 if (sparam_mbox) {
794 lpfc_read_sparam(phba, sparam_mbox); 799 lpfc_read_sparam(phba, sparam_mbox);
795 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 800 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
796 lpfc_sli_issue_mbox(phba, sparam_mbox, 801 rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
797 (MBX_NOWAIT | MBX_STOP_IOCB)); 802 (MBX_NOWAIT | MBX_STOP_IOCB));
803 if (rc == MBX_NOT_FINISHED) {
804 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
805 lpfc_mbuf_free(phba, mp->virt, mp->phys);
806 kfree(mp);
807 mempool_free(sparam_mbox, phba->mbox_mem_pool);
808 if (cfglink_mbox)
809 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
810 return;
811 }
798 } 812 }
799 813
800 if (cfglink_mbox) { 814 if (cfglink_mbox) {
801 phba->hba_state = LPFC_LOCAL_CFG_LINK; 815 phba->hba_state = LPFC_LOCAL_CFG_LINK;
802 lpfc_config_link(phba, cfglink_mbox); 816 lpfc_config_link(phba, cfglink_mbox);
803 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 817 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
804 lpfc_sli_issue_mbox(phba, cfglink_mbox, 818 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
805 (MBX_NOWAIT | MBX_STOP_IOCB)); 819 (MBX_NOWAIT | MBX_STOP_IOCB));
820 if (rc == MBX_NOT_FINISHED)
821 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
806 } 822 }
807} 823}
808 824
@@ -1067,6 +1083,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1067 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID); 1083 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
1068 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN); 1084 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
1069 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID); 1085 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
1086 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID);
1070 } 1087 }
1071 1088
1072 phba->fc_ns_retry = 0; 1089 phba->fc_ns_retry = 0;
@@ -1423,7 +1440,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1423 if (iocb->context1 == (uint8_t *) ndlp) 1440 if (iocb->context1 == (uint8_t *) ndlp)
1424 return 1; 1441 return 1;
1425 } 1442 }
1426 } else if (pring->ringno == psli->ip_ring) { 1443 } else if (pring->ringno == psli->extra_ring) {
1427 1444
1428 } else if (pring->ringno == psli->fcp_ring) { 1445 } else if (pring->ringno == psli->fcp_ring) {
1429 /* Skip match check if waiting to relogin to FCP target */ 1446 /* Skip match check if waiting to relogin to FCP target */
@@ -1680,112 +1697,38 @@ lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
1680struct lpfc_nodelist * 1697struct lpfc_nodelist *
1681lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) 1698lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1682{ 1699{
1683 struct lpfc_nodelist *ndlp, *next_ndlp; 1700 struct lpfc_nodelist *ndlp;
1701 struct list_head *lists[]={&phba->fc_nlpunmap_list,
1702 &phba->fc_nlpmap_list,
1703 &phba->fc_plogi_list,
1704 &phba->fc_adisc_list,
1705 &phba->fc_reglogin_list,
1706 &phba->fc_prli_list,
1707 &phba->fc_npr_list,
1708 &phba->fc_unused_list};
1709 uint32_t search[]={NLP_SEARCH_UNMAPPED,
1710 NLP_SEARCH_MAPPED,
1711 NLP_SEARCH_PLOGI,
1712 NLP_SEARCH_ADISC,
1713 NLP_SEARCH_REGLOGIN,
1714 NLP_SEARCH_PRLI,
1715 NLP_SEARCH_NPR,
1716 NLP_SEARCH_UNUSED};
1717 int i;
1684 uint32_t data1; 1718 uint32_t data1;
1685 1719
1686 spin_lock_irq(phba->host->host_lock); 1720 spin_lock_irq(phba->host->host_lock);
1687 if (order & NLP_SEARCH_UNMAPPED) { 1721 for (i = 0; i < ARRAY_SIZE(lists); i++ ) {
1688 list_for_each_entry_safe(ndlp, next_ndlp, 1722 if (!(order & search[i]))
1689 &phba->fc_nlpunmap_list, nlp_listp) { 1723 continue;
1690 if (lpfc_matchdid(phba, ndlp, did)) { 1724 list_for_each_entry(ndlp, lists[i], nlp_listp) {
1691 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1692 ((uint32_t) ndlp->nlp_xri << 16) |
1693 ((uint32_t) ndlp->nlp_type << 8) |
1694 ((uint32_t) ndlp->nlp_rpi & 0xff));
1695 /* FIND node DID unmapped */
1696 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1697 "%d:0929 FIND node DID unmapped"
1698 " Data: x%p x%x x%x x%x\n",
1699 phba->brd_no,
1700 ndlp, ndlp->nlp_DID,
1701 ndlp->nlp_flag, data1);
1702 spin_unlock_irq(phba->host->host_lock);
1703 return ndlp;
1704 }
1705 }
1706 }
1707
1708 if (order & NLP_SEARCH_MAPPED) {
1709 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
1710 nlp_listp) {
1711 if (lpfc_matchdid(phba, ndlp, did)) {
1712
1713 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1714 ((uint32_t) ndlp->nlp_xri << 16) |
1715 ((uint32_t) ndlp->nlp_type << 8) |
1716 ((uint32_t) ndlp->nlp_rpi & 0xff));
1717 /* FIND node DID mapped */
1718 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1719 "%d:0930 FIND node DID mapped "
1720 "Data: x%p x%x x%x x%x\n",
1721 phba->brd_no,
1722 ndlp, ndlp->nlp_DID,
1723 ndlp->nlp_flag, data1);
1724 spin_unlock_irq(phba->host->host_lock);
1725 return ndlp;
1726 }
1727 }
1728 }
1729
1730 if (order & NLP_SEARCH_PLOGI) {
1731 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
1732 nlp_listp) {
1733 if (lpfc_matchdid(phba, ndlp, did)) {
1734
1735 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1736 ((uint32_t) ndlp->nlp_xri << 16) |
1737 ((uint32_t) ndlp->nlp_type << 8) |
1738 ((uint32_t) ndlp->nlp_rpi & 0xff));
1739 /* LOG change to PLOGI */
1740 /* FIND node DID plogi */
1741 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1742 "%d:0908 FIND node DID plogi "
1743 "Data: x%p x%x x%x x%x\n",
1744 phba->brd_no,
1745 ndlp, ndlp->nlp_DID,
1746 ndlp->nlp_flag, data1);
1747 spin_unlock_irq(phba->host->host_lock);
1748 return ndlp;
1749 }
1750 }
1751 }
1752
1753 if (order & NLP_SEARCH_ADISC) {
1754 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1755 nlp_listp) {
1756 if (lpfc_matchdid(phba, ndlp, did)) {
1757
1758 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1759 ((uint32_t) ndlp->nlp_xri << 16) |
1760 ((uint32_t) ndlp->nlp_type << 8) |
1761 ((uint32_t) ndlp->nlp_rpi & 0xff));
1762 /* LOG change to ADISC */
1763 /* FIND node DID adisc */
1764 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1765 "%d:0931 FIND node DID adisc "
1766 "Data: x%p x%x x%x x%x\n",
1767 phba->brd_no,
1768 ndlp, ndlp->nlp_DID,
1769 ndlp->nlp_flag, data1);
1770 spin_unlock_irq(phba->host->host_lock);
1771 return ndlp;
1772 }
1773 }
1774 }
1775
1776 if (order & NLP_SEARCH_REGLOGIN) {
1777 list_for_each_entry_safe(ndlp, next_ndlp,
1778 &phba->fc_reglogin_list, nlp_listp) {
1779 if (lpfc_matchdid(phba, ndlp, did)) { 1725 if (lpfc_matchdid(phba, ndlp, did)) {
1780
1781 data1 = (((uint32_t) ndlp->nlp_state << 24) | 1726 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1782 ((uint32_t) ndlp->nlp_xri << 16) | 1727 ((uint32_t) ndlp->nlp_xri << 16) |
1783 ((uint32_t) ndlp->nlp_type << 8) | 1728 ((uint32_t) ndlp->nlp_type << 8) |
1784 ((uint32_t) ndlp->nlp_rpi & 0xff)); 1729 ((uint32_t) ndlp->nlp_rpi & 0xff));
1785 /* LOG change to REGLOGIN */
1786 /* FIND node DID reglogin */
1787 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1730 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1788 "%d:0901 FIND node DID reglogin" 1731 "%d:0929 FIND node DID "
1789 " Data: x%p x%x x%x x%x\n", 1732 " Data: x%p x%x x%x x%x\n",
1790 phba->brd_no, 1733 phba->brd_no,
1791 ndlp, ndlp->nlp_DID, 1734 ndlp, ndlp->nlp_DID,
@@ -1795,86 +1738,12 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1795 } 1738 }
1796 } 1739 }
1797 } 1740 }
1798
1799 if (order & NLP_SEARCH_PRLI) {
1800 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
1801 nlp_listp) {
1802 if (lpfc_matchdid(phba, ndlp, did)) {
1803
1804 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1805 ((uint32_t) ndlp->nlp_xri << 16) |
1806 ((uint32_t) ndlp->nlp_type << 8) |
1807 ((uint32_t) ndlp->nlp_rpi & 0xff));
1808 /* LOG change to PRLI */
1809 /* FIND node DID prli */
1810 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1811 "%d:0902 FIND node DID prli "
1812 "Data: x%p x%x x%x x%x\n",
1813 phba->brd_no,
1814 ndlp, ndlp->nlp_DID,
1815 ndlp->nlp_flag, data1);
1816 spin_unlock_irq(phba->host->host_lock);
1817 return ndlp;
1818 }
1819 }
1820 }
1821
1822 if (order & NLP_SEARCH_NPR) {
1823 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1824 nlp_listp) {
1825 if (lpfc_matchdid(phba, ndlp, did)) {
1826
1827 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1828 ((uint32_t) ndlp->nlp_xri << 16) |
1829 ((uint32_t) ndlp->nlp_type << 8) |
1830 ((uint32_t) ndlp->nlp_rpi & 0xff));
1831 /* LOG change to NPR */
1832 /* FIND node DID npr */
1833 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1834 "%d:0903 FIND node DID npr "
1835 "Data: x%p x%x x%x x%x\n",
1836 phba->brd_no,
1837 ndlp, ndlp->nlp_DID,
1838 ndlp->nlp_flag, data1);
1839 spin_unlock_irq(phba->host->host_lock);
1840 return ndlp;
1841 }
1842 }
1843 }
1844
1845 if (order & NLP_SEARCH_UNUSED) {
1846 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1847 nlp_listp) {
1848 if (lpfc_matchdid(phba, ndlp, did)) {
1849
1850 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1851 ((uint32_t) ndlp->nlp_xri << 16) |
1852 ((uint32_t) ndlp->nlp_type << 8) |
1853 ((uint32_t) ndlp->nlp_rpi & 0xff));
1854 /* LOG change to UNUSED */
1855 /* FIND node DID unused */
1856 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1857 "%d:0905 FIND node DID unused "
1858 "Data: x%p x%x x%x x%x\n",
1859 phba->brd_no,
1860 ndlp, ndlp->nlp_DID,
1861 ndlp->nlp_flag, data1);
1862 spin_unlock_irq(phba->host->host_lock);
1863 return ndlp;
1864 }
1865 }
1866 }
1867
1868 spin_unlock_irq(phba->host->host_lock); 1741 spin_unlock_irq(phba->host->host_lock);
1869 1742
1870 /* FIND node did <did> NOT FOUND */ 1743 /* FIND node did <did> NOT FOUND */
1871 lpfc_printf_log(phba, 1744 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1872 KERN_INFO,
1873 LOG_NODE,
1874 "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n", 1745 "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
1875 phba->brd_no, did, order); 1746 phba->brd_no, did, order);
1876
1877 /* no match found */
1878 return NULL; 1747 return NULL;
1879} 1748}
1880 1749
@@ -2036,7 +1905,7 @@ lpfc_disc_start(struct lpfc_hba * phba)
2036 if (rc == MBX_NOT_FINISHED) { 1905 if (rc == MBX_NOT_FINISHED) {
2037 mempool_free( mbox, phba->mbox_mem_pool); 1906 mempool_free( mbox, phba->mbox_mem_pool);
2038 lpfc_disc_flush_list(phba); 1907 lpfc_disc_flush_list(phba);
2039 psli->ring[(psli->ip_ring)].flag &= 1908 psli->ring[(psli->extra_ring)].flag &=
2040 ~LPFC_STOP_IOCB_EVENT; 1909 ~LPFC_STOP_IOCB_EVENT;
2041 psli->ring[(psli->fcp_ring)].flag &= 1910 psli->ring[(psli->fcp_ring)].flag &=
2042 ~LPFC_STOP_IOCB_EVENT; 1911 ~LPFC_STOP_IOCB_EVENT;
@@ -2415,7 +2284,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2415 2284
2416 if (clrlaerr) { 2285 if (clrlaerr) {
2417 lpfc_disc_flush_list(phba); 2286 lpfc_disc_flush_list(phba);
2418 psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 2287 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2419 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 2288 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2420 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 2289 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2421 phba->hba_state = LPFC_HBA_READY; 2290 phba->hba_state = LPFC_HBA_READY;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index eedf98801366..f79cb6136906 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -42,14 +42,14 @@
42#define FCELSSIZE 1024 /* maximum ELS transfer size */ 42#define FCELSSIZE 1024 /* maximum ELS transfer size */
43 43
44#define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */ 44#define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */
45#define LPFC_IP_RING 1 /* ring 1 for IP commands */ 45#define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */
46#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */ 46#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
47#define LPFC_FCP_NEXT_RING 3 47#define LPFC_FCP_NEXT_RING 3
48 48
49#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */ 49#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
50#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */ 50#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
51#define SLI2_IOCB_CMD_R1_ENTRIES 4 /* SLI-2 IP command ring entries */ 51#define SLI2_IOCB_CMD_R1_ENTRIES 4 /* SLI-2 extra command ring entries */
52#define SLI2_IOCB_RSP_R1_ENTRIES 4 /* SLI-2 IP response ring entries */ 52#define SLI2_IOCB_RSP_R1_ENTRIES 4 /* SLI-2 extra response ring entries */
53#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36 /* SLI-2 extra FCP cmd ring entries */ 53#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36 /* SLI-2 extra FCP cmd ring entries */
54#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52 /* SLI-2 extra FCP rsp ring entries */ 54#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52 /* SLI-2 extra FCP rsp ring entries */
55#define SLI2_IOCB_CMD_R2_ENTRIES 20 /* SLI-2 ELS command ring entries */ 55#define SLI2_IOCB_CMD_R2_ENTRIES 20 /* SLI-2 ELS command ring entries */
@@ -121,6 +121,20 @@ struct lpfc_sli_ct_request {
121 121
122 uint32_t rsvd[7]; 122 uint32_t rsvd[7];
123 } rft; 123 } rft;
124 struct rff {
125 uint32_t PortId;
126 uint8_t reserved[2];
127#ifdef __BIG_ENDIAN_BITFIELD
128 uint8_t feature_res:6;
129 uint8_t feature_init:1;
130 uint8_t feature_tgt:1;
131#else /* __LITTLE_ENDIAN_BITFIELD */
132 uint8_t feature_tgt:1;
133 uint8_t feature_init:1;
134 uint8_t feature_res:6;
135#endif
136 uint8_t type_code; /* type=8 for FCP */
137 } rff;
124 struct rnn { 138 struct rnn {
125 uint32_t PortId; /* For RNN_ID requests */ 139 uint32_t PortId; /* For RNN_ID requests */
126 uint8_t wwnn[8]; 140 uint8_t wwnn[8];
@@ -136,6 +150,7 @@ struct lpfc_sli_ct_request {
136#define SLI_CT_REVISION 1 150#define SLI_CT_REVISION 1
137#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260) 151#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260)
138#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228) 152#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228)
153#define RFF_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 235)
139#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252) 154#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252)
140#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request)) 155#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request))
141 156
@@ -225,6 +240,7 @@ struct lpfc_sli_ct_request {
225#define SLI_CTNS_RNN_ID 0x0213 240#define SLI_CTNS_RNN_ID 0x0213
226#define SLI_CTNS_RCS_ID 0x0214 241#define SLI_CTNS_RCS_ID 0x0214
227#define SLI_CTNS_RFT_ID 0x0217 242#define SLI_CTNS_RFT_ID 0x0217
243#define SLI_CTNS_RFF_ID 0x021F
228#define SLI_CTNS_RSPN_ID 0x0218 244#define SLI_CTNS_RSPN_ID 0x0218
229#define SLI_CTNS_RPT_ID 0x021A 245#define SLI_CTNS_RPT_ID 0x021A
230#define SLI_CTNS_RIP_NN 0x0235 246#define SLI_CTNS_RIP_NN 0x0235
@@ -1089,12 +1105,6 @@ typedef struct {
1089#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 1105#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
1090#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 1106#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
1091 1107
1092#define PCI_SUBSYSTEM_ID_LP11000S 0xfc11
1093#define PCI_SUBSYSTEM_ID_LP11002S 0xfc12
1094#define PCI_SUBSYSTEM_ID_LPE11000S 0xfc21
1095#define PCI_SUBSYSTEM_ID_LPE11002S 0xfc22
1096#define PCI_SUBSYSTEM_ID_LPE11010S 0xfc2A
1097
1098#define JEDEC_ID_ADDRESS 0x0080001c 1108#define JEDEC_ID_ADDRESS 0x0080001c
1099#define FIREFLY_JEDEC_ID 0x1ACC 1109#define FIREFLY_JEDEC_ID 0x1ACC
1100#define SUPERFLY_JEDEC_ID 0x0020 1110#define SUPERFLY_JEDEC_ID 0x0020
@@ -1284,6 +1294,10 @@ typedef struct { /* FireFly BIU registers */
1284#define CMD_FCP_IREAD_CX 0x1B 1294#define CMD_FCP_IREAD_CX 0x1B
1285#define CMD_FCP_ICMND_CR 0x1C 1295#define CMD_FCP_ICMND_CR 0x1C
1286#define CMD_FCP_ICMND_CX 0x1D 1296#define CMD_FCP_ICMND_CX 0x1D
1297#define CMD_FCP_TSEND_CX 0x1F
1298#define CMD_FCP_TRECEIVE_CX 0x21
1299#define CMD_FCP_TRSP_CX 0x23
1300#define CMD_FCP_AUTO_TRSP_CX 0x29
1287 1301
1288#define CMD_ADAPTER_MSG 0x20 1302#define CMD_ADAPTER_MSG 0x20
1289#define CMD_ADAPTER_DUMP 0x22 1303#define CMD_ADAPTER_DUMP 0x22
@@ -1310,6 +1324,9 @@ typedef struct { /* FireFly BIU registers */
1310#define CMD_FCP_IREAD64_CX 0x9B 1324#define CMD_FCP_IREAD64_CX 0x9B
1311#define CMD_FCP_ICMND64_CR 0x9C 1325#define CMD_FCP_ICMND64_CR 0x9C
1312#define CMD_FCP_ICMND64_CX 0x9D 1326#define CMD_FCP_ICMND64_CX 0x9D
1327#define CMD_FCP_TSEND64_CX 0x9F
1328#define CMD_FCP_TRECEIVE64_CX 0xA1
1329#define CMD_FCP_TRSP64_CX 0xA3
1313 1330
1314#define CMD_GEN_REQUEST64_CR 0xC2 1331#define CMD_GEN_REQUEST64_CR 0xC2
1315#define CMD_GEN_REQUEST64_CX 0xC3 1332#define CMD_GEN_REQUEST64_CX 0xC3
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a5723ad0a099..afca45cdbcef 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -268,6 +268,8 @@ lpfc_config_port_post(struct lpfc_hba * phba)
268 kfree(mp); 268 kfree(mp);
269 pmb->context1 = NULL; 269 pmb->context1 = NULL;
270 270
271 if (phba->cfg_soft_wwnn)
272 u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
271 if (phba->cfg_soft_wwpn) 273 if (phba->cfg_soft_wwpn)
272 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn); 274 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
273 memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName, 275 memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
@@ -349,8 +351,8 @@ lpfc_config_port_post(struct lpfc_hba * phba)
349 phba->hba_state = LPFC_LINK_DOWN; 351 phba->hba_state = LPFC_LINK_DOWN;
350 352
351 /* Only process IOCBs on ring 0 till hba_state is READY */ 353 /* Only process IOCBs on ring 0 till hba_state is READY */
352 if (psli->ring[psli->ip_ring].cmdringaddr) 354 if (psli->ring[psli->extra_ring].cmdringaddr)
353 psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT; 355 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
354 if (psli->ring[psli->fcp_ring].cmdringaddr) 356 if (psli->ring[psli->fcp_ring].cmdringaddr)
355 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 357 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
356 if (psli->ring[psli->next_ring].cmdringaddr) 358 if (psli->ring[psli->next_ring].cmdringaddr)
@@ -517,7 +519,8 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
517 struct lpfc_sli_ring *pring; 519 struct lpfc_sli_ring *pring;
518 uint32_t event_data; 520 uint32_t event_data;
519 521
520 if (phba->work_hs & HS_FFER6) { 522 if (phba->work_hs & HS_FFER6 ||
523 phba->work_hs & HS_FFER5) {
521 /* Re-establishing Link */ 524 /* Re-establishing Link */
522 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 525 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
523 "%d:1301 Re-establishing Link " 526 "%d:1301 Re-establishing Link "
@@ -611,7 +614,7 @@ lpfc_handle_latt(struct lpfc_hba * phba)
611 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 614 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
612 rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)); 615 rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
613 if (rc == MBX_NOT_FINISHED) 616 if (rc == MBX_NOT_FINISHED)
614 goto lpfc_handle_latt_free_mp; 617 goto lpfc_handle_latt_free_mbuf;
615 618
616 /* Clear Link Attention in HA REG */ 619 /* Clear Link Attention in HA REG */
617 spin_lock_irq(phba->host->host_lock); 620 spin_lock_irq(phba->host->host_lock);
@@ -621,6 +624,8 @@ lpfc_handle_latt(struct lpfc_hba * phba)
621 624
622 return; 625 return;
623 626
627lpfc_handle_latt_free_mbuf:
628 lpfc_mbuf_free(phba, mp->virt, mp->phys);
624lpfc_handle_latt_free_mp: 629lpfc_handle_latt_free_mp:
625 kfree(mp); 630 kfree(mp);
626lpfc_handle_latt_free_pmb: 631lpfc_handle_latt_free_pmb:
@@ -802,19 +807,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
802{ 807{
803 lpfc_vpd_t *vp; 808 lpfc_vpd_t *vp;
804 uint16_t dev_id = phba->pcidev->device; 809 uint16_t dev_id = phba->pcidev->device;
805 uint16_t dev_subid = phba->pcidev->subsystem_device;
806 uint8_t hdrtype;
807 int max_speed; 810 int max_speed;
808 char * ports;
809 struct { 811 struct {
810 char * name; 812 char * name;
811 int max_speed; 813 int max_speed;
812 char * ports;
813 char * bus; 814 char * bus;
814 } m = {"<Unknown>", 0, "", ""}; 815 } m = {"<Unknown>", 0, ""};
815 816
816 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
817 ports = (hdrtype == 0x80) ? "2-port " : "";
818 if (mdp && mdp[0] != '\0' 817 if (mdp && mdp[0] != '\0'
819 && descp && descp[0] != '\0') 818 && descp && descp[0] != '\0')
820 return; 819 return;
@@ -834,130 +833,93 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
834 833
835 switch (dev_id) { 834 switch (dev_id) {
836 case PCI_DEVICE_ID_FIREFLY: 835 case PCI_DEVICE_ID_FIREFLY:
837 m = (typeof(m)){"LP6000", max_speed, "", "PCI"}; 836 m = (typeof(m)){"LP6000", max_speed, "PCI"};
838 break; 837 break;
839 case PCI_DEVICE_ID_SUPERFLY: 838 case PCI_DEVICE_ID_SUPERFLY:
840 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 839 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
841 m = (typeof(m)){"LP7000", max_speed, "", "PCI"}; 840 m = (typeof(m)){"LP7000", max_speed, "PCI"};
842 else 841 else
843 m = (typeof(m)){"LP7000E", max_speed, "", "PCI"}; 842 m = (typeof(m)){"LP7000E", max_speed, "PCI"};
844 break; 843 break;
845 case PCI_DEVICE_ID_DRAGONFLY: 844 case PCI_DEVICE_ID_DRAGONFLY:
846 m = (typeof(m)){"LP8000", max_speed, "", "PCI"}; 845 m = (typeof(m)){"LP8000", max_speed, "PCI"};
847 break; 846 break;
848 case PCI_DEVICE_ID_CENTAUR: 847 case PCI_DEVICE_ID_CENTAUR:
849 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 848 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
850 m = (typeof(m)){"LP9002", max_speed, "", "PCI"}; 849 m = (typeof(m)){"LP9002", max_speed, "PCI"};
851 else 850 else
852 m = (typeof(m)){"LP9000", max_speed, "", "PCI"}; 851 m = (typeof(m)){"LP9000", max_speed, "PCI"};
853 break; 852 break;
854 case PCI_DEVICE_ID_RFLY: 853 case PCI_DEVICE_ID_RFLY:
855 m = (typeof(m)){"LP952", max_speed, "", "PCI"}; 854 m = (typeof(m)){"LP952", max_speed, "PCI"};
856 break; 855 break;
857 case PCI_DEVICE_ID_PEGASUS: 856 case PCI_DEVICE_ID_PEGASUS:
858 m = (typeof(m)){"LP9802", max_speed, "", "PCI-X"}; 857 m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
859 break; 858 break;
860 case PCI_DEVICE_ID_THOR: 859 case PCI_DEVICE_ID_THOR:
861 if (hdrtype == 0x80) 860 m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
862 m = (typeof(m)){"LP10000DC",
863 max_speed, ports, "PCI-X"};
864 else
865 m = (typeof(m)){"LP10000",
866 max_speed, ports, "PCI-X"};
867 break; 861 break;
868 case PCI_DEVICE_ID_VIPER: 862 case PCI_DEVICE_ID_VIPER:
869 m = (typeof(m)){"LPX1000", max_speed, "", "PCI-X"}; 863 m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
870 break; 864 break;
871 case PCI_DEVICE_ID_PFLY: 865 case PCI_DEVICE_ID_PFLY:
872 m = (typeof(m)){"LP982", max_speed, "", "PCI-X"}; 866 m = (typeof(m)){"LP982", max_speed, "PCI-X"};
873 break; 867 break;
874 case PCI_DEVICE_ID_TFLY: 868 case PCI_DEVICE_ID_TFLY:
875 if (hdrtype == 0x80) 869 m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
876 m = (typeof(m)){"LP1050DC", max_speed, ports, "PCI-X"};
877 else
878 m = (typeof(m)){"LP1050", max_speed, ports, "PCI-X"};
879 break; 870 break;
880 case PCI_DEVICE_ID_HELIOS: 871 case PCI_DEVICE_ID_HELIOS:
881 if (hdrtype == 0x80) 872 m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
882 m = (typeof(m)){"LP11002", max_speed, ports, "PCI-X2"};
883 else
884 m = (typeof(m)){"LP11000", max_speed, ports, "PCI-X2"};
885 break; 873 break;
886 case PCI_DEVICE_ID_HELIOS_SCSP: 874 case PCI_DEVICE_ID_HELIOS_SCSP:
887 m = (typeof(m)){"LP11000-SP", max_speed, ports, "PCI-X2"}; 875 m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
888 break; 876 break;
889 case PCI_DEVICE_ID_HELIOS_DCSP: 877 case PCI_DEVICE_ID_HELIOS_DCSP:
890 m = (typeof(m)){"LP11002-SP", max_speed, ports, "PCI-X2"}; 878 m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
891 break; 879 break;
892 case PCI_DEVICE_ID_NEPTUNE: 880 case PCI_DEVICE_ID_NEPTUNE:
893 if (hdrtype == 0x80) 881 m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
894 m = (typeof(m)){"LPe1002", max_speed, ports, "PCIe"};
895 else
896 m = (typeof(m)){"LPe1000", max_speed, ports, "PCIe"};
897 break; 882 break;
898 case PCI_DEVICE_ID_NEPTUNE_SCSP: 883 case PCI_DEVICE_ID_NEPTUNE_SCSP:
899 m = (typeof(m)){"LPe1000-SP", max_speed, ports, "PCIe"}; 884 m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
900 break; 885 break;
901 case PCI_DEVICE_ID_NEPTUNE_DCSP: 886 case PCI_DEVICE_ID_NEPTUNE_DCSP:
902 m = (typeof(m)){"LPe1002-SP", max_speed, ports, "PCIe"}; 887 m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
903 break; 888 break;
904 case PCI_DEVICE_ID_BMID: 889 case PCI_DEVICE_ID_BMID:
905 m = (typeof(m)){"LP1150", max_speed, ports, "PCI-X2"}; 890 m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
906 break; 891 break;
907 case PCI_DEVICE_ID_BSMB: 892 case PCI_DEVICE_ID_BSMB:
908 m = (typeof(m)){"LP111", max_speed, ports, "PCI-X2"}; 893 m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
909 break; 894 break;
910 case PCI_DEVICE_ID_ZEPHYR: 895 case PCI_DEVICE_ID_ZEPHYR:
911 if (hdrtype == 0x80) 896 m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
912 m = (typeof(m)){"LPe11002", max_speed, ports, "PCIe"};
913 else
914 m = (typeof(m)){"LPe11000", max_speed, ports, "PCIe"};
915 break; 897 break;
916 case PCI_DEVICE_ID_ZEPHYR_SCSP: 898 case PCI_DEVICE_ID_ZEPHYR_SCSP:
917 m = (typeof(m)){"LPe11000", max_speed, ports, "PCIe"}; 899 m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
918 break; 900 break;
919 case PCI_DEVICE_ID_ZEPHYR_DCSP: 901 case PCI_DEVICE_ID_ZEPHYR_DCSP:
920 m = (typeof(m)){"LPe11002-SP", max_speed, ports, "PCIe"}; 902 m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"};
921 break; 903 break;
922 case PCI_DEVICE_ID_ZMID: 904 case PCI_DEVICE_ID_ZMID:
923 m = (typeof(m)){"LPe1150", max_speed, ports, "PCIe"}; 905 m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
924 break; 906 break;
925 case PCI_DEVICE_ID_ZSMB: 907 case PCI_DEVICE_ID_ZSMB:
926 m = (typeof(m)){"LPe111", max_speed, ports, "PCIe"}; 908 m = (typeof(m)){"LPe111", max_speed, "PCIe"};
927 break; 909 break;
928 case PCI_DEVICE_ID_LP101: 910 case PCI_DEVICE_ID_LP101:
929 m = (typeof(m)){"LP101", max_speed, ports, "PCI-X"}; 911 m = (typeof(m)){"LP101", max_speed, "PCI-X"};
930 break; 912 break;
931 case PCI_DEVICE_ID_LP10000S: 913 case PCI_DEVICE_ID_LP10000S:
932 m = (typeof(m)){"LP10000-S", max_speed, ports, "PCI"}; 914 m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
933 break; 915 break;
934 case PCI_DEVICE_ID_LP11000S: 916 case PCI_DEVICE_ID_LP11000S:
917 m = (typeof(m)){"LP11000-S", max_speed,
918 "PCI-X2"};
919 break;
935 case PCI_DEVICE_ID_LPE11000S: 920 case PCI_DEVICE_ID_LPE11000S:
936 switch (dev_subid) { 921 m = (typeof(m)){"LPe11000-S", max_speed,
937 case PCI_SUBSYSTEM_ID_LP11000S: 922 "PCIe"};
938 m = (typeof(m)){"LP11000-S", max_speed,
939 ports, "PCI-X2"};
940 break;
941 case PCI_SUBSYSTEM_ID_LP11002S:
942 m = (typeof(m)){"LP11002-S", max_speed,
943 ports, "PCI-X2"};
944 break;
945 case PCI_SUBSYSTEM_ID_LPE11000S:
946 m = (typeof(m)){"LPe11000-S", max_speed,
947 ports, "PCIe"};
948 break;
949 case PCI_SUBSYSTEM_ID_LPE11002S:
950 m = (typeof(m)){"LPe11002-S", max_speed,
951 ports, "PCIe"};
952 break;
953 case PCI_SUBSYSTEM_ID_LPE11010S:
954 m = (typeof(m)){"LPe11010-S", max_speed,
955 "10-port ", "PCIe"};
956 break;
957 default:
958 m = (typeof(m)){ NULL };
959 break;
960 }
961 break; 923 break;
962 default: 924 default:
963 m = (typeof(m)){ NULL }; 925 m = (typeof(m)){ NULL };
@@ -968,8 +930,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
968 snprintf(mdp, 79,"%s", m.name); 930 snprintf(mdp, 79,"%s", m.name);
969 if (descp && descp[0] == '\0') 931 if (descp && descp[0] == '\0')
970 snprintf(descp, 255, 932 snprintf(descp, 255,
971 "Emulex %s %dGb %s%s Fibre Channel Adapter", 933 "Emulex %s %dGb %s Fibre Channel Adapter",
972 m.name, m.max_speed, m.ports, m.bus); 934 m.name, m.max_speed, m.bus);
973} 935}
974 936
975/**************************************************/ 937/**************************************************/
@@ -1651,6 +1613,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1651 if (error) 1613 if (error)
1652 goto out_remove_host; 1614 goto out_remove_host;
1653 1615
1616 if (phba->cfg_use_msi) {
1617 error = pci_enable_msi(phba->pcidev);
1618 if (error)
1619 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "%d:0452 "
1620 "Enable MSI failed, continuing with "
1621 "IRQ\n", phba->brd_no);
1622 }
1623
1654 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, 1624 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
1655 LPFC_DRIVER_NAME, phba); 1625 LPFC_DRIVER_NAME, phba);
1656 if (error) { 1626 if (error) {
@@ -1730,6 +1700,7 @@ out_free_irq:
1730 lpfc_stop_timer(phba); 1700 lpfc_stop_timer(phba);
1731 phba->work_hba_events = 0; 1701 phba->work_hba_events = 0;
1732 free_irq(phba->pcidev->irq, phba); 1702 free_irq(phba->pcidev->irq, phba);
1703 pci_disable_msi(phba->pcidev);
1733out_free_sysfs_attr: 1704out_free_sysfs_attr:
1734 lpfc_free_sysfs_attr(phba); 1705 lpfc_free_sysfs_attr(phba);
1735out_remove_host: 1706out_remove_host:
@@ -1796,6 +1767,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
1796 1767
1797 /* Release the irq reservation */ 1768 /* Release the irq reservation */
1798 free_irq(phba->pcidev->irq, phba); 1769 free_irq(phba->pcidev->irq, phba);
1770 pci_disable_msi(phba->pcidev);
1799 1771
1800 lpfc_cleanup(phba, 0); 1772 lpfc_cleanup(phba, 0);
1801 lpfc_stop_timer(phba); 1773 lpfc_stop_timer(phba);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 62c8ca862e9e..438cbcd9eb13 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -28,7 +28,7 @@
28#define LOG_NODE 0x80 /* Node table events */ 28#define LOG_NODE 0x80 /* Node table events */
29#define LOG_MISC 0x400 /* Miscellaneous events */ 29#define LOG_MISC 0x400 /* Miscellaneous events */
30#define LOG_SLI 0x800 /* SLI events */ 30#define LOG_SLI 0x800 /* SLI events */
31#define LOG_CHK_COND 0x1000 /* FCP Check condition flag */ 31#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
32#define LOG_LIBDFC 0x2000 /* Libdfc events */ 32#define LOG_LIBDFC 0x2000 /* Libdfc events */
33#define LOG_ALL_MSG 0xffff /* LOG all messages */ 33#define LOG_ALL_MSG 0xffff /* LOG all messages */
34 34
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d5f415007db2..0c7e731dc45a 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -739,7 +739,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
739 uint32_t evt) 739 uint32_t evt)
740{ 740{
741 struct lpfc_iocbq *cmdiocb, *rspiocb; 741 struct lpfc_iocbq *cmdiocb, *rspiocb;
742 struct lpfc_dmabuf *pcmd, *prsp; 742 struct lpfc_dmabuf *pcmd, *prsp, *mp;
743 uint32_t *lp; 743 uint32_t *lp;
744 IOCB_t *irsp; 744 IOCB_t *irsp;
745 struct serv_parm *sp; 745 struct serv_parm *sp;
@@ -829,6 +829,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
829 NLP_REGLOGIN_LIST); 829 NLP_REGLOGIN_LIST);
830 return ndlp->nlp_state; 830 return ndlp->nlp_state;
831 } 831 }
832 mp = (struct lpfc_dmabuf *)mbox->context1;
833 lpfc_mbuf_free(phba, mp->virt, mp->phys);
834 kfree(mp);
832 mempool_free(mbox, phba->mbox_mem_pool); 835 mempool_free(mbox, phba->mbox_mem_pool);
833 } else { 836 } else {
834 mempool_free(mbox, phba->mbox_mem_pool); 837 mempool_free(mbox, phba->mbox_mem_pool);
@@ -1620,8 +1623,8 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
1620 * or discovery in progress for this node. Starting discovery 1623 * or discovery in progress for this node. Starting discovery
1621 * here will affect the counting of discovery threads. 1624 * here will affect the counting of discovery threads.
1622 */ 1625 */
1623 if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) && 1626 if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
1624 (ndlp->nlp_flag & NLP_NPR_2B_DISC)){ 1627 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)){
1625 if (ndlp->nlp_flag & NLP_NPR_ADISC) { 1628 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1626 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1629 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1627 ndlp->nlp_state = NLP_STE_ADISC_ISSUE; 1630 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 97ae98dc95d0..c3e68e0d8f74 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -297,8 +297,10 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
297 uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm; 297 uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
298 uint32_t resp_info = fcprsp->rspStatus2; 298 uint32_t resp_info = fcprsp->rspStatus2;
299 uint32_t scsi_status = fcprsp->rspStatus3; 299 uint32_t scsi_status = fcprsp->rspStatus3;
300 uint32_t *lp;
300 uint32_t host_status = DID_OK; 301 uint32_t host_status = DID_OK;
301 uint32_t rsplen = 0; 302 uint32_t rsplen = 0;
303 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
302 304
303 /* 305 /*
304 * If this is a task management command, there is no 306 * If this is a task management command, there is no
@@ -310,10 +312,25 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
310 goto out; 312 goto out;
311 } 313 }
312 314
313 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 315 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
314 "%d:0730 FCP command failed: RSP " 316 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
315 "Data: x%x x%x x%x x%x x%x x%x\n", 317 if (snslen > SCSI_SENSE_BUFFERSIZE)
316 phba->brd_no, resp_info, scsi_status, 318 snslen = SCSI_SENSE_BUFFERSIZE;
319
320 if (resp_info & RSP_LEN_VALID)
321 rsplen = be32_to_cpu(fcprsp->rspRspLen);
322 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
323 }
324 lp = (uint32_t *)cmnd->sense_buffer;
325
326 if (!scsi_status && (resp_info & RESID_UNDER))
327 logit = LOG_FCP;
328
329 lpfc_printf_log(phba, KERN_WARNING, logit,
330 "%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
331 "Data: x%x x%x x%x x%x x%x\n",
332 phba->brd_no, cmnd->cmnd[0], scsi_status,
333 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
317 be32_to_cpu(fcprsp->rspResId), 334 be32_to_cpu(fcprsp->rspResId),
318 be32_to_cpu(fcprsp->rspSnsLen), 335 be32_to_cpu(fcprsp->rspSnsLen),
319 be32_to_cpu(fcprsp->rspRspLen), 336 be32_to_cpu(fcprsp->rspRspLen),
@@ -328,14 +345,6 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
328 } 345 }
329 } 346 }
330 347
331 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
332 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
333 if (snslen > SCSI_SENSE_BUFFERSIZE)
334 snslen = SCSI_SENSE_BUFFERSIZE;
335
336 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
337 }
338
339 cmnd->resid = 0; 348 cmnd->resid = 0;
340 if (resp_info & RESID_UNDER) { 349 if (resp_info & RESID_UNDER) {
341 cmnd->resid = be32_to_cpu(fcprsp->rspResId); 350 cmnd->resid = be32_to_cpu(fcprsp->rspResId);
@@ -378,7 +387,7 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
378 */ 387 */
379 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && 388 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
380 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { 389 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
381 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 390 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
382 "%d:0734 FCP Read Check Error Data: " 391 "%d:0734 FCP Read Check Error Data: "
383 "x%x x%x x%x x%x\n", phba->brd_no, 392 "x%x x%x x%x x%x\n", phba->brd_no,
384 be32_to_cpu(fcpcmd->fcpDl), 393 be32_to_cpu(fcpcmd->fcpDl),
@@ -670,6 +679,9 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
670 struct lpfc_iocbq *iocbqrsp; 679 struct lpfc_iocbq *iocbqrsp;
671 int ret; 680 int ret;
672 681
682 if (!rdata->pnode)
683 return FAILED;
684
673 lpfc_cmd->rdata = rdata; 685 lpfc_cmd->rdata = rdata;
674 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun, 686 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
675 FCP_TARGET_RESET); 687 FCP_TARGET_RESET);
@@ -976,20 +988,34 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
976 988
977 lpfc_block_error_handler(cmnd); 989 lpfc_block_error_handler(cmnd);
978 spin_lock_irq(shost->host_lock); 990 spin_lock_irq(shost->host_lock);
991 loopcnt = 0;
979 /* 992 /*
980 * If target is not in a MAPPED state, delay the reset until 993 * If target is not in a MAPPED state, delay the reset until
981 * target is rediscovered or devloss timeout expires. 994 * target is rediscovered or devloss timeout expires.
982 */ 995 */
983 while ( 1 ) { 996 while ( 1 ) {
984 if (!pnode) 997 if (!pnode)
985 break; 998 return FAILED;
986 999
987 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) { 1000 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
988 spin_unlock_irq(phba->host->host_lock); 1001 spin_unlock_irq(phba->host->host_lock);
989 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 1002 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
990 spin_lock_irq(phba->host->host_lock); 1003 spin_lock_irq(phba->host->host_lock);
1004 loopcnt++;
1005 rdata = cmnd->device->hostdata;
1006 if (!rdata ||
1007 (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
1008 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1009 "%d:0721 LUN Reset rport failure:"
1010 " cnt x%x rdata x%p\n",
1011 phba->brd_no, loopcnt, rdata);
1012 goto out;
1013 }
1014 pnode = rdata->pnode;
1015 if (!pnode)
1016 return FAILED;
991 } 1017 }
992 if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE)) 1018 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
993 break; 1019 break;
994 } 1020 }
995 1021
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 582f5ea4e84e..a4128e19338a 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -117,6 +117,10 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
117 case CMD_FCP_IREAD_CX: 117 case CMD_FCP_IREAD_CX:
118 case CMD_FCP_ICMND_CR: 118 case CMD_FCP_ICMND_CR:
119 case CMD_FCP_ICMND_CX: 119 case CMD_FCP_ICMND_CX:
120 case CMD_FCP_TSEND_CX:
121 case CMD_FCP_TRSP_CX:
122 case CMD_FCP_TRECEIVE_CX:
123 case CMD_FCP_AUTO_TRSP_CX:
120 case CMD_ADAPTER_MSG: 124 case CMD_ADAPTER_MSG:
121 case CMD_ADAPTER_DUMP: 125 case CMD_ADAPTER_DUMP:
122 case CMD_XMIT_SEQUENCE64_CR: 126 case CMD_XMIT_SEQUENCE64_CR:
@@ -131,6 +135,9 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
131 case CMD_FCP_IREAD64_CX: 135 case CMD_FCP_IREAD64_CX:
132 case CMD_FCP_ICMND64_CR: 136 case CMD_FCP_ICMND64_CR:
133 case CMD_FCP_ICMND64_CX: 137 case CMD_FCP_ICMND64_CX:
138 case CMD_FCP_TSEND64_CX:
139 case CMD_FCP_TRSP64_CX:
140 case CMD_FCP_TRECEIVE64_CX:
134 case CMD_GEN_REQUEST64_CR: 141 case CMD_GEN_REQUEST64_CR:
135 case CMD_GEN_REQUEST64_CX: 142 case CMD_GEN_REQUEST64_CX:
136 case CMD_XMIT_ELS_RSP64_CX: 143 case CMD_XMIT_ELS_RSP64_CX:
@@ -1098,6 +1105,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1098 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 1105 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1099 (uint32_t *) &rspiocbq.iocb, 1106 (uint32_t *) &rspiocbq.iocb,
1100 sizeof (IOCB_t)); 1107 sizeof (IOCB_t));
1108 INIT_LIST_HEAD(&(rspiocbq.list));
1101 irsp = &rspiocbq.iocb; 1109 irsp = &rspiocbq.iocb;
1102 1110
1103 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 1111 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
@@ -1149,6 +1157,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1149 } 1157 }
1150 } 1158 }
1151 break; 1159 break;
1160 case LPFC_UNSOL_IOCB:
1161 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1162 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
1163 spin_lock_irqsave(phba->host->host_lock, iflag);
1164 break;
1152 default: 1165 default:
1153 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1166 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1154 char adaptermsg[LPFC_MAX_ADPTMSG]; 1167 char adaptermsg[LPFC_MAX_ADPTMSG];
@@ -2472,13 +2485,17 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
2472 psli = &phba->sli; 2485 psli = &phba->sli;
2473 2486
2474 /* Adjust cmd/rsp ring iocb entries more evenly */ 2487 /* Adjust cmd/rsp ring iocb entries more evenly */
2488
2489 /* Take some away from the FCP ring */
2475 pring = &psli->ring[psli->fcp_ring]; 2490 pring = &psli->ring[psli->fcp_ring];
2476 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 2491 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2477 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 2492 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2478 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 2493 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2479 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 2494 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2480 2495
2481 pring = &psli->ring[1]; 2496 /* and give them to the extra ring */
2497 pring = &psli->ring[psli->extra_ring];
2498
2482 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 2499 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2483 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 2500 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2484 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 2501 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
@@ -2488,8 +2505,8 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
2488 pring->iotag_max = 4096; 2505 pring->iotag_max = 4096;
2489 pring->num_mask = 1; 2506 pring->num_mask = 1;
2490 pring->prt[0].profile = 0; /* Mask 0 */ 2507 pring->prt[0].profile = 0; /* Mask 0 */
2491 pring->prt[0].rctl = FC_UNSOL_DATA; 2508 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
2492 pring->prt[0].type = 5; 2509 pring->prt[0].type = phba->cfg_multi_ring_type;
2493 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 2510 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
2494 return 0; 2511 return 0;
2495} 2512}
@@ -2505,7 +2522,7 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2505 psli->sli_flag = 0; 2522 psli->sli_flag = 0;
2506 psli->fcp_ring = LPFC_FCP_RING; 2523 psli->fcp_ring = LPFC_FCP_RING;
2507 psli->next_ring = LPFC_FCP_NEXT_RING; 2524 psli->next_ring = LPFC_FCP_NEXT_RING;
2508 psli->ip_ring = LPFC_IP_RING; 2525 psli->extra_ring = LPFC_EXTRA_RING;
2509 2526
2510 psli->iocbq_lookup = NULL; 2527 psli->iocbq_lookup = NULL;
2511 psli->iocbq_lookup_len = 0; 2528 psli->iocbq_lookup_len = 0;
@@ -2528,7 +2545,7 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2528 pring->fast_iotag = pring->iotag_max; 2545 pring->fast_iotag = pring->iotag_max;
2529 pring->num_mask = 0; 2546 pring->num_mask = 0;
2530 break; 2547 break;
2531 case LPFC_IP_RING: /* ring 1 - IP */ 2548 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
2532 /* numCiocb and numRiocb are used in config_port */ 2549 /* numCiocb and numRiocb are used in config_port */
2533 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 2550 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
2534 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 2551 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
@@ -3238,6 +3255,21 @@ lpfc_intr_handler(int irq, void *dev_id)
3238 lpfc_sli_handle_fast_ring_event(phba, 3255 lpfc_sli_handle_fast_ring_event(phba,
3239 &phba->sli.ring[LPFC_FCP_RING], 3256 &phba->sli.ring[LPFC_FCP_RING],
3240 status); 3257 status);
3258
3259 if (phba->cfg_multi_ring_support == 2) {
3260 /*
3261 * Process all events on extra ring. Take the optimized path
3262 * for extra ring IO. Any other IO is slow path and is handled
3263 * by the worker thread.
3264 */
3265 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
3266 status >>= (4*LPFC_EXTRA_RING);
3267 if (status & HA_RXATT) {
3268 lpfc_sli_handle_fast_ring_event(phba,
3269 &phba->sli.ring[LPFC_EXTRA_RING],
3270 status);
3271 }
3272 }
3241 return IRQ_HANDLED; 3273 return IRQ_HANDLED;
3242 3274
3243} /* lpfc_intr_handler */ 3275} /* lpfc_intr_handler */
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index e26de6809358..a43549959dc7 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -198,7 +198,7 @@ struct lpfc_sli {
198 int fcp_ring; /* ring used for FCP initiator commands */ 198 int fcp_ring; /* ring used for FCP initiator commands */
199 int next_ring; 199 int next_ring;
200 200
201 int ip_ring; /* ring used for IP network drv cmds */ 201 int extra_ring; /* extra ring used for other protocols */
202 202
203 struct lpfc_sli_stat slistat; /* SLI statistical info */ 203 struct lpfc_sli_stat slistat; /* SLI statistical info */
204 struct list_head mboxq; 204 struct list_head mboxq;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ac417908b407..a61ef3d1e7f1 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.1.10" 21#define LPFC_DRIVER_VERSION "8.1.11"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 86099fde1b2a..77d9d3804ccf 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -73,10 +73,10 @@ static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
73module_param(max_mbox_busy_wait, ushort, 0); 73module_param(max_mbox_busy_wait, ushort, 0);
74MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)"); 74MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");
75 75
76#define RDINDOOR(adapter) readl((adapter)->base + 0x20) 76#define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20)
77#define RDOUTDOOR(adapter) readl((adapter)->base + 0x2C) 77#define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C)
78#define WRINDOOR(adapter,value) writel(value, (adapter)->base + 0x20) 78#define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20)
79#define WROUTDOOR(adapter,value) writel(value, (adapter)->base + 0x2C) 79#define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)
80 80
81/* 81/*
82 * Global variables 82 * Global variables
@@ -1386,7 +1386,8 @@ megaraid_isr_memmapped(int irq, void *devp)
1386 1386
1387 handled = 1; 1387 handled = 1;
1388 1388
1389 while( RDINDOOR(adapter) & 0x02 ) cpu_relax(); 1389 while( RDINDOOR(adapter) & 0x02 )
1390 cpu_relax();
1390 1391
1391 mega_cmd_done(adapter, completed, nstatus, status); 1392 mega_cmd_done(adapter, completed, nstatus, status);
1392 1393
@@ -4668,6 +4669,8 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4668 host->host_no, mega_baseport, irq); 4669 host->host_no, mega_baseport, irq);
4669 4670
4670 adapter->base = mega_baseport; 4671 adapter->base = mega_baseport;
4672 if (flag & BOARD_MEMMAP)
4673 adapter->mmio_base = (void __iomem *) mega_baseport;
4671 4674
4672 INIT_LIST_HEAD(&adapter->free_list); 4675 INIT_LIST_HEAD(&adapter->free_list);
4673 INIT_LIST_HEAD(&adapter->pending_list); 4676 INIT_LIST_HEAD(&adapter->pending_list);
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 66529f11d23c..c6e74643abe2 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -801,7 +801,8 @@ typedef struct {
801 clustering is available */ 801 clustering is available */
802 u32 flag; 802 u32 flag;
803 803
804 unsigned long base; 804 unsigned long base;
805 void __iomem *mmio_base;
805 806
806 /* mbox64 with mbox not aligned on 16-byte boundry */ 807 /* mbox64 with mbox not aligned on 16-byte boundry */
807 mbox64_t *una_mbox64; 808 mbox64_t *una_mbox64;
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 7e4262f2af96..046223b4ae57 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -517,7 +517,7 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
517 * Returns the number of frames required for numnber of sge's (sge_count) 517 * Returns the number of frames required for numnber of sge's (sge_count)
518 */ 518 */
519 519
520u32 megasas_get_frame_count(u8 sge_count) 520static u32 megasas_get_frame_count(u8 sge_count)
521{ 521{
522 int num_cnt; 522 int num_cnt;
523 int sge_bytes; 523 int sge_bytes;
@@ -1733,7 +1733,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
1733 * 1733 *
1734 * Tasklet to complete cmds 1734 * Tasklet to complete cmds
1735 */ 1735 */
1736void megasas_complete_cmd_dpc(unsigned long instance_addr) 1736static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1737{ 1737{
1738 u32 producer; 1738 u32 producer;
1739 u32 consumer; 1739 u32 consumer;
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index adb8eb4f5fd1..bbf521cbc55d 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -589,10 +589,12 @@ static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
589static struct ncr_driver_setup 589static struct ncr_driver_setup
590 driver_setup = SCSI_NCR_DRIVER_SETUP; 590 driver_setup = SCSI_NCR_DRIVER_SETUP;
591 591
592#ifndef MODULE
592#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT 593#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
593static struct ncr_driver_setup 594static struct ncr_driver_setup
594 driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP; 595 driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
595#endif 596#endif
597#endif /* !MODULE */
596 598
597#define initverbose (driver_setup.verbose) 599#define initverbose (driver_setup.verbose)
598#define bootverbose (np->verbose) 600#define bootverbose (np->verbose)
@@ -641,6 +643,13 @@ static struct ncr_driver_setup
641#define OPT_IARB 26 643#define OPT_IARB 26
642#endif 644#endif
643 645
646#ifdef MODULE
647#define ARG_SEP ' '
648#else
649#define ARG_SEP ','
650#endif
651
652#ifndef MODULE
644static char setup_token[] __initdata = 653static char setup_token[] __initdata =
645 "tags:" "mpar:" 654 "tags:" "mpar:"
646 "spar:" "disc:" 655 "spar:" "disc:"
@@ -660,12 +669,6 @@ static char setup_token[] __initdata =
660#endif 669#endif
661 ; /* DONNOT REMOVE THIS ';' */ 670 ; /* DONNOT REMOVE THIS ';' */
662 671
663#ifdef MODULE
664#define ARG_SEP ' '
665#else
666#define ARG_SEP ','
667#endif
668
669static int __init get_setup_token(char *p) 672static int __init get_setup_token(char *p)
670{ 673{
671 char *cur = setup_token; 674 char *cur = setup_token;
@@ -682,7 +685,6 @@ static int __init get_setup_token(char *p)
682 return 0; 685 return 0;
683} 686}
684 687
685
686static int __init sym53c8xx__setup(char *str) 688static int __init sym53c8xx__setup(char *str)
687{ 689{
688#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT 690#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
@@ -804,6 +806,7 @@ static int __init sym53c8xx__setup(char *str)
804#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */ 806#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
805 return 1; 807 return 1;
806} 808}
809#endif /* !MODULE */
807 810
808/*=================================================================== 811/*===================================================================
809** 812**
@@ -8321,12 +8324,12 @@ char *ncr53c8xx; /* command line passed by insmod */
8321module_param(ncr53c8xx, charp, 0); 8324module_param(ncr53c8xx, charp, 0);
8322#endif 8325#endif
8323 8326
8327#ifndef MODULE
8324static int __init ncr53c8xx_setup(char *str) 8328static int __init ncr53c8xx_setup(char *str)
8325{ 8329{
8326 return sym53c8xx__setup(str); 8330 return sym53c8xx__setup(str);
8327} 8331}
8328 8332
8329#ifndef MODULE
8330__setup("ncr53c8xx=", ncr53c8xx_setup); 8333__setup("ncr53c8xx=", ncr53c8xx_setup);
8331#endif 8334#endif
8332 8335
diff --git a/drivers/scsi/oktagon_esp.c b/drivers/scsi/oktagon_esp.c
index dd67a68c5c23..c116a6ae3c54 100644
--- a/drivers/scsi/oktagon_esp.c
+++ b/drivers/scsi/oktagon_esp.c
@@ -72,12 +72,12 @@ static void dma_advance_sg(Scsi_Cmnd *);
72static int oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x); 72static int oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x);
73 73
74#ifdef USE_BOTTOM_HALF 74#ifdef USE_BOTTOM_HALF
75static void dma_commit(void *opaque); 75static void dma_commit(struct work_struct *unused);
76 76
77long oktag_to_io(long *paddr, long *addr, long len); 77long oktag_to_io(long *paddr, long *addr, long len);
78long oktag_from_io(long *addr, long *paddr, long len); 78long oktag_from_io(long *addr, long *paddr, long len);
79 79
80static DECLARE_WORK(tq_fake_dma, dma_commit, NULL); 80static DECLARE_WORK(tq_fake_dma, dma_commit);
81 81
82#define DMA_MAXTRANSFER 0x8000 82#define DMA_MAXTRANSFER 0x8000
83 83
@@ -266,7 +266,7 @@ oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
266 */ 266 */
267 267
268 268
269static void dma_commit(void *opaque) 269static void dma_commit(struct work_struct *unused)
270{ 270{
271 long wait,len2,pos; 271 long wait,len2,pos;
272 struct NCR_ESP *esp; 272 struct NCR_ESP *esp;
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index ee449b29fc82..aad362ba02e0 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -154,16 +154,11 @@ static int aha152x_config_cs(struct pcmcia_device *link)
154 154
155 DEBUG(0, "aha152x_config(0x%p)\n", link); 155 DEBUG(0, "aha152x_config(0x%p)\n", link);
156 156
157 tuple.DesiredTuple = CISTPL_CONFIG;
158 tuple.TupleData = tuple_data; 157 tuple.TupleData = tuple_data;
159 tuple.TupleDataMax = 64; 158 tuple.TupleDataMax = 64;
160 tuple.TupleOffset = 0; 159 tuple.TupleOffset = 0;
161 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
162 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
163 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
164 link->conf.ConfigBase = parse.config.base;
165
166 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 160 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
161 tuple.Attributes = 0;
167 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 162 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
168 while (1) { 163 while (1) {
169 if (pcmcia_get_tuple_data(link, &tuple) != 0 || 164 if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 85f7ffac19a0..a1c5f265069f 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -136,14 +136,9 @@ static int fdomain_config(struct pcmcia_device *link)
136 136
137 DEBUG(0, "fdomain_config(0x%p)\n", link); 137 DEBUG(0, "fdomain_config(0x%p)\n", link);
138 138
139 tuple.DesiredTuple = CISTPL_CONFIG;
140 tuple.TupleData = tuple_data; 139 tuple.TupleData = tuple_data;
141 tuple.TupleDataMax = 64; 140 tuple.TupleDataMax = 64;
142 tuple.TupleOffset = 0; 141 tuple.TupleOffset = 0;
143 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
144 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
145 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
146 link->conf.ConfigBase = parse.config.base;
147 142
148 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 143 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
149 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 144 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index f2d79c3f0b8e..d72df5dae4ee 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1685,16 +1685,10 @@ static int nsp_cs_config(struct pcmcia_device *link)
1685 1685
1686 nsp_dbg(NSP_DEBUG_INIT, "in"); 1686 nsp_dbg(NSP_DEBUG_INIT, "in");
1687 1687
1688 tuple.DesiredTuple = CISTPL_CONFIG;
1689 tuple.Attributes = 0; 1688 tuple.Attributes = 0;
1690 tuple.TupleData = tuple_data; 1689 tuple.TupleData = tuple_data;
1691 tuple.TupleDataMax = sizeof(tuple_data); 1690 tuple.TupleDataMax = sizeof(tuple_data);
1692 tuple.TupleOffset = 0; 1691 tuple.TupleOffset = 0;
1693 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
1694 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
1695 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
1696 link->conf.ConfigBase = parse.config.base;
1697 link->conf.Present = parse.config.rmask[0];
1698 1692
1699 /* Look up the current Vcc */ 1693 /* Look up the current Vcc */
1700 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &conf)); 1694 CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &conf));
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 86c2ac6ae623..9d431fe7f47f 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -208,18 +208,11 @@ static int qlogic_config(struct pcmcia_device * link)
208 208
209 DEBUG(0, "qlogic_config(0x%p)\n", link); 209 DEBUG(0, "qlogic_config(0x%p)\n", link);
210 210
211 info->manf_id = link->manf_id;
212
211 tuple.TupleData = (cisdata_t *) tuple_data; 213 tuple.TupleData = (cisdata_t *) tuple_data;
212 tuple.TupleDataMax = 64; 214 tuple.TupleDataMax = 64;
213 tuple.TupleOffset = 0; 215 tuple.TupleOffset = 0;
214 tuple.DesiredTuple = CISTPL_CONFIG;
215 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
216 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
217 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
218 link->conf.ConfigBase = parse.config.base;
219
220 tuple.DesiredTuple = CISTPL_MANFID;
221 if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) && (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS))
222 info->manf_id = le16_to_cpu(tuple.TupleData[0]);
223 216
224 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 217 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
225 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 218 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 72fe5d055de1..fb7acea60286 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -722,19 +722,11 @@ SYM53C500_config(struct pcmcia_device *link)
722 722
723 DEBUG(0, "SYM53C500_config(0x%p)\n", link); 723 DEBUG(0, "SYM53C500_config(0x%p)\n", link);
724 724
725 info->manf_id = link->manf_id;
726
725 tuple.TupleData = (cisdata_t *)tuple_data; 727 tuple.TupleData = (cisdata_t *)tuple_data;
726 tuple.TupleDataMax = 64; 728 tuple.TupleDataMax = 64;
727 tuple.TupleOffset = 0; 729 tuple.TupleOffset = 0;
728 tuple.DesiredTuple = CISTPL_CONFIG;
729 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
730 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
731 CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
732 link->conf.ConfigBase = parse.config.base;
733
734 tuple.DesiredTuple = CISTPL_MANFID;
735 if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) &&
736 (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS))
737 info->manf_id = le16_to_cpu(tuple.TupleData[0]);
738 730
739 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 731 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
740 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 732 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 89a2a9f11e41..584ba4d6e038 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -31,7 +31,7 @@ typedef struct {
31 int base; /* Actual port address */ 31 int base; /* Actual port address */
32 int mode; /* Transfer mode */ 32 int mode; /* Transfer mode */
33 struct scsi_cmnd *cur_cmd; /* Current queued command */ 33 struct scsi_cmnd *cur_cmd; /* Current queued command */
34 struct work_struct ppa_tq; /* Polling interrupt stuff */ 34 struct delayed_work ppa_tq; /* Polling interrupt stuff */
35 unsigned long jstart; /* Jiffies at start */ 35 unsigned long jstart; /* Jiffies at start */
36 unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */ 36 unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */
37 unsigned int failed:1; /* Failure flag */ 37 unsigned int failed:1; /* Failure flag */
@@ -627,9 +627,9 @@ static int ppa_completion(struct scsi_cmnd *cmd)
627 * the scheduler's task queue to generate a stream of call-backs and 627 * the scheduler's task queue to generate a stream of call-backs and
628 * complete the request when the drive is ready. 628 * complete the request when the drive is ready.
629 */ 629 */
630static void ppa_interrupt(void *data) 630static void ppa_interrupt(struct work_struct *work)
631{ 631{
632 ppa_struct *dev = (ppa_struct *) data; 632 ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work);
633 struct scsi_cmnd *cmd = dev->cur_cmd; 633 struct scsi_cmnd *cmd = dev->cur_cmd;
634 634
635 if (!cmd) { 635 if (!cmd) {
@@ -637,7 +637,6 @@ static void ppa_interrupt(void *data)
637 return; 637 return;
638 } 638 }
639 if (ppa_engine(dev, cmd)) { 639 if (ppa_engine(dev, cmd)) {
640 dev->ppa_tq.data = (void *) dev;
641 schedule_delayed_work(&dev->ppa_tq, 1); 640 schedule_delayed_work(&dev->ppa_tq, 1);
642 return; 641 return;
643 } 642 }
@@ -822,8 +821,7 @@ static int ppa_queuecommand(struct scsi_cmnd *cmd,
822 cmd->result = DID_ERROR << 16; /* default return code */ 821 cmd->result = DID_ERROR << 16; /* default return code */
823 cmd->SCp.phase = 0; /* bus free */ 822 cmd->SCp.phase = 0; /* bus free */
824 823
825 dev->ppa_tq.data = dev; 824 schedule_delayed_work(&dev->ppa_tq, 0);
826 schedule_work(&dev->ppa_tq);
827 825
828 ppa_pb_claim(dev); 826 ppa_pb_claim(dev);
829 827
@@ -1086,7 +1084,7 @@ static int __ppa_attach(struct parport *pb)
1086 else 1084 else
1087 ports = 8; 1085 ports = 8;
1088 1086
1089 INIT_WORK(&dev->ppa_tq, ppa_interrupt, dev); 1087 INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt);
1090 1088
1091 err = -ENOMEM; 1089 err = -ENOMEM;
1092 host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *)); 1090 host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *));
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 285c8e8ff1a0..7b18a6c7b7eb 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -390,7 +390,7 @@ static struct sysfs_entry {
390 { "optrom_ctl", &sysfs_optrom_ctl_attr, }, 390 { "optrom_ctl", &sysfs_optrom_ctl_attr, },
391 { "vpd", &sysfs_vpd_attr, 1 }, 391 { "vpd", &sysfs_vpd_attr, 1 },
392 { "sfp", &sysfs_sfp_attr, 1 }, 392 { "sfp", &sysfs_sfp_attr, 1 },
393 { 0 }, 393 { NULL },
394}; 394};
395 395
396void 396void
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 08cb5e3fb553..a823f0bc519d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -59,9 +59,6 @@ int
59qla2x00_initialize_adapter(scsi_qla_host_t *ha) 59qla2x00_initialize_adapter(scsi_qla_host_t *ha)
60{ 60{
61 int rval; 61 int rval;
62 uint8_t restart_risc = 0;
63 uint8_t retry;
64 uint32_t wait_time;
65 62
66 /* Clear adapter flags. */ 63 /* Clear adapter flags. */
67 ha->flags.online = 0; 64 ha->flags.online = 0;
@@ -104,87 +101,15 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
104 101
105 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n"); 102 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
106 103
107 retry = 10; 104 if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
108 /* 105 rval = ha->isp_ops.chip_diag(ha);
109 * Try to configure the loop. 106 if (rval)
110 */ 107 return (rval);
111 do { 108 rval = qla2x00_setup_chip(ha);
112 restart_risc = 0; 109 if (rval)
113 110 return (rval);
114 /* If firmware needs to be loaded */
115 if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
116 if ((rval = ha->isp_ops.chip_diag(ha)) == QLA_SUCCESS) {
117 rval = qla2x00_setup_chip(ha);
118 }
119 }
120
121 if (rval == QLA_SUCCESS &&
122 (rval = qla2x00_init_rings(ha)) == QLA_SUCCESS) {
123check_fw_ready_again:
124 /*
125 * Wait for a successful LIP up to a maximum
126 * of (in seconds): RISC login timeout value,
127 * RISC retry count value, and port down retry
128 * value OR a minimum of 4 seconds OR If no
129 * cable, only 5 seconds.
130 */
131 rval = qla2x00_fw_ready(ha);
132 if (rval == QLA_SUCCESS) {
133 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
134
135 /* Issue a marker after FW becomes ready. */
136 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
137
138 /*
139 * Wait at most MAX_TARGET RSCNs for a stable
140 * link.
141 */
142 wait_time = 256;
143 do {
144 clear_bit(LOOP_RESYNC_NEEDED,
145 &ha->dpc_flags);
146 rval = qla2x00_configure_loop(ha);
147
148 if (test_and_clear_bit(ISP_ABORT_NEEDED,
149 &ha->dpc_flags)) {
150 restart_risc = 1;
151 break;
152 }
153
154 /*
155 * If loop state change while we were
156 * discoverying devices then wait for
157 * LIP to complete
158 */
159
160 if (atomic_read(&ha->loop_state) !=
161 LOOP_READY && retry--) {
162 goto check_fw_ready_again;
163 }
164 wait_time--;
165 } while (!atomic_read(&ha->loop_down_timer) &&
166 retry &&
167 wait_time &&
168 (test_bit(LOOP_RESYNC_NEEDED,
169 &ha->dpc_flags)));
170
171 if (wait_time == 0)
172 rval = QLA_FUNCTION_FAILED;
173 } else if (ha->device_flags & DFLG_NO_CABLE)
174 /* If no cable, then all is good. */
175 rval = QLA_SUCCESS;
176 }
177 } while (restart_risc && retry--);
178
179 if (rval == QLA_SUCCESS) {
180 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
181 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
182 ha->marker_needed = 0;
183
184 ha->flags.online = 1;
185 } else {
186 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
187 } 111 }
112 rval = qla2x00_init_rings(ha);
188 113
189 return (rval); 114 return (rval);
190} 115}
@@ -2208,8 +2133,7 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2208 2133
2209 atomic_set(&fcport->state, FCS_ONLINE); 2134 atomic_set(&fcport->state, FCS_ONLINE);
2210 2135
2211 if (ha->flags.init_done) 2136 qla2x00_reg_remote_port(ha, fcport);
2212 qla2x00_reg_remote_port(ha, fcport);
2213} 2137}
2214 2138
2215void 2139void
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 208607be78c7..cbe0cad83b68 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -95,6 +95,8 @@ MODULE_PARM_DESC(ql2xqfullrampup,
95 */ 95 */
96static int qla2xxx_slave_configure(struct scsi_device * device); 96static int qla2xxx_slave_configure(struct scsi_device * device);
97static int qla2xxx_slave_alloc(struct scsi_device *); 97static int qla2xxx_slave_alloc(struct scsi_device *);
98static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
99static void qla2xxx_scan_start(struct Scsi_Host *);
98static void qla2xxx_slave_destroy(struct scsi_device *); 100static void qla2xxx_slave_destroy(struct scsi_device *);
99static int qla2x00_queuecommand(struct scsi_cmnd *cmd, 101static int qla2x00_queuecommand(struct scsi_cmnd *cmd,
100 void (*fn)(struct scsi_cmnd *)); 102 void (*fn)(struct scsi_cmnd *));
@@ -124,6 +126,8 @@ static struct scsi_host_template qla2x00_driver_template = {
124 126
125 .slave_alloc = qla2xxx_slave_alloc, 127 .slave_alloc = qla2xxx_slave_alloc,
126 .slave_destroy = qla2xxx_slave_destroy, 128 .slave_destroy = qla2xxx_slave_destroy,
129 .scan_finished = qla2xxx_scan_finished,
130 .scan_start = qla2xxx_scan_start,
127 .change_queue_depth = qla2x00_change_queue_depth, 131 .change_queue_depth = qla2x00_change_queue_depth,
128 .change_queue_type = qla2x00_change_queue_type, 132 .change_queue_type = qla2x00_change_queue_type,
129 .this_id = -1, 133 .this_id = -1,
@@ -287,7 +291,7 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
287 return str; 291 return str;
288} 292}
289 293
290char * 294static char *
291qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str) 295qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
292{ 296{
293 char un_str[10]; 297 char un_str[10];
@@ -325,7 +329,7 @@ qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
325 return (str); 329 return (str);
326} 330}
327 331
328char * 332static char *
329qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str) 333qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
330{ 334{
331 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 335 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
@@ -634,7 +638,7 @@ qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
634* Note: 638* Note:
635* Only return FAILED if command not returned by firmware. 639* Only return FAILED if command not returned by firmware.
636**************************************************************************/ 640**************************************************************************/
637int 641static int
638qla2xxx_eh_abort(struct scsi_cmnd *cmd) 642qla2xxx_eh_abort(struct scsi_cmnd *cmd)
639{ 643{
640 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 644 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -771,7 +775,7 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
771* SUCCESS/FAILURE (defined as macro in scsi.h). 775* SUCCESS/FAILURE (defined as macro in scsi.h).
772* 776*
773**************************************************************************/ 777**************************************************************************/
774int 778static int
775qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) 779qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
776{ 780{
777 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 781 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -902,7 +906,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha)
902* SUCCESS/FAILURE (defined as macro in scsi.h). 906* SUCCESS/FAILURE (defined as macro in scsi.h).
903* 907*
904**************************************************************************/ 908**************************************************************************/
905int 909static int
906qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) 910qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
907{ 911{
908 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 912 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -963,7 +967,7 @@ eh_bus_reset_done:
963* 967*
964* Note: 968* Note:
965**************************************************************************/ 969**************************************************************************/
966int 970static int
967qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) 971qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
968{ 972{
969 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 973 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -1366,6 +1370,29 @@ qla24xx_disable_intrs(scsi_qla_host_t *ha)
1366 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1370 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1367} 1371}
1368 1372
1373static void
1374qla2xxx_scan_start(struct Scsi_Host *shost)
1375{
1376 scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata;
1377
1378 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
1379 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
1380 set_bit(RSCN_UPDATE, &ha->dpc_flags);
1381}
1382
1383static int
1384qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
1385{
1386 scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata;
1387
1388 if (!ha->host)
1389 return 1;
1390 if (time > ha->loop_reset_delay * HZ)
1391 return 1;
1392
1393 return atomic_read(&ha->loop_state) == LOOP_READY;
1394}
1395
1369/* 1396/*
1370 * PCI driver interface 1397 * PCI driver interface
1371 */ 1398 */
@@ -1377,10 +1404,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1377 struct Scsi_Host *host; 1404 struct Scsi_Host *host;
1378 scsi_qla_host_t *ha; 1405 scsi_qla_host_t *ha;
1379 unsigned long flags = 0; 1406 unsigned long flags = 0;
1380 unsigned long wait_switch = 0;
1381 char pci_info[20]; 1407 char pci_info[20];
1382 char fw_str[30]; 1408 char fw_str[30];
1383 fc_port_t *fcport;
1384 struct scsi_host_template *sht; 1409 struct scsi_host_template *sht;
1385 1410
1386 if (pci_enable_device(pdev)) 1411 if (pci_enable_device(pdev))
@@ -1631,30 +1656,19 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1631 1656
1632 ha->isp_ops.enable_intrs(ha); 1657 ha->isp_ops.enable_intrs(ha);
1633 1658
1634 /* v2.19.5b6 */
1635 /*
1636 * Wait around max loop_reset_delay secs for the devices to come
1637 * on-line. We don't want Linux scanning before we are ready.
1638 *
1639 */
1640 for (wait_switch = jiffies + (ha->loop_reset_delay * HZ);
1641 time_before(jiffies,wait_switch) &&
1642 !(ha->device_flags & (DFLG_NO_CABLE | DFLG_FABRIC_DEVICES))
1643 && (ha->device_flags & SWITCH_FOUND) ;) {
1644
1645 qla2x00_check_fabric_devices(ha);
1646
1647 msleep(10);
1648 }
1649
1650 pci_set_drvdata(pdev, ha); 1659 pci_set_drvdata(pdev, ha);
1660
1651 ha->flags.init_done = 1; 1661 ha->flags.init_done = 1;
1662 ha->flags.online = 1;
1663
1652 num_hosts++; 1664 num_hosts++;
1653 1665
1654 ret = scsi_add_host(host, &pdev->dev); 1666 ret = scsi_add_host(host, &pdev->dev);
1655 if (ret) 1667 if (ret)
1656 goto probe_failed; 1668 goto probe_failed;
1657 1669
1670 scsi_scan_host(host);
1671
1658 qla2x00_alloc_sysfs_attr(ha); 1672 qla2x00_alloc_sysfs_attr(ha);
1659 1673
1660 qla2x00_init_host_attr(ha); 1674 qla2x00_init_host_attr(ha);
@@ -1669,10 +1683,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1669 ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no, 1683 ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no,
1670 ha->isp_ops.fw_version_str(ha, fw_str)); 1684 ha->isp_ops.fw_version_str(ha, fw_str));
1671 1685
1672 /* Go with fc_rport registration. */
1673 list_for_each_entry(fcport, &ha->fcports, list)
1674 qla2x00_reg_remote_port(ha, fcport);
1675
1676 return 0; 1686 return 0;
1677 1687
1678probe_failed: 1688probe_failed:
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index c71dbd5bd543..15390ad87456 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -449,7 +449,7 @@ nvram_data_to_access_addr(uint32_t naddr)
449 return FARX_ACCESS_NVRAM_DATA | naddr; 449 return FARX_ACCESS_NVRAM_DATA | naddr;
450} 450}
451 451
452uint32_t 452static uint32_t
453qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr) 453qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
454{ 454{
455 int rval; 455 int rval;
@@ -490,7 +490,7 @@ qla24xx_read_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
490 return dwptr; 490 return dwptr;
491} 491}
492 492
493int 493static int
494qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data) 494qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
495{ 495{
496 int rval; 496 int rval;
@@ -512,7 +512,7 @@ qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
512 return rval; 512 return rval;
513} 513}
514 514
515void 515static void
516qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, 516qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
517 uint8_t *flash_id) 517 uint8_t *flash_id)
518{ 518{
@@ -537,7 +537,7 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
537 } 537 }
538} 538}
539 539
540int 540static int
541qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, 541qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
542 uint32_t dwords) 542 uint32_t dwords)
543{ 543{
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 752031fadfef..7b4e077a39c1 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -71,7 +71,7 @@ void __dump_registers(struct scsi_qla_host *ha)
71 readw(&ha->reg->u1.isp4010.nvram)); 71 readw(&ha->reg->u1.isp4010.nvram));
72 } 72 }
73 73
74 else if (is_qla4022(ha)) { 74 else if (is_qla4022(ha) | is_qla4032(ha)) {
75 printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n", 75 printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
76 (uint8_t) offsetof(struct isp_reg, 76 (uint8_t) offsetof(struct isp_reg,
77 u1.isp4022.intr_mask), 77 u1.isp4022.intr_mask),
@@ -119,7 +119,7 @@ void __dump_registers(struct scsi_qla_host *ha)
119 readw(&ha->reg->u2.isp4010.port_err_status)); 119 readw(&ha->reg->u2.isp4010.port_err_status));
120 } 120 }
121 121
122 else if (is_qla4022(ha)) { 122 else if (is_qla4022(ha) | is_qla4032(ha)) {
123 printk(KERN_INFO "Page 0 Registers:\n"); 123 printk(KERN_INFO "Page 0 Registers:\n");
124 printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", 124 printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
125 (uint8_t) offsetof(struct isp_reg, 125 (uint8_t) offsetof(struct isp_reg,
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index a7f6c7b1c590..4249e52a5592 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -40,7 +40,11 @@
40 40
41#ifndef PCI_DEVICE_ID_QLOGIC_ISP4022 41#ifndef PCI_DEVICE_ID_QLOGIC_ISP4022
42#define PCI_DEVICE_ID_QLOGIC_ISP4022 0x4022 42#define PCI_DEVICE_ID_QLOGIC_ISP4022 0x4022
43#endif /* */ 43#endif
44
45#ifndef PCI_DEVICE_ID_QLOGIC_ISP4032
46#define PCI_DEVICE_ID_QLOGIC_ISP4032 0x4032
47#endif
44 48
45#define QLA_SUCCESS 0 49#define QLA_SUCCESS 0
46#define QLA_ERROR 1 50#define QLA_ERROR 1
@@ -277,7 +281,6 @@ struct scsi_qla_host {
277#define AF_INTERRUPTS_ON 6 /* 0x00000040 Not Used */ 281#define AF_INTERRUPTS_ON 6 /* 0x00000040 Not Used */
278#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */ 282#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
279#define AF_LINK_UP 8 /* 0x00000100 */ 283#define AF_LINK_UP 8 /* 0x00000100 */
280#define AF_TOPCAT_CHIP_PRESENT 9 /* 0x00000200 */
281#define AF_IRQ_ATTACHED 10 /* 0x00000400 */ 284#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
282#define AF_ISNS_CMD_IN_PROCESS 12 /* 0x00001000 */ 285#define AF_ISNS_CMD_IN_PROCESS 12 /* 0x00001000 */
283#define AF_ISNS_CMD_DONE 13 /* 0x00002000 */ 286#define AF_ISNS_CMD_DONE 13 /* 0x00002000 */
@@ -317,16 +320,17 @@ struct scsi_qla_host {
317 /* NVRAM registers */ 320 /* NVRAM registers */
318 struct eeprom_data *nvram; 321 struct eeprom_data *nvram;
319 spinlock_t hardware_lock ____cacheline_aligned; 322 spinlock_t hardware_lock ____cacheline_aligned;
320 spinlock_t list_lock;
321 uint32_t eeprom_cmd_data; 323 uint32_t eeprom_cmd_data;
322 324
323 /* Counters for general statistics */ 325 /* Counters for general statistics */
326 uint64_t isr_count;
324 uint64_t adapter_error_count; 327 uint64_t adapter_error_count;
325 uint64_t device_error_count; 328 uint64_t device_error_count;
326 uint64_t total_io_count; 329 uint64_t total_io_count;
327 uint64_t total_mbytes_xferred; 330 uint64_t total_mbytes_xferred;
328 uint64_t link_failure_count; 331 uint64_t link_failure_count;
329 uint64_t invalid_crc_count; 332 uint64_t invalid_crc_count;
333 uint32_t bytes_xfered;
330 uint32_t spurious_int_count; 334 uint32_t spurious_int_count;
331 uint32_t aborted_io_count; 335 uint32_t aborted_io_count;
332 uint32_t io_timeout_count; 336 uint32_t io_timeout_count;
@@ -438,6 +442,11 @@ static inline int is_qla4022(struct scsi_qla_host *ha)
438 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022; 442 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022;
439} 443}
440 444
445static inline int is_qla4032(struct scsi_qla_host *ha)
446{
447 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032;
448}
449
441static inline int adapter_up(struct scsi_qla_host *ha) 450static inline int adapter_up(struct scsi_qla_host *ha)
442{ 451{
443 return (test_bit(AF_ONLINE, &ha->flags) != 0) && 452 return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
@@ -451,58 +460,58 @@ static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
451 460
452static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha) 461static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
453{ 462{
454 return (is_qla4022(ha) ? 463 return (is_qla4010(ha) ?
455 &ha->reg->u1.isp4022.semaphore : 464 &ha->reg->u1.isp4010.nvram :
456 &ha->reg->u1.isp4010.nvram); 465 &ha->reg->u1.isp4022.semaphore);
457} 466}
458 467
459static inline void __iomem* isp_nvram(struct scsi_qla_host *ha) 468static inline void __iomem* isp_nvram(struct scsi_qla_host *ha)
460{ 469{
461 return (is_qla4022(ha) ? 470 return (is_qla4010(ha) ?
462 &ha->reg->u1.isp4022.nvram : 471 &ha->reg->u1.isp4010.nvram :
463 &ha->reg->u1.isp4010.nvram); 472 &ha->reg->u1.isp4022.nvram);
464} 473}
465 474
466static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha) 475static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha)
467{ 476{
468 return (is_qla4022(ha) ? 477 return (is_qla4010(ha) ?
469 &ha->reg->u2.isp4022.p0.ext_hw_conf : 478 &ha->reg->u2.isp4010.ext_hw_conf :
470 &ha->reg->u2.isp4010.ext_hw_conf); 479 &ha->reg->u2.isp4022.p0.ext_hw_conf);
471} 480}
472 481
473static inline void __iomem* isp_port_status(struct scsi_qla_host *ha) 482static inline void __iomem* isp_port_status(struct scsi_qla_host *ha)
474{ 483{
475 return (is_qla4022(ha) ? 484 return (is_qla4010(ha) ?
476 &ha->reg->u2.isp4022.p0.port_status : 485 &ha->reg->u2.isp4010.port_status :
477 &ha->reg->u2.isp4010.port_status); 486 &ha->reg->u2.isp4022.p0.port_status);
478} 487}
479 488
480static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha) 489static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha)
481{ 490{
482 return (is_qla4022(ha) ? 491 return (is_qla4010(ha) ?
483 &ha->reg->u2.isp4022.p0.port_ctrl : 492 &ha->reg->u2.isp4010.port_ctrl :
484 &ha->reg->u2.isp4010.port_ctrl); 493 &ha->reg->u2.isp4022.p0.port_ctrl);
485} 494}
486 495
487static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha) 496static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha)
488{ 497{
489 return (is_qla4022(ha) ? 498 return (is_qla4010(ha) ?
490 &ha->reg->u2.isp4022.p0.port_err_status : 499 &ha->reg->u2.isp4010.port_err_status :
491 &ha->reg->u2.isp4010.port_err_status); 500 &ha->reg->u2.isp4022.p0.port_err_status);
492} 501}
493 502
494static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha) 503static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha)
495{ 504{
496 return (is_qla4022(ha) ? 505 return (is_qla4010(ha) ?
497 &ha->reg->u2.isp4022.p0.gp_out : 506 &ha->reg->u2.isp4010.gp_out :
498 &ha->reg->u2.isp4010.gp_out); 507 &ha->reg->u2.isp4022.p0.gp_out);
499} 508}
500 509
501static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha) 510static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha)
502{ 511{
503 return (is_qla4022(ha) ? 512 return (is_qla4010(ha) ?
504 offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2 : 513 offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2 :
505 offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2); 514 offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2);
506} 515}
507 516
508int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits); 517int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
@@ -511,59 +520,59 @@ int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
511 520
512static inline int ql4xxx_lock_flash(struct scsi_qla_host *a) 521static inline int ql4xxx_lock_flash(struct scsi_qla_host *a)
513{ 522{
514 if (is_qla4022(a)) 523 if (is_qla4010(a))
524 return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
525 QL4010_FLASH_SEM_BITS);
526 else
515 return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK, 527 return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK,
516 (QL4022_RESOURCE_BITS_BASE_CODE | 528 (QL4022_RESOURCE_BITS_BASE_CODE |
517 (a->mac_index)) << 13); 529 (a->mac_index)) << 13);
518 else
519 return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
520 QL4010_FLASH_SEM_BITS);
521} 530}
522 531
523static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a) 532static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a)
524{ 533{
525 if (is_qla4022(a)) 534 if (is_qla4010(a))
526 ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
527 else
528 ql4xxx_sem_unlock(a, QL4010_FLASH_SEM_MASK); 535 ql4xxx_sem_unlock(a, QL4010_FLASH_SEM_MASK);
536 else
537 ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
529} 538}
530 539
531static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a) 540static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a)
532{ 541{
533 if (is_qla4022(a)) 542 if (is_qla4010(a))
543 return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
544 QL4010_NVRAM_SEM_BITS);
545 else
534 return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK, 546 return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK,
535 (QL4022_RESOURCE_BITS_BASE_CODE | 547 (QL4022_RESOURCE_BITS_BASE_CODE |
536 (a->mac_index)) << 10); 548 (a->mac_index)) << 10);
537 else
538 return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
539 QL4010_NVRAM_SEM_BITS);
540} 549}
541 550
542static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a) 551static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a)
543{ 552{
544 if (is_qla4022(a)) 553 if (is_qla4010(a))
545 ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
546 else
547 ql4xxx_sem_unlock(a, QL4010_NVRAM_SEM_MASK); 554 ql4xxx_sem_unlock(a, QL4010_NVRAM_SEM_MASK);
555 else
556 ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
548} 557}
549 558
550static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a) 559static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a)
551{ 560{
552 if (is_qla4022(a)) 561 if (is_qla4010(a))
562 return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
563 QL4010_DRVR_SEM_BITS);
564 else
553 return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK, 565 return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK,
554 (QL4022_RESOURCE_BITS_BASE_CODE | 566 (QL4022_RESOURCE_BITS_BASE_CODE |
555 (a->mac_index)) << 1); 567 (a->mac_index)) << 1);
556 else
557 return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
558 QL4010_DRVR_SEM_BITS);
559} 568}
560 569
561static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a) 570static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
562{ 571{
563 if (is_qla4022(a)) 572 if (is_qla4010(a))
564 ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
565 else
566 ql4xxx_sem_unlock(a, QL4010_DRVR_SEM_MASK); 573 ql4xxx_sem_unlock(a, QL4010_DRVR_SEM_MASK);
574 else
575 ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
567} 576}
568 577
569/*---------------------------------------------------------------------------*/ 578/*---------------------------------------------------------------------------*/
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 427489de64bc..4eea8c571916 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -296,7 +296,6 @@ static inline uint32_t clr_rmask(uint32_t val)
296/* ISP Semaphore definitions */ 296/* ISP Semaphore definitions */
297 297
298/* ISP General Purpose Output definitions */ 298/* ISP General Purpose Output definitions */
299#define GPOR_TOPCAT_RESET 0x00000004
300 299
301/* shadow registers (DMA'd from HA to system memory. read only) */ 300/* shadow registers (DMA'd from HA to system memory. read only) */
302struct shadow_regs { 301struct shadow_regs {
@@ -339,10 +338,13 @@ union external_hw_config_reg {
339/* Mailbox command definitions */ 338/* Mailbox command definitions */
340#define MBOX_CMD_ABOUT_FW 0x0009 339#define MBOX_CMD_ABOUT_FW 0x0009
341#define MBOX_CMD_LUN_RESET 0x0016 340#define MBOX_CMD_LUN_RESET 0x0016
341#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E
342#define MBOX_CMD_GET_FW_STATUS 0x001F 342#define MBOX_CMD_GET_FW_STATUS 0x001F
343#define MBOX_CMD_SET_ISNS_SERVICE 0x0021 343#define MBOX_CMD_SET_ISNS_SERVICE 0x0021
344#define ISNS_DISABLE 0 344#define ISNS_DISABLE 0
345#define ISNS_ENABLE 1 345#define ISNS_ENABLE 1
346#define MBOX_CMD_COPY_FLASH 0x0024
347#define MBOX_CMD_WRITE_FLASH 0x0025
346#define MBOX_CMD_READ_FLASH 0x0026 348#define MBOX_CMD_READ_FLASH 0x0026
347#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031 349#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031
348#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056 350#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056
@@ -360,10 +362,13 @@ union external_hw_config_reg {
360#define DDB_DS_SESSION_FAILED 0x06 362#define DDB_DS_SESSION_FAILED 0x06
361#define DDB_DS_LOGIN_IN_PROCESS 0x07 363#define DDB_DS_LOGIN_IN_PROCESS 0x07
362#define MBOX_CMD_GET_FW_STATE 0x0069 364#define MBOX_CMD_GET_FW_STATE 0x0069
365#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
366#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087
363 367
364/* Mailbox 1 */ 368/* Mailbox 1 */
365#define FW_STATE_READY 0x0000 369#define FW_STATE_READY 0x0000
366#define FW_STATE_CONFIG_WAIT 0x0001 370#define FW_STATE_CONFIG_WAIT 0x0001
371#define FW_STATE_WAIT_LOGIN 0x0002
367#define FW_STATE_ERROR 0x0004 372#define FW_STATE_ERROR 0x0004
368#define FW_STATE_DHCP_IN_PROGRESS 0x0008 373#define FW_STATE_DHCP_IN_PROGRESS 0x0008
369 374
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 1b221ff0f6f7..2122967bbf0b 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -8,6 +8,7 @@
8#ifndef __QLA4x_GBL_H 8#ifndef __QLA4x_GBL_H
9#define __QLA4x_GBL_H 9#define __QLA4x_GBL_H
10 10
11int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
11int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port); 12int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
12int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb); 13int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
13int qla4xxx_initialize_adapter(struct scsi_qla_host * ha, 14int qla4xxx_initialize_adapter(struct scsi_qla_host * ha,
@@ -75,4 +76,4 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
75extern int ql4xextended_error_logging; 76extern int ql4xextended_error_logging;
76extern int ql4xdiscoverywait; 77extern int ql4xdiscoverywait;
77extern int ql4xdontresethba; 78extern int ql4xdontresethba;
78#endif /* _QLA4x_GBL_H */ 79#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index bb3a1c11f44c..cc210f297a78 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -259,10 +259,16 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
259 "seconds expired= %d\n", ha->host_no, __func__, 259 "seconds expired= %d\n", ha->host_no, __func__,
260 ha->firmware_state, ha->addl_fw_state, 260 ha->firmware_state, ha->addl_fw_state,
261 timeout_count)); 261 timeout_count));
262 if (is_qla4032(ha) &&
263 !(ha->addl_fw_state & FW_ADDSTATE_LINK_UP) &&
264 (timeout_count < ADAPTER_INIT_TOV - 5)) {
265 break;
266 }
267
262 msleep(1000); 268 msleep(1000);
263 } /* end of for */ 269 } /* end of for */
264 270
265 if (timeout_count <= 0) 271 if (timeout_count == 0)
266 DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n", 272 DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n",
267 ha->host_no, __func__)); 273 ha->host_no, __func__));
268 274
@@ -806,32 +812,6 @@ int qla4xxx_relogin_device(struct scsi_qla_host *ha,
806 return QLA_SUCCESS; 812 return QLA_SUCCESS;
807} 813}
808 814
809/**
810 * qla4010_get_topcat_presence - check if it is QLA4040 TopCat Chip
811 * @ha: Pointer to host adapter structure.
812 *
813 **/
814static int qla4010_get_topcat_presence(struct scsi_qla_host *ha)
815{
816 unsigned long flags;
817 uint16_t topcat;
818
819 if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS)
820 return QLA_ERROR;
821 spin_lock_irqsave(&ha->hardware_lock, flags);
822 topcat = rd_nvram_word(ha, offsetof(struct eeprom_data,
823 isp4010.topcat));
824 spin_unlock_irqrestore(&ha->hardware_lock, flags);
825
826 if ((topcat & TOPCAT_MASK) == TOPCAT_PRESENT)
827 set_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
828 else
829 clear_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
830 ql4xxx_unlock_nvram(ha);
831 return QLA_SUCCESS;
832}
833
834
835static int qla4xxx_config_nvram(struct scsi_qla_host *ha) 815static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
836{ 816{
837 unsigned long flags; 817 unsigned long flags;
@@ -866,7 +846,7 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
866 /* set defaults */ 846 /* set defaults */
867 if (is_qla4010(ha)) 847 if (is_qla4010(ha))
868 extHwConfig.Asuint32_t = 0x1912; 848 extHwConfig.Asuint32_t = 0x1912;
869 else if (is_qla4022(ha)) 849 else if (is_qla4022(ha) | is_qla4032(ha))
870 extHwConfig.Asuint32_t = 0x0023; 850 extHwConfig.Asuint32_t = 0x0023;
871 } 851 }
872 DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n", 852 DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
@@ -927,7 +907,7 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
927 907
928 spin_lock_irqsave(&ha->hardware_lock, flags); 908 spin_lock_irqsave(&ha->hardware_lock, flags);
929 writel(jiffies, &ha->reg->mailbox[7]); 909 writel(jiffies, &ha->reg->mailbox[7]);
930 if (is_qla4022(ha)) 910 if (is_qla4022(ha) | is_qla4032(ha))
931 writel(set_rmask(NVR_WRITE_ENABLE), 911 writel(set_rmask(NVR_WRITE_ENABLE),
932 &ha->reg->u1.isp4022.nvram); 912 &ha->reg->u1.isp4022.nvram);
933 913
@@ -978,7 +958,7 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
978 return status; 958 return status;
979} 959}
980 960
981static int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a) 961int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
982{ 962{
983#define QL4_LOCK_DRVR_WAIT 300 963#define QL4_LOCK_DRVR_WAIT 300
984#define QL4_LOCK_DRVR_SLEEP 100 964#define QL4_LOCK_DRVR_SLEEP 100
@@ -1018,12 +998,7 @@ static int qla4xxx_start_firmware(struct scsi_qla_host *ha)
1018 int soft_reset = 1; 998 int soft_reset = 1;
1019 int config_chip = 0; 999 int config_chip = 0;
1020 1000
1021 if (is_qla4010(ha)){ 1001 if (is_qla4022(ha) | is_qla4032(ha))
1022 if (qla4010_get_topcat_presence(ha) != QLA_SUCCESS)
1023 return QLA_ERROR;
1024 }
1025
1026 if (is_qla4022(ha))
1027 ql4xxx_set_mac_number(ha); 1002 ql4xxx_set_mac_number(ha);
1028 1003
1029 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) 1004 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 0d61797af7da..6375eb017dd3 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -38,7 +38,7 @@ qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index)
38static inline void 38static inline void
39__qla4xxx_enable_intrs(struct scsi_qla_host *ha) 39__qla4xxx_enable_intrs(struct scsi_qla_host *ha)
40{ 40{
41 if (is_qla4022(ha)) { 41 if (is_qla4022(ha) | is_qla4032(ha)) {
42 writel(set_rmask(IMR_SCSI_INTR_ENABLE), 42 writel(set_rmask(IMR_SCSI_INTR_ENABLE),
43 &ha->reg->u1.isp4022.intr_mask); 43 &ha->reg->u1.isp4022.intr_mask);
44 readl(&ha->reg->u1.isp4022.intr_mask); 44 readl(&ha->reg->u1.isp4022.intr_mask);
@@ -52,7 +52,7 @@ __qla4xxx_enable_intrs(struct scsi_qla_host *ha)
52static inline void 52static inline void
53__qla4xxx_disable_intrs(struct scsi_qla_host *ha) 53__qla4xxx_disable_intrs(struct scsi_qla_host *ha)
54{ 54{
55 if (is_qla4022(ha)) { 55 if (is_qla4022(ha) | is_qla4032(ha)) {
56 writel(clr_rmask(IMR_SCSI_INTR_ENABLE), 56 writel(clr_rmask(IMR_SCSI_INTR_ENABLE),
57 &ha->reg->u1.isp4022.intr_mask); 57 &ha->reg->u1.isp4022.intr_mask);
58 readl(&ha->reg->u1.isp4022.intr_mask); 58 readl(&ha->reg->u1.isp4022.intr_mask);
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index c0a254b89a30..d41ce380eedc 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -294,6 +294,12 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
294 cmd_entry->control_flags = CF_WRITE; 294 cmd_entry->control_flags = CF_WRITE;
295 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 295 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
296 cmd_entry->control_flags = CF_READ; 296 cmd_entry->control_flags = CF_READ;
297
298 ha->bytes_xfered += cmd->request_bufflen;
299 if (ha->bytes_xfered & ~0xFFFFF){
300 ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
301 ha->bytes_xfered &= 0xFFFFF;
302 }
297 } 303 }
298 304
299 /* Set tagged queueing control flags */ 305 /* Set tagged queueing control flags */
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 1e283321a59d..ef975e0dc87f 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -627,6 +627,7 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
627 627
628 spin_lock_irqsave(&ha->hardware_lock, flags); 628 spin_lock_irqsave(&ha->hardware_lock, flags);
629 629
630 ha->isr_count++;
630 /* 631 /*
631 * Repeatedly service interrupts up to a maximum of 632 * Repeatedly service interrupts up to a maximum of
632 * MAX_REQS_SERVICED_PER_INTR 633 * MAX_REQS_SERVICED_PER_INTR
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index e3957ca5b645..58afd135aa1d 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -7,15 +7,22 @@
7 7
8#include "ql4_def.h" 8#include "ql4_def.h"
9 9
10static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha)
11{
12 writel(cmd, isp_nvram(ha));
13 readl(isp_nvram(ha));
14 udelay(1);
15}
16
10static inline int eeprom_size(struct scsi_qla_host *ha) 17static inline int eeprom_size(struct scsi_qla_host *ha)
11{ 18{
12 return is_qla4022(ha) ? FM93C86A_SIZE_16 : FM93C66A_SIZE_16; 19 return is_qla4010(ha) ? FM93C66A_SIZE_16 : FM93C86A_SIZE_16;
13} 20}
14 21
15static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha) 22static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha)
16{ 23{
17 return is_qla4022(ha) ? FM93C86A_NO_ADDR_BITS_16 : 24 return is_qla4010(ha) ? FM93C56A_NO_ADDR_BITS_16 :
18 FM93C56A_NO_ADDR_BITS_16; 25 FM93C86A_NO_ADDR_BITS_16 ;
19} 26}
20 27
21static inline int eeprom_no_data_bits(struct scsi_qla_host *ha) 28static inline int eeprom_no_data_bits(struct scsi_qla_host *ha)
@@ -28,8 +35,7 @@ static int fm93c56a_select(struct scsi_qla_host * ha)
28 DEBUG5(printk(KERN_ERR "fm93c56a_select:\n")); 35 DEBUG5(printk(KERN_ERR "fm93c56a_select:\n"));
29 36
30 ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000; 37 ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000;
31 writel(ha->eeprom_cmd_data, isp_nvram(ha)); 38 eeprom_cmd(ha->eeprom_cmd_data, ha);
32 readl(isp_nvram(ha));
33 return 1; 39 return 1;
34} 40}
35 41
@@ -41,12 +47,13 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
41 int previousBit; 47 int previousBit;
42 48
43 /* Clock in a zero, then do the start bit. */ 49 /* Clock in a zero, then do the start bit. */
44 writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, isp_nvram(ha)); 50 eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, ha);
45 writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 | 51
46 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha)); 52 eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
47 writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 | 53 AUBURN_EEPROM_CLK_RISE, ha);
48 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha)); 54 eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
49 readl(isp_nvram(ha)); 55 AUBURN_EEPROM_CLK_FALL, ha);
56
50 mask = 1 << (FM93C56A_CMD_BITS - 1); 57 mask = 1 << (FM93C56A_CMD_BITS - 1);
51 58
52 /* Force the previous data bit to be different. */ 59 /* Force the previous data bit to be different. */
@@ -60,14 +67,14 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
60 * If the bit changed, then change the DO state to 67 * If the bit changed, then change the DO state to
61 * match. 68 * match.
62 */ 69 */
63 writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha)); 70 eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
64 previousBit = dataBit; 71 previousBit = dataBit;
65 } 72 }
66 writel(ha->eeprom_cmd_data | dataBit | 73 eeprom_cmd(ha->eeprom_cmd_data | dataBit |
67 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha)); 74 AUBURN_EEPROM_CLK_RISE, ha);
68 writel(ha->eeprom_cmd_data | dataBit | 75 eeprom_cmd(ha->eeprom_cmd_data | dataBit |
69 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha)); 76 AUBURN_EEPROM_CLK_FALL, ha);
70 readl(isp_nvram(ha)); 77
71 cmd = cmd << 1; 78 cmd = cmd << 1;
72 } 79 }
73 mask = 1 << (eeprom_no_addr_bits(ha) - 1); 80 mask = 1 << (eeprom_no_addr_bits(ha) - 1);
@@ -82,14 +89,15 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
82 * If the bit changed, then change the DO state to 89 * If the bit changed, then change the DO state to
83 * match. 90 * match.
84 */ 91 */
85 writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha)); 92 eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
93
86 previousBit = dataBit; 94 previousBit = dataBit;
87 } 95 }
88 writel(ha->eeprom_cmd_data | dataBit | 96 eeprom_cmd(ha->eeprom_cmd_data | dataBit |
89 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha)); 97 AUBURN_EEPROM_CLK_RISE, ha);
90 writel(ha->eeprom_cmd_data | dataBit | 98 eeprom_cmd(ha->eeprom_cmd_data | dataBit |
91 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha)); 99 AUBURN_EEPROM_CLK_FALL, ha);
92 readl(isp_nvram(ha)); 100
93 addr = addr << 1; 101 addr = addr << 1;
94 } 102 }
95 return 1; 103 return 1;
@@ -98,8 +106,7 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
98static int fm93c56a_deselect(struct scsi_qla_host * ha) 106static int fm93c56a_deselect(struct scsi_qla_host * ha)
99{ 107{
100 ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000; 108 ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000;
101 writel(ha->eeprom_cmd_data, isp_nvram(ha)); 109 eeprom_cmd(ha->eeprom_cmd_data, ha);
102 readl(isp_nvram(ha));
103 return 1; 110 return 1;
104} 111}
105 112
@@ -112,12 +119,13 @@ static int fm93c56a_datain(struct scsi_qla_host * ha, unsigned short *value)
112 /* Read the data bits 119 /* Read the data bits
113 * The first bit is a dummy. Clock right over it. */ 120 * The first bit is a dummy. Clock right over it. */
114 for (i = 0; i < eeprom_no_data_bits(ha); i++) { 121 for (i = 0; i < eeprom_no_data_bits(ha); i++) {
115 writel(ha->eeprom_cmd_data | 122 eeprom_cmd(ha->eeprom_cmd_data |
116 AUBURN_EEPROM_CLK_RISE, isp_nvram(ha)); 123 AUBURN_EEPROM_CLK_RISE, ha);
117 writel(ha->eeprom_cmd_data | 124 eeprom_cmd(ha->eeprom_cmd_data |
118 AUBURN_EEPROM_CLK_FALL, isp_nvram(ha)); 125 AUBURN_EEPROM_CLK_FALL, ha);
119 dataBit = 126
120 (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0; 127 dataBit = (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
128
121 data = (data << 1) | dataBit; 129 data = (data << 1) | dataBit;
122 } 130 }
123 131
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
index 08e2aed8c6cc..b47b4fc59d83 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.h
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -134,9 +134,7 @@ struct eeprom_data {
134 u16 phyConfig; /* x36 */ 134 u16 phyConfig; /* x36 */
135#define PHY_CONFIG_PHY_ADDR_MASK 0x1f 135#define PHY_CONFIG_PHY_ADDR_MASK 0x1f
136#define PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20 136#define PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20
137 u16 topcat; /* x38 */ 137 u16 reserved_56; /* x38 */
138#define TOPCAT_PRESENT 0x0100
139#define TOPCAT_MASK 0xFF00
140 138
141#define EEPROM_UNUSED_1_SIZE 2 139#define EEPROM_UNUSED_1_SIZE 2
142 u8 unused_1[EEPROM_UNUSED_1_SIZE]; /* x3A */ 140 u8 unused_1[EEPROM_UNUSED_1_SIZE]; /* x3A */
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 5b8db6109536..969c9e431028 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -708,10 +708,10 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
708} 708}
709 709
710/** 710/**
711 * qla4010_soft_reset - performs soft reset. 711 * qla4xxx_soft_reset - performs soft reset.
712 * @ha: Pointer to host adapter structure. 712 * @ha: Pointer to host adapter structure.
713 **/ 713 **/
714static int qla4010_soft_reset(struct scsi_qla_host *ha) 714int qla4xxx_soft_reset(struct scsi_qla_host *ha)
715{ 715{
716 uint32_t max_wait_time; 716 uint32_t max_wait_time;
717 unsigned long flags = 0; 717 unsigned long flags = 0;
@@ -817,29 +817,6 @@ static int qla4010_soft_reset(struct scsi_qla_host *ha)
817} 817}
818 818
819/** 819/**
820 * qla4xxx_topcat_reset - performs hard reset of TopCat Chip.
821 * @ha: Pointer to host adapter structure.
822 **/
823static int qla4xxx_topcat_reset(struct scsi_qla_host *ha)
824{
825 unsigned long flags;
826
827 ql4xxx_lock_nvram(ha);
828 spin_lock_irqsave(&ha->hardware_lock, flags);
829 writel(set_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
830 readl(isp_gp_out(ha));
831 mdelay(1);
832
833 writel(clr_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
834 readl(isp_gp_out(ha));
835 spin_unlock_irqrestore(&ha->hardware_lock, flags);
836 mdelay(2523);
837
838 ql4xxx_unlock_nvram(ha);
839 return QLA_SUCCESS;
840}
841
842/**
843 * qla4xxx_flush_active_srbs - returns all outstanding i/o requests to O.S. 820 * qla4xxx_flush_active_srbs - returns all outstanding i/o requests to O.S.
844 * @ha: Pointer to host adapter structure. 821 * @ha: Pointer to host adapter structure.
845 * 822 *
@@ -867,26 +844,6 @@ static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha)
867} 844}
868 845
869/** 846/**
870 * qla4xxx_hard_reset - performs HBA Hard Reset
871 * @ha: Pointer to host adapter structure.
872 **/
873static int qla4xxx_hard_reset(struct scsi_qla_host *ha)
874{
875 /* The QLA4010 really doesn't have an equivalent to a hard reset */
876 qla4xxx_flush_active_srbs(ha);
877 if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
878 int status = QLA_ERROR;
879
880 if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
881 (qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
882 (qla4010_soft_reset(ha) == QLA_SUCCESS))
883 status = QLA_SUCCESS;
884 return status;
885 } else
886 return qla4010_soft_reset(ha);
887}
888
889/**
890 * qla4xxx_recover_adapter - recovers adapter after a fatal error 847 * qla4xxx_recover_adapter - recovers adapter after a fatal error
891 * @ha: Pointer to host adapter structure. 848 * @ha: Pointer to host adapter structure.
892 * @renew_ddb_list: Indicates what to do with the adapter's ddb list 849 * @renew_ddb_list: Indicates what to do with the adapter's ddb list
@@ -919,18 +876,11 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
919 if (status == QLA_SUCCESS) { 876 if (status == QLA_SUCCESS) {
920 DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n", 877 DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n",
921 ha->host_no, __func__)); 878 ha->host_no, __func__));
922 status = qla4xxx_soft_reset(ha); 879 qla4xxx_flush_active_srbs(ha);
923 } 880 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS)
924 /* FIXMEkaren: Do we want to keep interrupts enabled and process 881 status = qla4xxx_soft_reset(ha);
925 AENs after soft reset */ 882 else
926 883 status = QLA_ERROR;
927 /* If firmware (SOFT) reset failed, or if all outstanding
928 * commands have not returned, then do a HARD reset.
929 */
930 if (status == QLA_ERROR) {
931 DEBUG2(printk("scsi%ld: %s - Performing hard reset..\n",
932 ha->host_no, __func__));
933 status = qla4xxx_hard_reset(ha);
934 } 884 }
935 885
936 /* Flush any pending ddb changed AENs */ 886 /* Flush any pending ddb changed AENs */
@@ -1011,18 +961,15 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
1011 * the mid-level tries to sleep when it reaches the driver threshold 961 * the mid-level tries to sleep when it reaches the driver threshold
1012 * "host->can_queue". This can cause a panic if we were in our interrupt code. 962 * "host->can_queue". This can cause a panic if we were in our interrupt code.
1013 **/ 963 **/
1014static void qla4xxx_do_dpc(void *data) 964static void qla4xxx_do_dpc(struct work_struct *work)
1015{ 965{
1016 struct scsi_qla_host *ha = (struct scsi_qla_host *) data; 966 struct scsi_qla_host *ha =
967 container_of(work, struct scsi_qla_host, dpc_work);
1017 struct ddb_entry *ddb_entry, *dtemp; 968 struct ddb_entry *ddb_entry, *dtemp;
1018 969
1019 DEBUG2(printk("scsi%ld: %s: DPC handler waking up.\n", 970 DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
1020 ha->host_no, __func__)); 971 "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
1021 972 ha->host_no, __func__, ha->flags, ha->dpc_flags));
1022 DEBUG2(printk("scsi%ld: %s: ha->flags = 0x%08lx\n",
1023 ha->host_no, __func__, ha->flags));
1024 DEBUG2(printk("scsi%ld: %s: ha->dpc_flags = 0x%08lx\n",
1025 ha->host_no, __func__, ha->dpc_flags));
1026 973
1027 /* Initialization not yet finished. Don't do anything yet. */ 974 /* Initialization not yet finished. Don't do anything yet. */
1028 if (!test_bit(AF_INIT_DONE, &ha->flags)) 975 if (!test_bit(AF_INIT_DONE, &ha->flags))
@@ -1032,16 +979,8 @@ static void qla4xxx_do_dpc(void *data)
1032 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 979 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1033 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 980 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1034 test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags)) { 981 test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags)) {
1035 if (test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags)) 982 if (test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) ||
1036 /* 983 test_bit(DPC_RESET_HA, &ha->dpc_flags))
1037 * dg 09/23 Never initialize ddb list
1038 * once we up and running
1039 * qla4xxx_recover_adapter(ha,
1040 * REBUILD_DDB_LIST);
1041 */
1042 qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
1043
1044 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
1045 qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST); 984 qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
1046 985
1047 if (test_and_clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 986 if (test_and_clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
@@ -1122,7 +1061,8 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
1122 destroy_workqueue(ha->dpc_thread); 1061 destroy_workqueue(ha->dpc_thread);
1123 1062
1124 /* Issue Soft Reset to put firmware in unknown state */ 1063 /* Issue Soft Reset to put firmware in unknown state */
1125 qla4xxx_soft_reset(ha); 1064 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS)
1065 qla4xxx_soft_reset(ha);
1126 1066
1127 /* Remove timer thread, if present */ 1067 /* Remove timer thread, if present */
1128 if (ha->timer_active) 1068 if (ha->timer_active)
@@ -1261,7 +1201,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1261 init_waitqueue_head(&ha->mailbox_wait_queue); 1201 init_waitqueue_head(&ha->mailbox_wait_queue);
1262 1202
1263 spin_lock_init(&ha->hardware_lock); 1203 spin_lock_init(&ha->hardware_lock);
1264 spin_lock_init(&ha->list_lock);
1265 1204
1266 /* Allocate dma buffers */ 1205 /* Allocate dma buffers */
1267 if (qla4xxx_mem_alloc(ha)) { 1206 if (qla4xxx_mem_alloc(ha)) {
@@ -1315,7 +1254,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1315 ret = -ENODEV; 1254 ret = -ENODEV;
1316 goto probe_failed; 1255 goto probe_failed;
1317 } 1256 }
1318 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc, ha); 1257 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
1319 1258
1320 ret = request_irq(pdev->irq, qla4xxx_intr_handler, 1259 ret = request_irq(pdev->irq, qla4xxx_intr_handler,
1321 SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha); 1260 SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha);
@@ -1468,27 +1407,6 @@ struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t in
1468} 1407}
1469 1408
1470/** 1409/**
1471 * qla4xxx_soft_reset - performs a SOFT RESET of hba.
1472 * @ha: Pointer to host adapter structure.
1473 **/
1474int qla4xxx_soft_reset(struct scsi_qla_host *ha)
1475{
1476
1477 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: chip reset!\n", ha->host_no,
1478 __func__));
1479 if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
1480 int status = QLA_ERROR;
1481
1482 if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
1483 (qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
1484 (qla4010_soft_reset(ha) == QLA_SUCCESS) )
1485 status = QLA_SUCCESS;
1486 return status;
1487 } else
1488 return qla4010_soft_reset(ha);
1489}
1490
1491/**
1492 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware 1410 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
1493 * @ha: actual ha whose done queue will contain the comd returned by firmware. 1411 * @ha: actual ha whose done queue will contain the comd returned by firmware.
1494 * @cmd: Scsi Command to wait on. 1412 * @cmd: Scsi Command to wait on.
@@ -1686,6 +1604,12 @@ static struct pci_device_id qla4xxx_pci_tbl[] = {
1686 .subvendor = PCI_ANY_ID, 1604 .subvendor = PCI_ANY_ID,
1687 .subdevice = PCI_ANY_ID, 1605 .subdevice = PCI_ANY_ID,
1688 }, 1606 },
1607 {
1608 .vendor = PCI_VENDOR_ID_QLOGIC,
1609 .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
1610 .subvendor = PCI_ANY_ID,
1611 .subdevice = PCI_ANY_ID,
1612 },
1689 {0, 0}, 1613 {0, 0},
1690}; 1614};
1691MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); 1615MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index b3fe7e68988e..454e19c8ad68 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,9 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.00.05b9-k" 8#define QLA4XXX_DRIVER_VERSION "5.00.07-k"
9
10#define QL4_DRIVER_MAJOR_VER 5
11#define QL4_DRIVER_MINOR_VER 0
12#define QL4_DRIVER_PATCH_VER 5
13#define QL4_DRIVER_BETA_VER 9
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index c59f31533ab4..fafc00deaade 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -156,8 +156,7 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
156 156
157static DEFINE_MUTEX(host_cmd_pool_mutex); 157static DEFINE_MUTEX(host_cmd_pool_mutex);
158 158
159static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, 159struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
160 gfp_t gfp_mask)
161{ 160{
162 struct scsi_cmnd *cmd; 161 struct scsi_cmnd *cmd;
163 162
@@ -178,6 +177,7 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
178 177
179 return cmd; 178 return cmd;
180} 179}
180EXPORT_SYMBOL_GPL(__scsi_get_command);
181 181
182/* 182/*
183 * Function: scsi_get_command() 183 * Function: scsi_get_command()
@@ -214,9 +214,29 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
214 put_device(&dev->sdev_gendev); 214 put_device(&dev->sdev_gendev);
215 215
216 return cmd; 216 return cmd;
217} 217}
218EXPORT_SYMBOL(scsi_get_command); 218EXPORT_SYMBOL(scsi_get_command);
219 219
220void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
221 struct device *dev)
222{
223 unsigned long flags;
224
225 /* changing locks here, don't need to restore the irq state */
226 spin_lock_irqsave(&shost->free_list_lock, flags);
227 if (unlikely(list_empty(&shost->free_list))) {
228 list_add(&cmd->list, &shost->free_list);
229 cmd = NULL;
230 }
231 spin_unlock_irqrestore(&shost->free_list_lock, flags);
232
233 if (likely(cmd != NULL))
234 kmem_cache_free(shost->cmd_pool->slab, cmd);
235
236 put_device(dev);
237}
238EXPORT_SYMBOL(__scsi_put_command);
239
220/* 240/*
221 * Function: scsi_put_command() 241 * Function: scsi_put_command()
222 * 242 *
@@ -231,26 +251,15 @@ EXPORT_SYMBOL(scsi_get_command);
231void scsi_put_command(struct scsi_cmnd *cmd) 251void scsi_put_command(struct scsi_cmnd *cmd)
232{ 252{
233 struct scsi_device *sdev = cmd->device; 253 struct scsi_device *sdev = cmd->device;
234 struct Scsi_Host *shost = sdev->host;
235 unsigned long flags; 254 unsigned long flags;
236 255
237 /* serious error if the command hasn't come from a device list */ 256 /* serious error if the command hasn't come from a device list */
238 spin_lock_irqsave(&cmd->device->list_lock, flags); 257 spin_lock_irqsave(&cmd->device->list_lock, flags);
239 BUG_ON(list_empty(&cmd->list)); 258 BUG_ON(list_empty(&cmd->list));
240 list_del_init(&cmd->list); 259 list_del_init(&cmd->list);
241 spin_unlock(&cmd->device->list_lock); 260 spin_unlock_irqrestore(&cmd->device->list_lock, flags);
242 /* changing locks here, don't need to restore the irq state */
243 spin_lock(&shost->free_list_lock);
244 if (unlikely(list_empty(&shost->free_list))) {
245 list_add(&cmd->list, &shost->free_list);
246 cmd = NULL;
247 }
248 spin_unlock_irqrestore(&shost->free_list_lock, flags);
249
250 if (likely(cmd != NULL))
251 kmem_cache_free(shost->cmd_pool->slab, cmd);
252 261
253 put_device(&sdev->sdev_gendev); 262 __scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
254} 263}
255EXPORT_SYMBOL(scsi_put_command); 264EXPORT_SYMBOL(scsi_put_command);
256 265
@@ -871,9 +880,9 @@ EXPORT_SYMBOL(scsi_device_get);
871 */ 880 */
872void scsi_device_put(struct scsi_device *sdev) 881void scsi_device_put(struct scsi_device *sdev)
873{ 882{
883#ifdef CONFIG_MODULE_UNLOAD
874 struct module *module = sdev->host->hostt->module; 884 struct module *module = sdev->host->hostt->module;
875 885
876#ifdef CONFIG_MODULE_UNLOAD
877 /* The module refcount will be zero if scsi_device_get() 886 /* The module refcount will be zero if scsi_device_get()
878 * was called from a module removal routine */ 887 * was called from a module removal routine */
879 if (module && module_refcount(module) != 0) 888 if (module && module_refcount(module) != 0)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index aff1b0cfd4b2..2ecb6ff42444 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -453,9 +453,18 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
453} 453}
454 454
455/** 455/**
456 * scsi_send_eh_cmnd - send a cmd to a device as part of error recovery. 456 * scsi_send_eh_cmnd - submit a scsi command as part of error recory
457 * @scmd: SCSI Cmd to send. 457 * @scmd: SCSI command structure to hijack
458 * @timeout: Timeout for cmd. 458 * @cmnd: CDB to send
459 * @cmnd_size: size in bytes of @cmnd
460 * @timeout: timeout for this request
461 * @copy_sense: request sense data if set to 1
462 *
463 * This function is used to send a scsi command down to a target device
464 * as part of the error recovery process. If @copy_sense is 0 the command
465 * sent must be one that does not transfer any data. If @copy_sense is 1
466 * the command must be REQUEST_SENSE and this functions copies out the
467 * sense buffer it got into @scmd->sense_buffer.
459 * 468 *
460 * Return value: 469 * Return value:
461 * SUCCESS or FAILED or NEEDS_RETRY 470 * SUCCESS or FAILED or NEEDS_RETRY
@@ -469,6 +478,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
469 DECLARE_COMPLETION_ONSTACK(done); 478 DECLARE_COMPLETION_ONSTACK(done);
470 unsigned long timeleft; 479 unsigned long timeleft;
471 unsigned long flags; 480 unsigned long flags;
481 struct scatterlist sgl;
472 unsigned char old_cmnd[MAX_COMMAND_SIZE]; 482 unsigned char old_cmnd[MAX_COMMAND_SIZE];
473 enum dma_data_direction old_data_direction; 483 enum dma_data_direction old_data_direction;
474 unsigned short old_use_sg; 484 unsigned short old_use_sg;
@@ -500,19 +510,24 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
500 if (shost->hostt->unchecked_isa_dma) 510 if (shost->hostt->unchecked_isa_dma)
501 gfp_mask |= __GFP_DMA; 511 gfp_mask |= __GFP_DMA;
502 512
503 scmd->sc_data_direction = DMA_FROM_DEVICE; 513 sgl.page = alloc_page(gfp_mask);
504 scmd->request_bufflen = 252; 514 if (!sgl.page)
505 scmd->request_buffer = kzalloc(scmd->request_bufflen, gfp_mask);
506 if (!scmd->request_buffer)
507 return FAILED; 515 return FAILED;
516 sgl.offset = 0;
517 sgl.length = 252;
518
519 scmd->sc_data_direction = DMA_FROM_DEVICE;
520 scmd->request_bufflen = sgl.length;
521 scmd->request_buffer = &sgl;
522 scmd->use_sg = 1;
508 } else { 523 } else {
509 scmd->request_buffer = NULL; 524 scmd->request_buffer = NULL;
510 scmd->request_bufflen = 0; 525 scmd->request_bufflen = 0;
511 scmd->sc_data_direction = DMA_NONE; 526 scmd->sc_data_direction = DMA_NONE;
527 scmd->use_sg = 0;
512 } 528 }
513 529
514 scmd->underflow = 0; 530 scmd->underflow = 0;
515 scmd->use_sg = 0;
516 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 531 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
517 532
518 if (sdev->scsi_level <= SCSI_2) 533 if (sdev->scsi_level <= SCSI_2)
@@ -583,7 +598,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
583 memcpy(scmd->sense_buffer, scmd->request_buffer, 598 memcpy(scmd->sense_buffer, scmd->request_buffer,
584 sizeof(scmd->sense_buffer)); 599 sizeof(scmd->sense_buffer));
585 } 600 }
586 kfree(scmd->request_buffer); 601 __free_page(sgl.page);
587 } 602 }
588 603
589 604
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3ac4890ce086..fb616c69151f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -704,7 +704,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
704 return NULL; 704 return NULL;
705} 705}
706 706
707static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask) 707struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
708{ 708{
709 struct scsi_host_sg_pool *sgp; 709 struct scsi_host_sg_pool *sgp;
710 struct scatterlist *sgl; 710 struct scatterlist *sgl;
@@ -745,7 +745,9 @@ static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_m
745 return sgl; 745 return sgl;
746} 746}
747 747
748static void scsi_free_sgtable(struct scatterlist *sgl, int index) 748EXPORT_SYMBOL(scsi_alloc_sgtable);
749
750void scsi_free_sgtable(struct scatterlist *sgl, int index)
749{ 751{
750 struct scsi_host_sg_pool *sgp; 752 struct scsi_host_sg_pool *sgp;
751 753
@@ -755,6 +757,8 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
755 mempool_free(sgl, sgp->pool); 757 mempool_free(sgl, sgp->pool);
756} 758}
757 759
760EXPORT_SYMBOL(scsi_free_sgtable);
761
758/* 762/*
759 * Function: scsi_release_buffers() 763 * Function: scsi_release_buffers()
760 * 764 *
@@ -996,25 +1000,14 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
996 int count; 1000 int count;
997 1001
998 /* 1002 /*
999 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer 1003 * We used to not use scatter-gather for single segment request,
1000 */
1001 if (blk_pc_request(req) && !req->bio) {
1002 cmd->request_bufflen = req->data_len;
1003 cmd->request_buffer = req->data;
1004 req->buffer = req->data;
1005 cmd->use_sg = 0;
1006 return 0;
1007 }
1008
1009 /*
1010 * we used to not use scatter-gather for single segment request,
1011 * but now we do (it makes highmem I/O easier to support without 1004 * but now we do (it makes highmem I/O easier to support without
1012 * kmapping pages) 1005 * kmapping pages)
1013 */ 1006 */
1014 cmd->use_sg = req->nr_phys_segments; 1007 cmd->use_sg = req->nr_phys_segments;
1015 1008
1016 /* 1009 /*
1017 * if sg table allocation fails, requeue request later. 1010 * If sg table allocation fails, requeue request later.
1018 */ 1011 */
1019 sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC); 1012 sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
1020 if (unlikely(!sgpnt)) { 1013 if (unlikely(!sgpnt)) {
@@ -1022,24 +1015,21 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
1022 return BLKPREP_DEFER; 1015 return BLKPREP_DEFER;
1023 } 1016 }
1024 1017
1018 req->buffer = NULL;
1025 cmd->request_buffer = (char *) sgpnt; 1019 cmd->request_buffer = (char *) sgpnt;
1026 cmd->request_bufflen = req->nr_sectors << 9;
1027 if (blk_pc_request(req)) 1020 if (blk_pc_request(req))
1028 cmd->request_bufflen = req->data_len; 1021 cmd->request_bufflen = req->data_len;
1029 req->buffer = NULL; 1022 else
1023 cmd->request_bufflen = req->nr_sectors << 9;
1030 1024
1031 /* 1025 /*
1032 * Next, walk the list, and fill in the addresses and sizes of 1026 * Next, walk the list, and fill in the addresses and sizes of
1033 * each segment. 1027 * each segment.
1034 */ 1028 */
1035 count = blk_rq_map_sg(req->q, req, cmd->request_buffer); 1029 count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
1036
1037 /*
1038 * mapped well, send it off
1039 */
1040 if (likely(count <= cmd->use_sg)) { 1030 if (likely(count <= cmd->use_sg)) {
1041 cmd->use_sg = count; 1031 cmd->use_sg = count;
1042 return 0; 1032 return BLKPREP_OK;
1043 } 1033 }
1044 1034
1045 printk(KERN_ERR "Incorrect number of segments after building list\n"); 1035 printk(KERN_ERR "Incorrect number of segments after building list\n");
@@ -1069,6 +1059,27 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
1069 return -EOPNOTSUPP; 1059 return -EOPNOTSUPP;
1070} 1060}
1071 1061
1062static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1063 struct request *req)
1064{
1065 struct scsi_cmnd *cmd;
1066
1067 if (!req->special) {
1068 cmd = scsi_get_command(sdev, GFP_ATOMIC);
1069 if (unlikely(!cmd))
1070 return NULL;
1071 req->special = cmd;
1072 } else {
1073 cmd = req->special;
1074 }
1075
1076 /* pull a tag out of the request if we have one */
1077 cmd->tag = req->tag;
1078 cmd->request = req;
1079
1080 return cmd;
1081}
1082
1072static void scsi_blk_pc_done(struct scsi_cmnd *cmd) 1083static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
1073{ 1084{
1074 BUG_ON(!blk_pc_request(cmd->request)); 1085 BUG_ON(!blk_pc_request(cmd->request));
@@ -1081,9 +1092,37 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
1081 scsi_io_completion(cmd, cmd->request_bufflen); 1092 scsi_io_completion(cmd, cmd->request_bufflen);
1082} 1093}
1083 1094
1084static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd) 1095static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1085{ 1096{
1086 struct request *req = cmd->request; 1097 struct scsi_cmnd *cmd;
1098
1099 cmd = scsi_get_cmd_from_req(sdev, req);
1100 if (unlikely(!cmd))
1101 return BLKPREP_DEFER;
1102
1103 /*
1104 * BLOCK_PC requests may transfer data, in which case they must
1105 * a bio attached to them. Or they might contain a SCSI command
1106 * that does not transfer data, in which case they may optionally
1107 * submit a request without an attached bio.
1108 */
1109 if (req->bio) {
1110 int ret;
1111
1112 BUG_ON(!req->nr_phys_segments);
1113
1114 ret = scsi_init_io(cmd);
1115 if (unlikely(ret))
1116 return ret;
1117 } else {
1118 BUG_ON(req->data_len);
1119 BUG_ON(req->data);
1120
1121 cmd->request_bufflen = 0;
1122 cmd->request_buffer = NULL;
1123 cmd->use_sg = 0;
1124 req->buffer = NULL;
1125 }
1087 1126
1088 BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd)); 1127 BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
1089 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd)); 1128 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
@@ -1099,154 +1138,138 @@ static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
1099 cmd->allowed = req->retries; 1138 cmd->allowed = req->retries;
1100 cmd->timeout_per_command = req->timeout; 1139 cmd->timeout_per_command = req->timeout;
1101 cmd->done = scsi_blk_pc_done; 1140 cmd->done = scsi_blk_pc_done;
1141 return BLKPREP_OK;
1102} 1142}
1103 1143
1104static int scsi_prep_fn(struct request_queue *q, struct request *req) 1144/*
1145 * Setup a REQ_TYPE_FS command. These are simple read/write request
1146 * from filesystems that still need to be translated to SCSI CDBs from
1147 * the ULD.
1148 */
1149static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1105{ 1150{
1106 struct scsi_device *sdev = q->queuedata;
1107 struct scsi_cmnd *cmd; 1151 struct scsi_cmnd *cmd;
1108 int specials_only = 0; 1152 struct scsi_driver *drv;
1153 int ret;
1109 1154
1110 /* 1155 /*
1111 * Just check to see if the device is online. If it isn't, we 1156 * Filesystem requests must transfer data.
1112 * refuse to process any commands. The device must be brought
1113 * online before trying any recovery commands
1114 */ 1157 */
1115 if (unlikely(!scsi_device_online(sdev))) { 1158 BUG_ON(!req->nr_phys_segments);
1116 sdev_printk(KERN_ERR, sdev, 1159
1117 "rejecting I/O to offline device\n"); 1160 cmd = scsi_get_cmd_from_req(sdev, req);
1118 goto kill; 1161 if (unlikely(!cmd))
1119 } 1162 return BLKPREP_DEFER;
1120 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { 1163
1121 /* OK, we're not in a running state don't prep 1164 ret = scsi_init_io(cmd);
1122 * user commands */ 1165 if (unlikely(ret))
1123 if (sdev->sdev_state == SDEV_DEL) { 1166 return ret;
1124 /* Device is fully deleted, no commands 1167
1125 * at all allowed down */ 1168 /*
1126 sdev_printk(KERN_ERR, sdev, 1169 * Initialize the actual SCSI command for this request.
1127 "rejecting I/O to dead device\n"); 1170 */
1128 goto kill; 1171 drv = *(struct scsi_driver **)req->rq_disk->private_data;
1129 } 1172 if (unlikely(!drv->init_command(cmd))) {
1130 /* OK, we only allow special commands (i.e. not 1173 scsi_release_buffers(cmd);
1131 * user initiated ones */ 1174 scsi_put_command(cmd);
1132 specials_only = sdev->sdev_state; 1175 return BLKPREP_KILL;
1133 } 1176 }
1134 1177
1178 return BLKPREP_OK;
1179}
1180
1181static int scsi_prep_fn(struct request_queue *q, struct request *req)
1182{
1183 struct scsi_device *sdev = q->queuedata;
1184 int ret = BLKPREP_OK;
1185
1135 /* 1186 /*
1136 * Find the actual device driver associated with this command. 1187 * If the device is not in running state we will reject some
1137 * The SPECIAL requests are things like character device or 1188 * or all commands.
1138 * ioctls, which did not originate from ll_rw_blk. Note that
1139 * the special field is also used to indicate the cmd for
1140 * the remainder of a partially fulfilled request that can
1141 * come up when there is a medium error. We have to treat
1142 * these two cases differently. We differentiate by looking
1143 * at request->cmd, as this tells us the real story.
1144 */ 1189 */
1145 if (blk_special_request(req) && req->special) 1190 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1146 cmd = req->special; 1191 switch (sdev->sdev_state) {
1147 else if (blk_pc_request(req) || blk_fs_request(req)) { 1192 case SDEV_OFFLINE:
1148 if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT)){ 1193 /*
1149 if (specials_only == SDEV_QUIESCE || 1194 * If the device is offline we refuse to process any
1150 specials_only == SDEV_BLOCK) 1195 * commands. The device must be brought online
1151 goto defer; 1196 * before trying any recovery commands.
1152 1197 */
1153 sdev_printk(KERN_ERR, sdev, 1198 sdev_printk(KERN_ERR, sdev,
1154 "rejecting I/O to device being removed\n"); 1199 "rejecting I/O to offline device\n");
1155 goto kill; 1200 ret = BLKPREP_KILL;
1201 break;
1202 case SDEV_DEL:
1203 /*
1204 * If the device is fully deleted, we refuse to
1205 * process any commands as well.
1206 */
1207 sdev_printk(KERN_ERR, sdev,
1208 "rejecting I/O to dead device\n");
1209 ret = BLKPREP_KILL;
1210 break;
1211 case SDEV_QUIESCE:
1212 case SDEV_BLOCK:
1213 /*
1214 * If the devices is blocked we defer normal commands.
1215 */
1216 if (!(req->cmd_flags & REQ_PREEMPT))
1217 ret = BLKPREP_DEFER;
1218 break;
1219 default:
1220 /*
1221 * For any other not fully online state we only allow
1222 * special commands. In particular any user initiated
1223 * command is not allowed.
1224 */
1225 if (!(req->cmd_flags & REQ_PREEMPT))
1226 ret = BLKPREP_KILL;
1227 break;
1156 } 1228 }
1157 1229
1158 /* 1230 if (ret != BLKPREP_OK)
1159 * Now try and find a command block that we can use. 1231 goto out;
1160 */
1161 if (!req->special) {
1162 cmd = scsi_get_command(sdev, GFP_ATOMIC);
1163 if (unlikely(!cmd))
1164 goto defer;
1165 } else
1166 cmd = req->special;
1167
1168 /* pull a tag out of the request if we have one */
1169 cmd->tag = req->tag;
1170 } else {
1171 blk_dump_rq_flags(req, "SCSI bad req");
1172 goto kill;
1173 } 1232 }
1174
1175 /* note the overloading of req->special. When the tag
1176 * is active it always means cmd. If the tag goes
1177 * back for re-queueing, it may be reset */
1178 req->special = cmd;
1179 cmd->request = req;
1180
1181 /*
1182 * FIXME: drop the lock here because the functions below
1183 * expect to be called without the queue lock held. Also,
1184 * previously, we dequeued the request before dropping the
1185 * lock. We hope REQ_STARTED prevents anything untoward from
1186 * happening now.
1187 */
1188 if (blk_fs_request(req) || blk_pc_request(req)) {
1189 int ret;
1190 1233
1234 switch (req->cmd_type) {
1235 case REQ_TYPE_BLOCK_PC:
1236 ret = scsi_setup_blk_pc_cmnd(sdev, req);
1237 break;
1238 case REQ_TYPE_FS:
1239 ret = scsi_setup_fs_cmnd(sdev, req);
1240 break;
1241 default:
1191 /* 1242 /*
1192 * This will do a couple of things: 1243 * All other command types are not supported.
1193 * 1) Fill in the actual SCSI command.
1194 * 2) Fill in any other upper-level specific fields
1195 * (timeout).
1196 * 1244 *
1197 * If this returns 0, it means that the request failed 1245 * Note that these days the SCSI subsystem does not use
1198 * (reading past end of disk, reading offline device, 1246 * REQ_TYPE_SPECIAL requests anymore. These are only used
1199 * etc). This won't actually talk to the device, but 1247 * (directly or via blk_insert_request) by non-SCSI drivers.
1200 * some kinds of consistency checking may cause the
1201 * request to be rejected immediately.
1202 */ 1248 */
1249 blk_dump_rq_flags(req, "SCSI bad req");
1250 ret = BLKPREP_KILL;
1251 break;
1252 }
1203 1253
1204 /* 1254 out:
1205 * This sets up the scatter-gather table (allocating if 1255 switch (ret) {
1206 * required). 1256 case BLKPREP_KILL:
1207 */ 1257 req->errors = DID_NO_CONNECT << 16;
1208 ret = scsi_init_io(cmd); 1258 break;
1209 switch(ret) { 1259 case BLKPREP_DEFER:
1210 /* For BLKPREP_KILL/DEFER the cmd was released */
1211 case BLKPREP_KILL:
1212 goto kill;
1213 case BLKPREP_DEFER:
1214 goto defer;
1215 }
1216
1217 /* 1260 /*
1218 * Initialize the actual SCSI command for this request. 1261 * If we defer, the elv_next_request() returns NULL, but the
1262 * queue must be restarted, so we plug here if no returning
1263 * command will automatically do that.
1219 */ 1264 */
1220 if (blk_pc_request(req)) { 1265 if (sdev->device_busy == 0)
1221 scsi_setup_blk_pc_cmnd(cmd); 1266 blk_plug_device(q);
1222 } else if (req->rq_disk) { 1267 break;
1223 struct scsi_driver *drv; 1268 default:
1224 1269 req->cmd_flags |= REQ_DONTPREP;
1225 drv = *(struct scsi_driver **)req->rq_disk->private_data;
1226 if (unlikely(!drv->init_command(cmd))) {
1227 scsi_release_buffers(cmd);
1228 scsi_put_command(cmd);
1229 goto kill;
1230 }
1231 }
1232 } 1270 }
1233 1271
1234 /* 1272 return ret;
1235 * The request is now prepped, no need to come back here
1236 */
1237 req->cmd_flags |= REQ_DONTPREP;
1238 return BLKPREP_OK;
1239
1240 defer:
1241 /* If we defer, the elv_next_request() returns NULL, but the
1242 * queue must be restarted, so we plug here if no returning
1243 * command will automatically do that. */
1244 if (sdev->device_busy == 0)
1245 blk_plug_device(q);
1246 return BLKPREP_DEFER;
1247 kill:
1248 req->errors = DID_NO_CONNECT << 16;
1249 return BLKPREP_KILL;
1250} 1273}
1251 1274
1252/* 1275/*
@@ -1548,29 +1571,40 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1548} 1571}
1549EXPORT_SYMBOL(scsi_calculate_bounce_limit); 1572EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1550 1573
1551struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) 1574struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1575 request_fn_proc *request_fn)
1552{ 1576{
1553 struct Scsi_Host *shost = sdev->host;
1554 struct request_queue *q; 1577 struct request_queue *q;
1555 1578
1556 q = blk_init_queue(scsi_request_fn, NULL); 1579 q = blk_init_queue(request_fn, NULL);
1557 if (!q) 1580 if (!q)
1558 return NULL; 1581 return NULL;
1559 1582
1560 blk_queue_prep_rq(q, scsi_prep_fn);
1561
1562 blk_queue_max_hw_segments(q, shost->sg_tablesize); 1583 blk_queue_max_hw_segments(q, shost->sg_tablesize);
1563 blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS); 1584 blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1564 blk_queue_max_sectors(q, shost->max_sectors); 1585 blk_queue_max_sectors(q, shost->max_sectors);
1565 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 1586 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1566 blk_queue_segment_boundary(q, shost->dma_boundary); 1587 blk_queue_segment_boundary(q, shost->dma_boundary);
1567 blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1568 blk_queue_softirq_done(q, scsi_softirq_done);
1569 1588
1570 if (!shost->use_clustering) 1589 if (!shost->use_clustering)
1571 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 1590 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1572 return q; 1591 return q;
1573} 1592}
1593EXPORT_SYMBOL(__scsi_alloc_queue);
1594
1595struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1596{
1597 struct request_queue *q;
1598
1599 q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1600 if (!q)
1601 return NULL;
1602
1603 blk_queue_prep_rq(q, scsi_prep_fn);
1604 blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1605 blk_queue_softirq_done(q, scsi_softirq_done);
1606 return q;
1607}
1574 1608
1575void scsi_free_queue(struct request_queue *q) 1609void scsi_free_queue(struct request_queue *q)
1576{ 1610{
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 5d023d44e5e7..f458c2f686d2 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -39,6 +39,9 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
39 { }; 39 { };
40#endif 40#endif
41 41
42/* scsi_scan.c */
43int scsi_complete_async_scans(void);
44
42/* scsi_devinfo.c */ 45/* scsi_devinfo.c */
43extern int scsi_get_device_flags(struct scsi_device *sdev, 46extern int scsi_get_device_flags(struct scsi_device *sdev,
44 const unsigned char *vendor, 47 const unsigned char *vendor,
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 94a274645f6f..14e635aa44ce 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -29,7 +29,9 @@
29#include <linux/moduleparam.h> 29#include <linux/moduleparam.h>
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/blkdev.h> 31#include <linux/blkdev.h>
32#include <asm/semaphore.h> 32#include <linux/delay.h>
33#include <linux/kthread.h>
34#include <linux/spinlock.h>
33 35
34#include <scsi/scsi.h> 36#include <scsi/scsi.h>
35#include <scsi/scsi_cmnd.h> 37#include <scsi/scsi_cmnd.h>
@@ -87,6 +89,17 @@ module_param_named(max_luns, max_scsi_luns, int, S_IRUGO|S_IWUSR);
87MODULE_PARM_DESC(max_luns, 89MODULE_PARM_DESC(max_luns,
88 "last scsi LUN (should be between 1 and 2^32-1)"); 90 "last scsi LUN (should be between 1 and 2^32-1)");
89 91
92#ifdef CONFIG_SCSI_SCAN_ASYNC
93#define SCSI_SCAN_TYPE_DEFAULT "async"
94#else
95#define SCSI_SCAN_TYPE_DEFAULT "sync"
96#endif
97
98static char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
99
100module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
101MODULE_PARM_DESC(scan, "sync, async or none");
102
90/* 103/*
91 * max_scsi_report_luns: the maximum number of LUNS that will be 104 * max_scsi_report_luns: the maximum number of LUNS that will be
92 * returned from the REPORT LUNS command. 8 times this value must 105 * returned from the REPORT LUNS command. 8 times this value must
@@ -108,6 +121,68 @@ MODULE_PARM_DESC(inq_timeout,
108 "Timeout (in seconds) waiting for devices to answer INQUIRY." 121 "Timeout (in seconds) waiting for devices to answer INQUIRY."
109 " Default is 5. Some non-compliant devices need more."); 122 " Default is 5. Some non-compliant devices need more.");
110 123
124static DEFINE_SPINLOCK(async_scan_lock);
125static LIST_HEAD(scanning_hosts);
126
127struct async_scan_data {
128 struct list_head list;
129 struct Scsi_Host *shost;
130 struct completion prev_finished;
131};
132
133/**
134 * scsi_complete_async_scans - Wait for asynchronous scans to complete
135 *
136 * Asynchronous scans add themselves to the scanning_hosts list. Once
137 * that list is empty, we know that the scans are complete. Rather than
138 * waking up periodically to check the state of the list, we pretend to be
139 * a scanning task by adding ourselves at the end of the list and going to
140 * sleep. When the task before us wakes us up, we take ourselves off the
141 * list and return.
142 */
143int scsi_complete_async_scans(void)
144{
145 struct async_scan_data *data;
146
147 do {
148 if (list_empty(&scanning_hosts))
149 return 0;
150 /* If we can't get memory immediately, that's OK. Just
151 * sleep a little. Even if we never get memory, the async
152 * scans will finish eventually.
153 */
154 data = kmalloc(sizeof(*data), GFP_KERNEL);
155 if (!data)
156 msleep(1);
157 } while (!data);
158
159 data->shost = NULL;
160 init_completion(&data->prev_finished);
161
162 spin_lock(&async_scan_lock);
163 /* Check that there's still somebody else on the list */
164 if (list_empty(&scanning_hosts))
165 goto done;
166 list_add_tail(&data->list, &scanning_hosts);
167 spin_unlock(&async_scan_lock);
168
169 printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
170 wait_for_completion(&data->prev_finished);
171
172 spin_lock(&async_scan_lock);
173 list_del(&data->list);
174 done:
175 spin_unlock(&async_scan_lock);
176
177 kfree(data);
178 return 0;
179}
180
181#ifdef MODULE
182/* Only exported for the benefit of scsi_wait_scan */
183EXPORT_SYMBOL_GPL(scsi_complete_async_scans);
184#endif
185
111/** 186/**
112 * scsi_unlock_floptical - unlock device via a special MODE SENSE command 187 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
113 * @sdev: scsi device to send command to 188 * @sdev: scsi device to send command to
@@ -362,9 +437,10 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
362 goto retry; 437 goto retry;
363} 438}
364 439
365static void scsi_target_reap_usercontext(void *data) 440static void scsi_target_reap_usercontext(struct work_struct *work)
366{ 441{
367 struct scsi_target *starget = data; 442 struct scsi_target *starget =
443 container_of(work, struct scsi_target, ew.work);
368 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 444 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
369 unsigned long flags; 445 unsigned long flags;
370 446
@@ -400,7 +476,7 @@ void scsi_target_reap(struct scsi_target *starget)
400 starget->state = STARGET_DEL; 476 starget->state = STARGET_DEL;
401 spin_unlock_irqrestore(shost->host_lock, flags); 477 spin_unlock_irqrestore(shost->host_lock, flags);
402 execute_in_process_context(scsi_target_reap_usercontext, 478 execute_in_process_context(scsi_target_reap_usercontext,
403 starget, &starget->ew); 479 &starget->ew);
404 return; 480 return;
405 481
406 } 482 }
@@ -619,7 +695,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
619 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized 695 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
620 **/ 696 **/
621static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, 697static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
622 int *bflags) 698 int *bflags, int async)
623{ 699{
624 /* 700 /*
625 * XXX do not save the inquiry, since it can change underneath us, 701 * XXX do not save the inquiry, since it can change underneath us,
@@ -805,7 +881,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
805 * register it and tell the rest of the kernel 881 * register it and tell the rest of the kernel
806 * about it. 882 * about it.
807 */ 883 */
808 if (scsi_sysfs_add_sdev(sdev) != 0) 884 if (!async && scsi_sysfs_add_sdev(sdev) != 0)
809 return SCSI_SCAN_NO_RESPONSE; 885 return SCSI_SCAN_NO_RESPONSE;
810 886
811 return SCSI_SCAN_LUN_PRESENT; 887 return SCSI_SCAN_LUN_PRESENT;
@@ -974,7 +1050,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
974 goto out_free_result; 1050 goto out_free_result;
975 } 1051 }
976 1052
977 res = scsi_add_lun(sdev, result, &bflags); 1053 res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
978 if (res == SCSI_SCAN_LUN_PRESENT) { 1054 if (res == SCSI_SCAN_LUN_PRESENT) {
979 if (bflags & BLIST_KEY) { 1055 if (bflags & BLIST_KEY) {
980 sdev->lockable = 0; 1056 sdev->lockable = 0;
@@ -1474,6 +1550,12 @@ void scsi_scan_target(struct device *parent, unsigned int channel,
1474{ 1550{
1475 struct Scsi_Host *shost = dev_to_shost(parent); 1551 struct Scsi_Host *shost = dev_to_shost(parent);
1476 1552
1553 if (strncmp(scsi_scan_type, "none", 4) == 0)
1554 return;
1555
1556 if (!shost->async_scan)
1557 scsi_complete_async_scans();
1558
1477 mutex_lock(&shost->scan_mutex); 1559 mutex_lock(&shost->scan_mutex);
1478 if (scsi_host_scan_allowed(shost)) 1560 if (scsi_host_scan_allowed(shost))
1479 __scsi_scan_target(parent, channel, id, lun, rescan); 1561 __scsi_scan_target(parent, channel, id, lun, rescan);
@@ -1519,6 +1601,9 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1519 "%s: <%u:%u:%u>\n", 1601 "%s: <%u:%u:%u>\n",
1520 __FUNCTION__, channel, id, lun)); 1602 __FUNCTION__, channel, id, lun));
1521 1603
1604 if (!shost->async_scan)
1605 scsi_complete_async_scans();
1606
1522 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || 1607 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1523 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) || 1608 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1524 ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun))) 1609 ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
@@ -1539,14 +1624,143 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1539 return 0; 1624 return 0;
1540} 1625}
1541 1626
1627static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1628{
1629 struct scsi_device *sdev;
1630 shost_for_each_device(sdev, shost) {
1631 if (scsi_sysfs_add_sdev(sdev) != 0)
1632 scsi_destroy_sdev(sdev);
1633 }
1634}
1635
1636/**
1637 * scsi_prep_async_scan - prepare for an async scan
1638 * @shost: the host which will be scanned
1639 * Returns: a cookie to be passed to scsi_finish_async_scan()
1640 *
1641 * Tells the midlayer this host is going to do an asynchronous scan.
1642 * It reserves the host's position in the scanning list and ensures
1643 * that other asynchronous scans started after this one won't affect the
1644 * ordering of the discovered devices.
1645 */
1646static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
1647{
1648 struct async_scan_data *data;
1649
1650 if (strncmp(scsi_scan_type, "sync", 4) == 0)
1651 return NULL;
1652
1653 if (shost->async_scan) {
1654 printk("%s called twice for host %d", __FUNCTION__,
1655 shost->host_no);
1656 dump_stack();
1657 return NULL;
1658 }
1659
1660 data = kmalloc(sizeof(*data), GFP_KERNEL);
1661 if (!data)
1662 goto err;
1663 data->shost = scsi_host_get(shost);
1664 if (!data->shost)
1665 goto err;
1666 init_completion(&data->prev_finished);
1667
1668 spin_lock(&async_scan_lock);
1669 shost->async_scan = 1;
1670 if (list_empty(&scanning_hosts))
1671 complete(&data->prev_finished);
1672 list_add_tail(&data->list, &scanning_hosts);
1673 spin_unlock(&async_scan_lock);
1674
1675 return data;
1676
1677 err:
1678 kfree(data);
1679 return NULL;
1680}
1681
1682/**
1683 * scsi_finish_async_scan - asynchronous scan has finished
1684 * @data: cookie returned from earlier call to scsi_prep_async_scan()
1685 *
1686 * All the devices currently attached to this host have been found.
1687 * This function announces all the devices it has found to the rest
1688 * of the system.
1689 */
1690static void scsi_finish_async_scan(struct async_scan_data *data)
1691{
1692 struct Scsi_Host *shost;
1693
1694 if (!data)
1695 return;
1696
1697 shost = data->shost;
1698 if (!shost->async_scan) {
1699 printk("%s called twice for host %d", __FUNCTION__,
1700 shost->host_no);
1701 dump_stack();
1702 return;
1703 }
1704
1705 wait_for_completion(&data->prev_finished);
1706
1707 scsi_sysfs_add_devices(shost);
1708
1709 spin_lock(&async_scan_lock);
1710 shost->async_scan = 0;
1711 list_del(&data->list);
1712 if (!list_empty(&scanning_hosts)) {
1713 struct async_scan_data *next = list_entry(scanning_hosts.next,
1714 struct async_scan_data, list);
1715 complete(&next->prev_finished);
1716 }
1717 spin_unlock(&async_scan_lock);
1718
1719 scsi_host_put(shost);
1720 kfree(data);
1721}
1722
1723static void do_scsi_scan_host(struct Scsi_Host *shost)
1724{
1725 if (shost->hostt->scan_finished) {
1726 unsigned long start = jiffies;
1727 if (shost->hostt->scan_start)
1728 shost->hostt->scan_start(shost);
1729
1730 while (!shost->hostt->scan_finished(shost, jiffies - start))
1731 msleep(10);
1732 } else {
1733 scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
1734 SCAN_WILD_CARD, 0);
1735 }
1736}
1737
1738static int do_scan_async(void *_data)
1739{
1740 struct async_scan_data *data = _data;
1741 do_scsi_scan_host(data->shost);
1742 scsi_finish_async_scan(data);
1743 return 0;
1744}
1745
1542/** 1746/**
1543 * scsi_scan_host - scan the given adapter 1747 * scsi_scan_host - scan the given adapter
1544 * @shost: adapter to scan 1748 * @shost: adapter to scan
1545 **/ 1749 **/
1546void scsi_scan_host(struct Scsi_Host *shost) 1750void scsi_scan_host(struct Scsi_Host *shost)
1547{ 1751{
1548 scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD, 1752 struct async_scan_data *data;
1549 SCAN_WILD_CARD, 0); 1753
1754 if (strncmp(scsi_scan_type, "none", 4) == 0)
1755 return;
1756
1757 data = scsi_prep_async_scan(shost);
1758 if (!data) {
1759 do_scsi_scan_host(shost);
1760 return;
1761 }
1762
1763 kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
1550} 1764}
1551EXPORT_SYMBOL(scsi_scan_host); 1765EXPORT_SYMBOL(scsi_scan_host);
1552 1766
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e1a91665d1c2..259c90cfa367 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -218,16 +218,16 @@ static void scsi_device_cls_release(struct class_device *class_dev)
218 put_device(&sdev->sdev_gendev); 218 put_device(&sdev->sdev_gendev);
219} 219}
220 220
221static void scsi_device_dev_release_usercontext(void *data) 221static void scsi_device_dev_release_usercontext(struct work_struct *work)
222{ 222{
223 struct device *dev = data;
224 struct scsi_device *sdev; 223 struct scsi_device *sdev;
225 struct device *parent; 224 struct device *parent;
226 struct scsi_target *starget; 225 struct scsi_target *starget;
227 unsigned long flags; 226 unsigned long flags;
228 227
229 parent = dev->parent; 228 sdev = container_of(work, struct scsi_device, ew.work);
230 sdev = to_scsi_device(dev); 229
230 parent = sdev->sdev_gendev.parent;
231 starget = to_scsi_target(parent); 231 starget = to_scsi_target(parent);
232 232
233 spin_lock_irqsave(sdev->host->host_lock, flags); 233 spin_lock_irqsave(sdev->host->host_lock, flags);
@@ -258,7 +258,7 @@ static void scsi_device_dev_release_usercontext(void *data)
258static void scsi_device_dev_release(struct device *dev) 258static void scsi_device_dev_release(struct device *dev)
259{ 259{
260 struct scsi_device *sdp = to_scsi_device(dev); 260 struct scsi_device *sdp = to_scsi_device(dev);
261 execute_in_process_context(scsi_device_dev_release_usercontext, dev, 261 execute_in_process_context(scsi_device_dev_release_usercontext,
262 &sdp->ew); 262 &sdp->ew);
263} 263}
264 264
diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c
new file mode 100644
index 000000000000..37bbfbdb870f
--- /dev/null
+++ b/drivers/scsi/scsi_tgt_if.c
@@ -0,0 +1,352 @@
1/*
2 * SCSI target kernel/user interface functions
3 *
4 * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
5 * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of the
10 * License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22#include <linux/miscdevice.h>
23#include <linux/file.h>
24#include <net/tcp.h>
25#include <scsi/scsi.h>
26#include <scsi/scsi_cmnd.h>
27#include <scsi/scsi_device.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_tgt.h>
30#include <scsi/scsi_tgt_if.h>
31
32#include <asm/cacheflush.h>
33
34#include "scsi_tgt_priv.h"
35
36struct tgt_ring {
37 u32 tr_idx;
38 unsigned long tr_pages[TGT_RING_PAGES];
39 spinlock_t tr_lock;
40};
41
42/* tx_ring : kernel->user, rx_ring : user->kernel */
43static struct tgt_ring tx_ring, rx_ring;
44static DECLARE_WAIT_QUEUE_HEAD(tgt_poll_wait);
45
46static inline void tgt_ring_idx_inc(struct tgt_ring *ring)
47{
48 if (ring->tr_idx == TGT_MAX_EVENTS - 1)
49 ring->tr_idx = 0;
50 else
51 ring->tr_idx++;
52}
53
54static struct tgt_event *tgt_head_event(struct tgt_ring *ring, u32 idx)
55{
56 u32 pidx, off;
57
58 pidx = idx / TGT_EVENT_PER_PAGE;
59 off = idx % TGT_EVENT_PER_PAGE;
60
61 return (struct tgt_event *)
62 (ring->tr_pages[pidx] + sizeof(struct tgt_event) * off);
63}
64
65static int tgt_uspace_send_event(u32 type, struct tgt_event *p)
66{
67 struct tgt_event *ev;
68 struct tgt_ring *ring = &tx_ring;
69 unsigned long flags;
70 int err = 0;
71
72 spin_lock_irqsave(&ring->tr_lock, flags);
73
74 ev = tgt_head_event(ring, ring->tr_idx);
75 if (!ev->hdr.status)
76 tgt_ring_idx_inc(ring);
77 else
78 err = -BUSY;
79
80 spin_unlock_irqrestore(&ring->tr_lock, flags);
81
82 if (err)
83 return err;
84
85 memcpy(ev, p, sizeof(*ev));
86 ev->hdr.type = type;
87 mb();
88 ev->hdr.status = 1;
89
90 flush_dcache_page(virt_to_page(ev));
91
92 wake_up_interruptible(&tgt_poll_wait);
93
94 return 0;
95}
96
97int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun, u64 tag)
98{
99 struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
100 struct tgt_event ev;
101 int err;
102
103 memset(&ev, 0, sizeof(ev));
104 ev.p.cmd_req.host_no = shost->host_no;
105 ev.p.cmd_req.data_len = cmd->request_bufflen;
106 memcpy(ev.p.cmd_req.scb, cmd->cmnd, sizeof(ev.p.cmd_req.scb));
107 memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun));
108 ev.p.cmd_req.attribute = cmd->tag;
109 ev.p.cmd_req.tag = tag;
110
111 dprintk("%p %d %u %x %llx\n", cmd, shost->host_no,
112 ev.p.cmd_req.data_len, cmd->tag,
113 (unsigned long long) ev.p.cmd_req.tag);
114
115 err = tgt_uspace_send_event(TGT_KEVENT_CMD_REQ, &ev);
116 if (err)
117 eprintk("tx buf is full, could not send\n");
118
119 return err;
120}
121
122int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag)
123{
124 struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
125 struct tgt_event ev;
126 int err;
127
128 memset(&ev, 0, sizeof(ev));
129 ev.p.cmd_done.host_no = shost->host_no;
130 ev.p.cmd_done.tag = tag;
131 ev.p.cmd_done.result = cmd->result;
132
133 dprintk("%p %d %llu %u %x\n", cmd, shost->host_no,
134 (unsigned long long) ev.p.cmd_req.tag,
135 ev.p.cmd_req.data_len, cmd->tag);
136
137 err = tgt_uspace_send_event(TGT_KEVENT_CMD_DONE, &ev);
138 if (err)
139 eprintk("tx buf is full, could not send\n");
140
141 return err;
142}
143
144int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
145 struct scsi_lun *scsilun, void *data)
146{
147 struct tgt_event ev;
148 int err;
149
150 memset(&ev, 0, sizeof(ev));
151 ev.p.tsk_mgmt_req.host_no = host_no;
152 ev.p.tsk_mgmt_req.function = function;
153 ev.p.tsk_mgmt_req.tag = tag;
154 memcpy(ev.p.tsk_mgmt_req.lun, scsilun, sizeof(ev.p.tsk_mgmt_req.lun));
155 ev.p.tsk_mgmt_req.mid = (u64) (unsigned long) data;
156
157 dprintk("%d %x %llx %llx\n", host_no, function, (unsigned long long) tag,
158 (unsigned long long) ev.p.tsk_mgmt_req.mid);
159
160 err = tgt_uspace_send_event(TGT_KEVENT_TSK_MGMT_REQ, &ev);
161 if (err)
162 eprintk("tx buf is full, could not send\n");
163
164 return err;
165}
166
167static int event_recv_msg(struct tgt_event *ev)
168{
169 int err = 0;
170
171 switch (ev->hdr.type) {
172 case TGT_UEVENT_CMD_RSP:
173 err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no,
174 ev->p.cmd_rsp.tag,
175 ev->p.cmd_rsp.result,
176 ev->p.cmd_rsp.len,
177 ev->p.cmd_rsp.uaddr,
178 ev->p.cmd_rsp.rw);
179 break;
180 case TGT_UEVENT_TSK_MGMT_RSP:
181 err = scsi_tgt_kspace_tsk_mgmt(ev->p.tsk_mgmt_rsp.host_no,
182 ev->p.tsk_mgmt_rsp.mid,
183 ev->p.tsk_mgmt_rsp.result);
184 break;
185 default:
186 eprintk("unknown type %d\n", ev->hdr.type);
187 err = -EINVAL;
188 }
189
190 return err;
191}
192
193static ssize_t tgt_write(struct file *file, const char __user * buffer,
194 size_t count, loff_t * ppos)
195{
196 struct tgt_event *ev;
197 struct tgt_ring *ring = &rx_ring;
198
199 while (1) {
200 ev = tgt_head_event(ring, ring->tr_idx);
201 /* do we need this? */
202 flush_dcache_page(virt_to_page(ev));
203
204 if (!ev->hdr.status)
205 break;
206
207 tgt_ring_idx_inc(ring);
208 event_recv_msg(ev);
209 ev->hdr.status = 0;
210 };
211
212 return count;
213}
214
215static unsigned int tgt_poll(struct file * file, struct poll_table_struct *wait)
216{
217 struct tgt_event *ev;
218 struct tgt_ring *ring = &tx_ring;
219 unsigned long flags;
220 unsigned int mask = 0;
221 u32 idx;
222
223 poll_wait(file, &tgt_poll_wait, wait);
224
225 spin_lock_irqsave(&ring->tr_lock, flags);
226
227 idx = ring->tr_idx ? ring->tr_idx - 1 : TGT_MAX_EVENTS - 1;
228 ev = tgt_head_event(ring, idx);
229 if (ev->hdr.status)
230 mask |= POLLIN | POLLRDNORM;
231
232 spin_unlock_irqrestore(&ring->tr_lock, flags);
233
234 return mask;
235}
236
237static int uspace_ring_map(struct vm_area_struct *vma, unsigned long addr,
238 struct tgt_ring *ring)
239{
240 int i, err;
241
242 for (i = 0; i < TGT_RING_PAGES; i++) {
243 struct page *page = virt_to_page(ring->tr_pages[i]);
244 err = vm_insert_page(vma, addr, page);
245 if (err)
246 return err;
247 addr += PAGE_SIZE;
248 }
249
250 return 0;
251}
252
253static int tgt_mmap(struct file *filp, struct vm_area_struct *vma)
254{
255 unsigned long addr;
256 int err;
257
258 if (vma->vm_pgoff)
259 return -EINVAL;
260
261 if (vma->vm_end - vma->vm_start != TGT_RING_SIZE * 2) {
262 eprintk("mmap size must be %lu, not %lu \n",
263 TGT_RING_SIZE * 2, vma->vm_end - vma->vm_start);
264 return -EINVAL;
265 }
266
267 addr = vma->vm_start;
268 err = uspace_ring_map(vma, addr, &tx_ring);
269 if (err)
270 return err;
271 err = uspace_ring_map(vma, addr + TGT_RING_SIZE, &rx_ring);
272
273 return err;
274}
275
276static int tgt_open(struct inode *inode, struct file *file)
277{
278 tx_ring.tr_idx = rx_ring.tr_idx = 0;
279
280 return 0;
281}
282
283static struct file_operations tgt_fops = {
284 .owner = THIS_MODULE,
285 .open = tgt_open,
286 .poll = tgt_poll,
287 .write = tgt_write,
288 .mmap = tgt_mmap,
289};
290
291static struct miscdevice tgt_miscdev = {
292 .minor = MISC_DYNAMIC_MINOR,
293 .name = "tgt",
294 .fops = &tgt_fops,
295};
296
297static void tgt_ring_exit(struct tgt_ring *ring)
298{
299 int i;
300
301 for (i = 0; i < TGT_RING_PAGES; i++)
302 free_page(ring->tr_pages[i]);
303}
304
305static int tgt_ring_init(struct tgt_ring *ring)
306{
307 int i;
308
309 spin_lock_init(&ring->tr_lock);
310
311 for (i = 0; i < TGT_RING_PAGES; i++) {
312 ring->tr_pages[i] = get_zeroed_page(GFP_KERNEL);
313 if (!ring->tr_pages[i]) {
314 eprintk("out of memory\n");
315 return -ENOMEM;
316 }
317 }
318
319 return 0;
320}
321
322void scsi_tgt_if_exit(void)
323{
324 tgt_ring_exit(&tx_ring);
325 tgt_ring_exit(&rx_ring);
326 misc_deregister(&tgt_miscdev);
327}
328
329int scsi_tgt_if_init(void)
330{
331 int err;
332
333 err = tgt_ring_init(&tx_ring);
334 if (err)
335 return err;
336
337 err = tgt_ring_init(&rx_ring);
338 if (err)
339 goto free_tx_ring;
340
341 err = misc_register(&tgt_miscdev);
342 if (err)
343 goto free_rx_ring;
344
345 return 0;
346free_rx_ring:
347 tgt_ring_exit(&rx_ring);
348free_tx_ring:
349 tgt_ring_exit(&tx_ring);
350
351 return err;
352}
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
new file mode 100644
index 000000000000..386dbae17b44
--- /dev/null
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -0,0 +1,745 @@
1/*
2 * SCSI target lib functions
3 *
4 * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
5 * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of the
10 * License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22#include <linux/blkdev.h>
23#include <linux/hash.h>
24#include <linux/module.h>
25#include <linux/pagemap.h>
26#include <scsi/scsi.h>
27#include <scsi/scsi_cmnd.h>
28#include <scsi/scsi_device.h>
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_tgt.h>
31#include <../drivers/md/dm-bio-list.h>
32
33#include "scsi_tgt_priv.h"
34
35static struct workqueue_struct *scsi_tgtd;
36static kmem_cache_t *scsi_tgt_cmd_cache;
37
38/*
39 * TODO: this struct will be killed when the block layer supports large bios
40 * and James's work struct code is in
41 */
42struct scsi_tgt_cmd {
43 /* TODO replace work with James b's code */
44 struct work_struct work;
45 /* TODO replace the lists with a large bio */
46 struct bio_list xfer_done_list;
47 struct bio_list xfer_list;
48
49 struct list_head hash_list;
50 struct request *rq;
51 u64 tag;
52
53 void *buffer;
54 unsigned bufflen;
55};
56
57#define TGT_HASH_ORDER 4
58#define cmd_hashfn(tag) hash_long((unsigned long) (tag), TGT_HASH_ORDER)
59
60struct scsi_tgt_queuedata {
61 struct Scsi_Host *shost;
62 struct list_head cmd_hash[1 << TGT_HASH_ORDER];
63 spinlock_t cmd_hash_lock;
64};
65
66/*
67 * Function: scsi_host_get_command()
68 *
69 * Purpose: Allocate and setup a scsi command block and blk request
70 *
71 * Arguments: shost - scsi host
72 * data_dir - dma data dir
73 * gfp_mask- allocator flags
74 *
75 * Returns: The allocated scsi command structure.
76 *
77 * This should be called by target LLDs to get a command.
78 */
79struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
80 enum dma_data_direction data_dir,
81 gfp_t gfp_mask)
82{
83 int write = (data_dir == DMA_TO_DEVICE);
84 struct request *rq;
85 struct scsi_cmnd *cmd;
86 struct scsi_tgt_cmd *tcmd;
87
88 /* Bail if we can't get a reference to the device */
89 if (!get_device(&shost->shost_gendev))
90 return NULL;
91
92 tcmd = kmem_cache_alloc(scsi_tgt_cmd_cache, GFP_ATOMIC);
93 if (!tcmd)
94 goto put_dev;
95
96 rq = blk_get_request(shost->uspace_req_q, write, gfp_mask);
97 if (!rq)
98 goto free_tcmd;
99
100 cmd = __scsi_get_command(shost, gfp_mask);
101 if (!cmd)
102 goto release_rq;
103
104 memset(cmd, 0, sizeof(*cmd));
105 cmd->sc_data_direction = data_dir;
106 cmd->jiffies_at_alloc = jiffies;
107 cmd->request = rq;
108
109 rq->special = cmd;
110 rq->cmd_type = REQ_TYPE_SPECIAL;
111 rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
112 rq->end_io_data = tcmd;
113
114 bio_list_init(&tcmd->xfer_list);
115 bio_list_init(&tcmd->xfer_done_list);
116 tcmd->rq = rq;
117
118 return cmd;
119
120release_rq:
121 blk_put_request(rq);
122free_tcmd:
123 kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
124put_dev:
125 put_device(&shost->shost_gendev);
126 return NULL;
127
128}
129EXPORT_SYMBOL_GPL(scsi_host_get_command);
130
131/*
132 * Function: scsi_host_put_command()
133 *
134 * Purpose: Free a scsi command block
135 *
136 * Arguments: shost - scsi host
137 * cmd - command block to free
138 *
139 * Returns: Nothing.
140 *
141 * Notes: The command must not belong to any lists.
142 */
143void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
144{
145 struct request_queue *q = shost->uspace_req_q;
146 struct request *rq = cmd->request;
147 struct scsi_tgt_cmd *tcmd = rq->end_io_data;
148 unsigned long flags;
149
150 kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
151
152 spin_lock_irqsave(q->queue_lock, flags);
153 __blk_put_request(q, rq);
154 spin_unlock_irqrestore(q->queue_lock, flags);
155
156 __scsi_put_command(shost, cmd, &shost->shost_gendev);
157}
158EXPORT_SYMBOL_GPL(scsi_host_put_command);
159
160static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
161{
162 struct bio *bio;
163
164 /* must call bio_endio in case bio was bounced */
165 while ((bio = bio_list_pop(&tcmd->xfer_done_list))) {
166 bio_endio(bio, bio->bi_size, 0);
167 bio_unmap_user(bio);
168 }
169
170 while ((bio = bio_list_pop(&tcmd->xfer_list))) {
171 bio_endio(bio, bio->bi_size, 0);
172 bio_unmap_user(bio);
173 }
174}
175
176static void cmd_hashlist_del(struct scsi_cmnd *cmd)
177{
178 struct request_queue *q = cmd->request->q;
179 struct scsi_tgt_queuedata *qdata = q->queuedata;
180 unsigned long flags;
181 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
182
183 spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
184 list_del(&tcmd->hash_list);
185 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
186}
187
188static void scsi_tgt_cmd_destroy(struct work_struct *work)
189{
190 struct scsi_tgt_cmd *tcmd =
191 container_of(work, struct scsi_tgt_cmd, work);
192 struct scsi_cmnd *cmd = tcmd->rq->special;
193
194 dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
195 rq_data_dir(cmd->request));
196 /*
197 * We fix rq->cmd_flags here since when we told bio_map_user
198 * to write vm for WRITE commands, blk_rq_bio_prep set
199 * rq_data_dir the flags to READ.
200 */
201 if (cmd->sc_data_direction == DMA_TO_DEVICE)
202 cmd->request->cmd_flags |= REQ_RW;
203 else
204 cmd->request->cmd_flags &= ~REQ_RW;
205
206 scsi_unmap_user_pages(tcmd);
207 scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
208}
209
210static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd,
211 u64 tag)
212{
213 struct scsi_tgt_queuedata *qdata = rq->q->queuedata;
214 unsigned long flags;
215 struct list_head *head;
216
217 tcmd->tag = tag;
218 INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy);
219 spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
220 head = &qdata->cmd_hash[cmd_hashfn(tag)];
221 list_add(&tcmd->hash_list, head);
222 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
223}
224
225/*
226 * scsi_tgt_alloc_queue - setup queue used for message passing
227 * shost: scsi host
228 *
229 * This should be called by the LLD after host allocation.
230 * And will be released when the host is released.
231 */
232int scsi_tgt_alloc_queue(struct Scsi_Host *shost)
233{
234 struct scsi_tgt_queuedata *queuedata;
235 struct request_queue *q;
236 int err, i;
237
238 /*
239 * Do we need to send a netlink event or should uspace
240 * just respond to the hotplug event?
241 */
242 q = __scsi_alloc_queue(shost, NULL);
243 if (!q)
244 return -ENOMEM;
245
246 queuedata = kzalloc(sizeof(*queuedata), GFP_KERNEL);
247 if (!queuedata) {
248 err = -ENOMEM;
249 goto cleanup_queue;
250 }
251 queuedata->shost = shost;
252 q->queuedata = queuedata;
253
254 /*
255 * this is a silly hack. We should probably just queue as many
256 * command as is recvd to userspace. uspace can then make
257 * sure we do not overload the HBA
258 */
259 q->nr_requests = shost->hostt->can_queue;
260 /*
261 * We currently only support software LLDs so this does
262 * not matter for now. Do we need this for the cards we support?
263 * If so we should make it a host template value.
264 */
265 blk_queue_dma_alignment(q, 0);
266 shost->uspace_req_q = q;
267
268 for (i = 0; i < ARRAY_SIZE(queuedata->cmd_hash); i++)
269 INIT_LIST_HEAD(&queuedata->cmd_hash[i]);
270 spin_lock_init(&queuedata->cmd_hash_lock);
271
272 return 0;
273
274cleanup_queue:
275 blk_cleanup_queue(q);
276 return err;
277}
278EXPORT_SYMBOL_GPL(scsi_tgt_alloc_queue);
279
280void scsi_tgt_free_queue(struct Scsi_Host *shost)
281{
282 int i;
283 unsigned long flags;
284 struct request_queue *q = shost->uspace_req_q;
285 struct scsi_cmnd *cmd;
286 struct scsi_tgt_queuedata *qdata = q->queuedata;
287 struct scsi_tgt_cmd *tcmd, *n;
288 LIST_HEAD(cmds);
289
290 spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
291
292 for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) {
293 list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i],
294 hash_list) {
295 list_del(&tcmd->hash_list);
296 list_add(&tcmd->hash_list, &cmds);
297 }
298 }
299
300 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
301
302 while (!list_empty(&cmds)) {
303 tcmd = list_entry(cmds.next, struct scsi_tgt_cmd, hash_list);
304 list_del(&tcmd->hash_list);
305 cmd = tcmd->rq->special;
306
307 shost->hostt->eh_abort_handler(cmd);
308 scsi_tgt_cmd_destroy(&tcmd->work);
309 }
310}
311EXPORT_SYMBOL_GPL(scsi_tgt_free_queue);
312
313struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *cmd)
314{
315 struct scsi_tgt_queuedata *queue = cmd->request->q->queuedata;
316 return queue->shost;
317}
318EXPORT_SYMBOL_GPL(scsi_tgt_cmd_to_host);
319
320/*
321 * scsi_tgt_queue_command - queue command for userspace processing
322 * @cmd: scsi command
323 * @scsilun: scsi lun
324 * @tag: unique value to identify this command for tmf
325 */
326int scsi_tgt_queue_command(struct scsi_cmnd *cmd, struct scsi_lun *scsilun,
327 u64 tag)
328{
329 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
330 int err;
331
332 init_scsi_tgt_cmd(cmd->request, tcmd, tag);
333 err = scsi_tgt_uspace_send_cmd(cmd, scsilun, tag);
334 if (err)
335 cmd_hashlist_del(cmd);
336
337 return err;
338}
339EXPORT_SYMBOL_GPL(scsi_tgt_queue_command);
340
341/*
342 * This is run from a interrpt handler normally and the unmap
343 * needs process context so we must queue
344 */
345static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
346{
347 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
348
349 dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
350
351 scsi_tgt_uspace_send_status(cmd, tcmd->tag);
352 queue_work(scsi_tgtd, &tcmd->work);
353}
354
355static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
356{
357 struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
358 int err;
359
360 dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
361
362 err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done);
363 switch (err) {
364 case SCSI_MLQUEUE_HOST_BUSY:
365 case SCSI_MLQUEUE_DEVICE_BUSY:
366 return -EAGAIN;
367 }
368
369 return 0;
370}
371
372static void scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
373{
374 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
375 int err;
376
377 err = __scsi_tgt_transfer_response(cmd);
378 if (!err)
379 return;
380
381 cmd->result = DID_BUS_BUSY << 16;
382 err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
383 if (err <= 0)
384 /* the eh will have to pick this up */
385 printk(KERN_ERR "Could not send cmd %p status\n", cmd);
386}
387
388static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
389{
390 struct request *rq = cmd->request;
391 struct scsi_tgt_cmd *tcmd = rq->end_io_data;
392 int count;
393
394 cmd->use_sg = rq->nr_phys_segments;
395 cmd->request_buffer = scsi_alloc_sgtable(cmd, gfp_mask);
396 if (!cmd->request_buffer)
397 return -ENOMEM;
398
399 cmd->request_bufflen = rq->data_len;
400
401 dprintk("cmd %p addr %p cnt %d %lu\n", cmd, tcmd->buffer, cmd->use_sg,
402 rq_data_dir(rq));
403 count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer);
404 if (likely(count <= cmd->use_sg)) {
405 cmd->use_sg = count;
406 return 0;
407 }
408
409 eprintk("cmd %p addr %p cnt %d\n", cmd, tcmd->buffer, cmd->use_sg);
410 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
411 return -EINVAL;
412}
413
414/* TODO: test this crap and replace bio_map_user with new interface maybe */
415static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
416 int rw)
417{
418 struct request_queue *q = cmd->request->q;
419 struct request *rq = cmd->request;
420 void *uaddr = tcmd->buffer;
421 unsigned int len = tcmd->bufflen;
422 struct bio *bio;
423 int err;
424
425 while (len > 0) {
426 dprintk("%lx %u\n", (unsigned long) uaddr, len);
427 bio = bio_map_user(q, NULL, (unsigned long) uaddr, len, rw);
428 if (IS_ERR(bio)) {
429 err = PTR_ERR(bio);
430 dprintk("fail to map %lx %u %d %x\n",
431 (unsigned long) uaddr, len, err, cmd->cmnd[0]);
432 goto unmap_bios;
433 }
434
435 uaddr += bio->bi_size;
436 len -= bio->bi_size;
437
438 /*
439 * The first bio is added and merged. We could probably
440 * try to add others using scsi_merge_bio() but for now
441 * we keep it simple. The first bio should be pretty large
442 * (either hitting the 1 MB bio pages limit or a queue limit)
443 * already but for really large IO we may want to try and
444 * merge these.
445 */
446 if (!rq->bio) {
447 blk_rq_bio_prep(q, rq, bio);
448 rq->data_len = bio->bi_size;
449 } else
450 /* put list of bios to transfer in next go around */
451 bio_list_add(&tcmd->xfer_list, bio);
452 }
453
454 cmd->offset = 0;
455 err = scsi_tgt_init_cmd(cmd, GFP_KERNEL);
456 if (err)
457 goto unmap_bios;
458
459 return 0;
460
461unmap_bios:
462 if (rq->bio) {
463 bio_unmap_user(rq->bio);
464 while ((bio = bio_list_pop(&tcmd->xfer_list)))
465 bio_unmap_user(bio);
466 }
467
468 return err;
469}
470
471static int scsi_tgt_transfer_data(struct scsi_cmnd *);
472
473static void scsi_tgt_data_transfer_done(struct scsi_cmnd *cmd)
474{
475 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
476 struct bio *bio;
477 int err;
478
479 /* should we free resources here on error ? */
480 if (cmd->result) {
481send_uspace_err:
482 err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
483 if (err <= 0)
484 /* the tgt uspace eh will have to pick this up */
485 printk(KERN_ERR "Could not send cmd %p status\n", cmd);
486 return;
487 }
488
489 dprintk("cmd %p request_bufflen %u bufflen %u\n",
490 cmd, cmd->request_bufflen, tcmd->bufflen);
491
492 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
493 bio_list_add(&tcmd->xfer_done_list, cmd->request->bio);
494
495 tcmd->buffer += cmd->request_bufflen;
496 cmd->offset += cmd->request_bufflen;
497
498 if (!tcmd->xfer_list.head) {
499 scsi_tgt_transfer_response(cmd);
500 return;
501 }
502
503 dprintk("cmd2 %p request_bufflen %u bufflen %u\n",
504 cmd, cmd->request_bufflen, tcmd->bufflen);
505
506 bio = bio_list_pop(&tcmd->xfer_list);
507 BUG_ON(!bio);
508
509 blk_rq_bio_prep(cmd->request->q, cmd->request, bio);
510 cmd->request->data_len = bio->bi_size;
511 err = scsi_tgt_init_cmd(cmd, GFP_ATOMIC);
512 if (err) {
513 cmd->result = DID_ERROR << 16;
514 goto send_uspace_err;
515 }
516
517 if (scsi_tgt_transfer_data(cmd)) {
518 cmd->result = DID_NO_CONNECT << 16;
519 goto send_uspace_err;
520 }
521}
522
523static int scsi_tgt_transfer_data(struct scsi_cmnd *cmd)
524{
525 int err;
526 struct Scsi_Host *host = scsi_tgt_cmd_to_host(cmd);
527
528 err = host->hostt->transfer_data(cmd, scsi_tgt_data_transfer_done);
529 switch (err) {
530 case SCSI_MLQUEUE_HOST_BUSY:
531 case SCSI_MLQUEUE_DEVICE_BUSY:
532 return -EAGAIN;
533 default:
534 return 0;
535 }
536}
537
538static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr,
539 unsigned len)
540{
541 char __user *p = (char __user *) uaddr;
542
543 if (copy_from_user(cmd->sense_buffer, p,
544 min_t(unsigned, SCSI_SENSE_BUFFERSIZE, len))) {
545 printk(KERN_ERR "Could not copy the sense buffer\n");
546 return -EIO;
547 }
548 return 0;
549}
550
551static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
552{
553 struct scsi_tgt_cmd *tcmd;
554 int err;
555
556 err = shost->hostt->eh_abort_handler(cmd);
557 if (err)
558 eprintk("fail to abort %p\n", cmd);
559
560 tcmd = cmd->request->end_io_data;
561 scsi_tgt_cmd_destroy(&tcmd->work);
562 return err;
563}
564
565static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u64 tag)
566{
567 struct scsi_tgt_queuedata *qdata = q->queuedata;
568 struct request *rq = NULL;
569 struct list_head *head;
570 struct scsi_tgt_cmd *tcmd;
571 unsigned long flags;
572
573 head = &qdata->cmd_hash[cmd_hashfn(tag)];
574 spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
575 list_for_each_entry(tcmd, head, hash_list) {
576 if (tcmd->tag == tag) {
577 rq = tcmd->rq;
578 list_del(&tcmd->hash_list);
579 break;
580 }
581 }
582 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
583
584 return rq;
585}
586
587int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
588 unsigned long uaddr, u8 rw)
589{
590 struct Scsi_Host *shost;
591 struct scsi_cmnd *cmd;
592 struct request *rq;
593 struct scsi_tgt_cmd *tcmd;
594 int err = 0;
595
596 dprintk("%d %llu %d %u %lx %u\n", host_no, (unsigned long long) tag,
597 result, len, uaddr, rw);
598
599 /* TODO: replace with a O(1) alg */
600 shost = scsi_host_lookup(host_no);
601 if (IS_ERR(shost)) {
602 printk(KERN_ERR "Could not find host no %d\n", host_no);
603 return -EINVAL;
604 }
605
606 if (!shost->uspace_req_q) {
607 printk(KERN_ERR "Not target scsi host %d\n", host_no);
608 goto done;
609 }
610
611 rq = tgt_cmd_hash_lookup(shost->uspace_req_q, tag);
612 if (!rq) {
613 printk(KERN_ERR "Could not find tag %llu\n",
614 (unsigned long long) tag);
615 err = -EINVAL;
616 goto done;
617 }
618 cmd = rq->special;
619
620 dprintk("cmd %p result %d len %d bufflen %u %lu %x\n", cmd,
621 result, len, cmd->request_bufflen, rq_data_dir(rq), cmd->cmnd[0]);
622
623 if (result == TASK_ABORTED) {
624 scsi_tgt_abort_cmd(shost, cmd);
625 goto done;
626 }
627 /*
628 * store the userspace values here, the working values are
629 * in the request_* values
630 */
631 tcmd = cmd->request->end_io_data;
632 tcmd->buffer = (void *)uaddr;
633 tcmd->bufflen = len;
634 cmd->result = result;
635
636 if (!tcmd->bufflen || cmd->request_buffer) {
637 err = __scsi_tgt_transfer_response(cmd);
638 goto done;
639 }
640
641 /*
642 * TODO: Do we need to handle case where request does not
643 * align with LLD.
644 */
645 err = scsi_map_user_pages(rq->end_io_data, cmd, rw);
646 if (err) {
647 eprintk("%p %d\n", cmd, err);
648 err = -EAGAIN;
649 goto done;
650 }
651
652 /* userspace failure */
653 if (cmd->result) {
654 if (status_byte(cmd->result) == CHECK_CONDITION)
655 scsi_tgt_copy_sense(cmd, uaddr, len);
656 err = __scsi_tgt_transfer_response(cmd);
657 goto done;
658 }
659 /* ask the target LLD to transfer the data to the buffer */
660 err = scsi_tgt_transfer_data(cmd);
661
662done:
663 scsi_host_put(shost);
664 return err;
665}
666
667int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *shost, int function, u64 tag,
668 struct scsi_lun *scsilun, void *data)
669{
670 int err;
671
672 /* TODO: need to retry if this fails. */
673 err = scsi_tgt_uspace_send_tsk_mgmt(shost->host_no, function,
674 tag, scsilun, data);
675 if (err < 0)
676 eprintk("The task management request lost!\n");
677 return err;
678}
679EXPORT_SYMBOL_GPL(scsi_tgt_tsk_mgmt_request);
680
681int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result)
682{
683 struct Scsi_Host *shost;
684 int err = -EINVAL;
685
686 dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);
687
688 shost = scsi_host_lookup(host_no);
689 if (IS_ERR(shost)) {
690 printk(KERN_ERR "Could not find host no %d\n", host_no);
691 return err;
692 }
693
694 if (!shost->uspace_req_q) {
695 printk(KERN_ERR "Not target scsi host %d\n", host_no);
696 goto done;
697 }
698
699 err = shost->hostt->tsk_mgmt_response(mid, result);
700done:
701 scsi_host_put(shost);
702 return err;
703}
704
705static int __init scsi_tgt_init(void)
706{
707 int err;
708
709 scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd",
710 sizeof(struct scsi_tgt_cmd),
711 0, 0, NULL, NULL);
712 if (!scsi_tgt_cmd_cache)
713 return -ENOMEM;
714
715 scsi_tgtd = create_workqueue("scsi_tgtd");
716 if (!scsi_tgtd) {
717 err = -ENOMEM;
718 goto free_kmemcache;
719 }
720
721 err = scsi_tgt_if_init();
722 if (err)
723 goto destroy_wq;
724
725 return 0;
726
727destroy_wq:
728 destroy_workqueue(scsi_tgtd);
729free_kmemcache:
730 kmem_cache_destroy(scsi_tgt_cmd_cache);
731 return err;
732}
733
734static void __exit scsi_tgt_exit(void)
735{
736 destroy_workqueue(scsi_tgtd);
737 scsi_tgt_if_exit();
738 kmem_cache_destroy(scsi_tgt_cmd_cache);
739}
740
741module_init(scsi_tgt_init);
742module_exit(scsi_tgt_exit);
743
744MODULE_DESCRIPTION("SCSI target core");
745MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/scsi_tgt_priv.h b/drivers/scsi/scsi_tgt_priv.h
new file mode 100644
index 000000000000..84488c51ff62
--- /dev/null
+++ b/drivers/scsi/scsi_tgt_priv.h
@@ -0,0 +1,25 @@
1struct scsi_cmnd;
2struct scsi_lun;
3struct Scsi_Host;
4struct task_struct;
5
6/* tmp - will replace with SCSI logging stuff */
7#define eprintk(fmt, args...) \
8do { \
9 printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
10} while (0)
11
12#define dprintk(fmt, args...)
13/* #define dprintk eprintk */
14
15extern void scsi_tgt_if_exit(void);
16extern int scsi_tgt_if_init(void);
17
18extern int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun,
19 u64 tag);
20extern int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag);
21extern int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
22 unsigned long uaddr, u8 rw);
23extern int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
24 struct scsi_lun *scsilun, void *data);
25extern int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 38c215a78f69..3571ce8934e7 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -241,9 +241,9 @@ fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names)
241#define FC_MGMTSRVR_PORTID 0x00000a 241#define FC_MGMTSRVR_PORTID 0x00000a
242 242
243 243
244static void fc_timeout_deleted_rport(void *data); 244static void fc_timeout_deleted_rport(struct work_struct *work);
245static void fc_timeout_fail_rport_io(void *data); 245static void fc_timeout_fail_rport_io(struct work_struct *work);
246static void fc_scsi_scan_rport(void *data); 246static void fc_scsi_scan_rport(struct work_struct *work);
247 247
248/* 248/*
249 * Attribute counts pre object type... 249 * Attribute counts pre object type...
@@ -1613,7 +1613,7 @@ fc_flush_work(struct Scsi_Host *shost)
1613 * 1 on success / 0 already queued / < 0 for error 1613 * 1 on success / 0 already queued / < 0 for error
1614 **/ 1614 **/
1615static int 1615static int
1616fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work, 1616fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
1617 unsigned long delay) 1617 unsigned long delay)
1618{ 1618{
1619 if (unlikely(!fc_host_devloss_work_q(shost))) { 1619 if (unlikely(!fc_host_devloss_work_q(shost))) {
@@ -1625,9 +1625,6 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
1625 return -EINVAL; 1625 return -EINVAL;
1626 } 1626 }
1627 1627
1628 if (delay == 0)
1629 return queue_work(fc_host_devloss_work_q(shost), work);
1630
1631 return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); 1628 return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
1632} 1629}
1633 1630
@@ -1712,12 +1709,13 @@ EXPORT_SYMBOL(fc_remove_host);
1712 * fc_starget_delete - called to delete the scsi decendents of an rport 1709 * fc_starget_delete - called to delete the scsi decendents of an rport
1713 * (target and all sdevs) 1710 * (target and all sdevs)
1714 * 1711 *
1715 * @data: remote port to be operated on. 1712 * @work: remote port to be operated on.
1716 **/ 1713 **/
1717static void 1714static void
1718fc_starget_delete(void *data) 1715fc_starget_delete(struct work_struct *work)
1719{ 1716{
1720 struct fc_rport *rport = (struct fc_rport *)data; 1717 struct fc_rport *rport =
1718 container_of(work, struct fc_rport, stgt_delete_work);
1721 struct Scsi_Host *shost = rport_to_shost(rport); 1719 struct Scsi_Host *shost = rport_to_shost(rport);
1722 unsigned long flags; 1720 unsigned long flags;
1723 struct fc_internal *i = to_fc_internal(shost->transportt); 1721 struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -1751,12 +1749,13 @@ fc_starget_delete(void *data)
1751/** 1749/**
1752 * fc_rport_final_delete - finish rport termination and delete it. 1750 * fc_rport_final_delete - finish rport termination and delete it.
1753 * 1751 *
1754 * @data: remote port to be deleted. 1752 * @work: remote port to be deleted.
1755 **/ 1753 **/
1756static void 1754static void
1757fc_rport_final_delete(void *data) 1755fc_rport_final_delete(struct work_struct *work)
1758{ 1756{
1759 struct fc_rport *rport = (struct fc_rport *)data; 1757 struct fc_rport *rport =
1758 container_of(work, struct fc_rport, rport_delete_work);
1760 struct device *dev = &rport->dev; 1759 struct device *dev = &rport->dev;
1761 struct Scsi_Host *shost = rport_to_shost(rport); 1760 struct Scsi_Host *shost = rport_to_shost(rport);
1762 struct fc_internal *i = to_fc_internal(shost->transportt); 1761 struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -1770,7 +1769,7 @@ fc_rport_final_delete(void *data)
1770 1769
1771 /* Delete SCSI target and sdevs */ 1770 /* Delete SCSI target and sdevs */
1772 if (rport->scsi_target_id != -1) 1771 if (rport->scsi_target_id != -1)
1773 fc_starget_delete(data); 1772 fc_starget_delete(&rport->stgt_delete_work);
1774 else if (i->f->dev_loss_tmo_callbk) 1773 else if (i->f->dev_loss_tmo_callbk)
1775 i->f->dev_loss_tmo_callbk(rport); 1774 i->f->dev_loss_tmo_callbk(rport);
1776 else if (i->f->terminate_rport_io) 1775 else if (i->f->terminate_rport_io)
@@ -1829,11 +1828,11 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
1829 rport->channel = channel; 1828 rport->channel = channel;
1830 rport->fast_io_fail_tmo = -1; 1829 rport->fast_io_fail_tmo = -1;
1831 1830
1832 INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport); 1831 INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
1833 INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport); 1832 INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
1834 INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport); 1833 INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
1835 INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport); 1834 INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
1836 INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport); 1835 INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
1837 1836
1838 spin_lock_irqsave(shost->host_lock, flags); 1837 spin_lock_irqsave(shost->host_lock, flags);
1839 1838
@@ -1963,7 +1962,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
1963 } 1962 }
1964 1963
1965 if (match) { 1964 if (match) {
1966 struct work_struct *work = 1965 struct delayed_work *work =
1967 &rport->dev_loss_work; 1966 &rport->dev_loss_work;
1968 1967
1969 memcpy(&rport->node_name, &ids->node_name, 1968 memcpy(&rport->node_name, &ids->node_name,
@@ -2267,12 +2266,13 @@ EXPORT_SYMBOL(fc_remote_port_rolechg);
2267 * was a SCSI target (thus was blocked), and failed 2266 * was a SCSI target (thus was blocked), and failed
2268 * to return in the alloted time. 2267 * to return in the alloted time.
2269 * 2268 *
2270 * @data: rport target that failed to reappear in the alloted time. 2269 * @work: rport target that failed to reappear in the alloted time.
2271 **/ 2270 **/
2272static void 2271static void
2273fc_timeout_deleted_rport(void *data) 2272fc_timeout_deleted_rport(struct work_struct *work)
2274{ 2273{
2275 struct fc_rport *rport = (struct fc_rport *)data; 2274 struct fc_rport *rport =
2275 container_of(work, struct fc_rport, dev_loss_work.work);
2276 struct Scsi_Host *shost = rport_to_shost(rport); 2276 struct Scsi_Host *shost = rport_to_shost(rport);
2277 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 2277 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2278 unsigned long flags; 2278 unsigned long flags;
@@ -2366,15 +2366,16 @@ fc_timeout_deleted_rport(void *data)
2366 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a 2366 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a
2367 * disconnected SCSI target. 2367 * disconnected SCSI target.
2368 * 2368 *
2369 * @data: rport to terminate io on. 2369 * @work: rport to terminate io on.
2370 * 2370 *
2371 * Notes: Only requests the failure of the io, not that all are flushed 2371 * Notes: Only requests the failure of the io, not that all are flushed
2372 * prior to returning. 2372 * prior to returning.
2373 **/ 2373 **/
2374static void 2374static void
2375fc_timeout_fail_rport_io(void *data) 2375fc_timeout_fail_rport_io(struct work_struct *work)
2376{ 2376{
2377 struct fc_rport *rport = (struct fc_rport *)data; 2377 struct fc_rport *rport =
2378 container_of(work, struct fc_rport, fail_io_work.work);
2378 struct Scsi_Host *shost = rport_to_shost(rport); 2379 struct Scsi_Host *shost = rport_to_shost(rport);
2379 struct fc_internal *i = to_fc_internal(shost->transportt); 2380 struct fc_internal *i = to_fc_internal(shost->transportt);
2380 2381
@@ -2387,12 +2388,13 @@ fc_timeout_fail_rport_io(void *data)
2387/** 2388/**
2388 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. 2389 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
2389 * 2390 *
2390 * @data: remote port to be scanned. 2391 * @work: remote port to be scanned.
2391 **/ 2392 **/
2392static void 2393static void
2393fc_scsi_scan_rport(void *data) 2394fc_scsi_scan_rport(struct work_struct *work)
2394{ 2395{
2395 struct fc_rport *rport = (struct fc_rport *)data; 2396 struct fc_rport *rport =
2397 container_of(work, struct fc_rport, scan_work);
2396 struct Scsi_Host *shost = rport_to_shost(rport); 2398 struct Scsi_Host *shost = rport_to_shost(rport);
2397 unsigned long flags; 2399 unsigned long flags;
2398 2400
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 9b25124a989e..9c22f1342715 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -234,9 +234,11 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
234 return 0; 234 return 0;
235} 235}
236 236
237static void session_recovery_timedout(void *data) 237static void session_recovery_timedout(struct work_struct *work)
238{ 238{
239 struct iscsi_cls_session *session = data; 239 struct iscsi_cls_session *session =
240 container_of(work, struct iscsi_cls_session,
241 recovery_work.work);
240 242
241 dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed " 243 dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed "
242 "out after %d secs\n", session->recovery_tmo); 244 "out after %d secs\n", session->recovery_tmo);
@@ -276,7 +278,7 @@ iscsi_alloc_session(struct Scsi_Host *shost,
276 278
277 session->transport = transport; 279 session->transport = transport;
278 session->recovery_tmo = 120; 280 session->recovery_tmo = 120;
279 INIT_WORK(&session->recovery_work, session_recovery_timedout, session); 281 INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
280 INIT_LIST_HEAD(&session->host_list); 282 INIT_LIST_HEAD(&session->host_list);
281 INIT_LIST_HEAD(&session->sess_list); 283 INIT_LIST_HEAD(&session->sess_list);
282 284
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 9f070f0d0f2b..3fded4831460 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -964,9 +964,10 @@ struct work_queue_wrapper {
964}; 964};
965 965
966static void 966static void
967spi_dv_device_work_wrapper(void *data) 967spi_dv_device_work_wrapper(struct work_struct *work)
968{ 968{
969 struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; 969 struct work_queue_wrapper *wqw =
970 container_of(work, struct work_queue_wrapper, work);
970 struct scsi_device *sdev = wqw->sdev; 971 struct scsi_device *sdev = wqw->sdev;
971 972
972 kfree(wqw); 973 kfree(wqw);
@@ -1006,7 +1007,7 @@ spi_schedule_dv_device(struct scsi_device *sdev)
1006 return; 1007 return;
1007 } 1008 }
1008 1009
1009 INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw); 1010 INIT_WORK(&wqw->work, spi_dv_device_work_wrapper);
1010 wqw->sdev = sdev; 1011 wqw->sdev = sdev;
1011 1012
1012 schedule_work(&wqw->work); 1013 schedule_work(&wqw->work);
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
new file mode 100644
index 000000000000..8a636103083d
--- /dev/null
+++ b/drivers/scsi/scsi_wait_scan.c
@@ -0,0 +1,31 @@
1/*
2 * scsi_wait_scan.c
3 *
4 * Copyright (C) 2006 James Bottomley <James.Bottomley@SteelEye.com>
5 *
6 * This is a simple module to wait until all the async scans are
7 * complete. The idea is to use it in initrd/initramfs scripts. You
8 * modprobe it after all the modprobes of the root SCSI drivers and it
9 * will wait until they have all finished scanning their busses before
10 * allowing the boot to proceed
11 */
12
13#include <linux/module.h>
14#include "scsi_priv.h"
15
16static int __init wait_scan_init(void)
17{
18 scsi_complete_async_scans();
19 return 0;
20}
21
22static void __exit wait_scan_exit(void)
23{
24}
25
26MODULE_DESCRIPTION("SCSI wait for scans");
27MODULE_AUTHOR("James Bottomley");
28MODULE_LICENSE("GPL");
29
30late_initcall(wait_scan_init);
31module_exit(wait_scan_exit);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 84ff203ffedd..f6a452846fab 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1051,6 +1051,14 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
1051 &sshdr, SD_TIMEOUT, 1051 &sshdr, SD_TIMEOUT,
1052 SD_MAX_RETRIES); 1052 SD_MAX_RETRIES);
1053 1053
1054 /*
1055 * If the drive has indicated to us that it
1056 * doesn't have any media in it, don't bother
1057 * with any more polling.
1058 */
1059 if (media_not_present(sdkp, &sshdr))
1060 return;
1061
1054 if (the_result) 1062 if (the_result)
1055 sense_valid = scsi_sense_valid(&sshdr); 1063 sense_valid = scsi_sense_valid(&sshdr);
1056 retries++; 1064 retries++;
@@ -1059,14 +1067,6 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
1059 ((driver_byte(the_result) & DRIVER_SENSE) && 1067 ((driver_byte(the_result) & DRIVER_SENSE) &&
1060 sense_valid && sshdr.sense_key == UNIT_ATTENTION))); 1068 sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
1061 1069
1062 /*
1063 * If the drive has indicated to us that it doesn't have
1064 * any media in it, don't bother with any of the rest of
1065 * this crap.
1066 */
1067 if (media_not_present(sdkp, &sshdr))
1068 return;
1069
1070 if ((driver_byte(the_result) & DRIVER_SENSE) == 0) { 1070 if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
1071 /* no sense, TUR either succeeded or failed 1071 /* no sense, TUR either succeeded or failed
1072 * with a status error */ 1072 * with a status error */
@@ -1467,7 +1467,6 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1467 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); 1467 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
1468 1468
1469 if (scsi_status_is_good(res)) { 1469 if (scsi_status_is_good(res)) {
1470 int ct = 0;
1471 int offset = data.header_length + data.block_descriptor_length; 1470 int offset = data.header_length + data.block_descriptor_length;
1472 1471
1473 if (offset >= SD_BUF_SIZE - 2) { 1472 if (offset >= SD_BUF_SIZE - 2) {
@@ -1496,11 +1495,13 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1496 sdkp->DPOFUA = 0; 1495 sdkp->DPOFUA = 0;
1497 } 1496 }
1498 1497
1499 ct = sdkp->RCD + 2*sdkp->WCE; 1498 printk(KERN_NOTICE "SCSI device %s: "
1500 1499 "write cache: %s, read cache: %s, %s\n",
1501 printk(KERN_NOTICE "SCSI device %s: drive cache: %s%s\n", 1500 diskname,
1502 diskname, sd_cache_types[ct], 1501 sdkp->WCE ? "enabled" : "disabled",
1503 sdkp->DPOFUA ? " w/ FUA" : ""); 1502 sdkp->RCD ? "disabled" : "enabled",
1503 sdkp->DPOFUA ? "supports DPO and FUA"
1504 : "doesn't support DPO or FUA");
1504 1505
1505 return; 1506 return;
1506 } 1507 }
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e1a52c525ed4..587274dd7059 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -9,7 +9,7 @@
9 Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky, 9 Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
10 Michael Schaefer, J"org Weule, and Eric Youngdale. 10 Michael Schaefer, J"org Weule, and Eric Youngdale.
11 11
12 Copyright 1992 - 2005 Kai Makisara 12 Copyright 1992 - 2006 Kai Makisara
13 email Kai.Makisara@kolumbus.fi 13 email Kai.Makisara@kolumbus.fi
14 14
15 Some small formal changes - aeb, 950809 15 Some small formal changes - aeb, 950809
@@ -17,7 +17,7 @@
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20static const char *verstr = "20050830"; 20static const char *verstr = "20061107";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23 23
@@ -999,7 +999,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
999 STp->min_block = ((STp->buffer)->b_data[4] << 8) | 999 STp->min_block = ((STp->buffer)->b_data[4] << 8) |
1000 (STp->buffer)->b_data[5]; 1000 (STp->buffer)->b_data[5];
1001 if ( DEB( debugging || ) !STp->inited) 1001 if ( DEB( debugging || ) !STp->inited)
1002 printk(KERN_WARNING 1002 printk(KERN_INFO
1003 "%s: Block limits %d - %d bytes.\n", name, 1003 "%s: Block limits %d - %d bytes.\n", name,
1004 STp->min_block, STp->max_block); 1004 STp->min_block, STp->max_block);
1005 } else { 1005 } else {
@@ -1224,7 +1224,7 @@ static int st_flush(struct file *filp, fl_owner_t id)
1224 } 1224 }
1225 1225
1226 DEBC( if (STp->nbr_requests) 1226 DEBC( if (STp->nbr_requests)
1227 printk(KERN_WARNING "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n", 1227 printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n",
1228 name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable)); 1228 name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable));
1229 1229
1230 if (STps->rw == ST_WRITING && !STp->pos_unknown) { 1230 if (STps->rw == ST_WRITING && !STp->pos_unknown) {
@@ -4056,11 +4056,11 @@ static int st_probe(struct device *dev)
4056 goto out_free_tape; 4056 goto out_free_tape;
4057 } 4057 }
4058 4058
4059 sdev_printk(KERN_WARNING, SDp, 4059 sdev_printk(KERN_NOTICE, SDp,
4060 "Attached scsi tape %s\n", tape_name(tpnt)); 4060 "Attached scsi tape %s\n", tape_name(tpnt));
4061 printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n", 4061 sdev_printk(KERN_INFO, SDp, "%s: try direct i/o: %s (alignment %d B)\n",
4062 tape_name(tpnt), tpnt->try_dio ? "yes" : "no", 4062 tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
4063 queue_dma_alignment(SDp->request_queue) + 1); 4063 queue_dma_alignment(SDp->request_queue) + 1);
4064 4064
4065 return 0; 4065 return 0;
4066 4066
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 185c270bb043..ba6bcdaf2a6a 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -11,8 +11,6 @@
11 * Written By: 11 * Written By:
12 * Ed Lin <promise_linux@promise.com> 12 * Ed Lin <promise_linux@promise.com>
13 * 13 *
14 * Version: 3.0.0.1
15 *
16 */ 14 */
17 15
18#include <linux/init.h> 16#include <linux/init.h>
@@ -37,9 +35,9 @@
37#include <scsi/scsi_tcq.h> 35#include <scsi/scsi_tcq.h>
38 36
39#define DRV_NAME "stex" 37#define DRV_NAME "stex"
40#define ST_DRIVER_VERSION "3.0.0.1" 38#define ST_DRIVER_VERSION "3.1.0.1"
41#define ST_VER_MAJOR 3 39#define ST_VER_MAJOR 3
42#define ST_VER_MINOR 0 40#define ST_VER_MINOR 1
43#define ST_OEM 0 41#define ST_OEM 0
44#define ST_BUILD_VER 1 42#define ST_BUILD_VER 1
45 43
@@ -76,8 +74,10 @@ enum {
76 MU_STATE_STARTED = 4, 74 MU_STATE_STARTED = 4,
77 MU_STATE_RESETTING = 5, 75 MU_STATE_RESETTING = 5,
78 76
79 MU_MAX_DELAY_TIME = 240000, 77 MU_MAX_DELAY = 120,
80 MU_HANDSHAKE_SIGNATURE = 0x55aaaa55, 78 MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
79 MU_HANDSHAKE_SIGNATURE_HALF = 0x5a5a0000,
80 MU_HARD_RESET_WAIT = 30000,
81 HMU_PARTNER_TYPE = 2, 81 HMU_PARTNER_TYPE = 2,
82 82
83 /* firmware returned values */ 83 /* firmware returned values */
@@ -120,7 +120,8 @@ enum {
120 120
121 st_shasta = 0, 121 st_shasta = 0,
122 st_vsc = 1, 122 st_vsc = 1,
123 st_yosemite = 2, 123 st_vsc1 = 2,
124 st_yosemite = 3,
124 125
125 PASSTHRU_REQ_TYPE = 0x00000001, 126 PASSTHRU_REQ_TYPE = 0x00000001,
126 PASSTHRU_REQ_NO_WAKEUP = 0x00000100, 127 PASSTHRU_REQ_NO_WAKEUP = 0x00000100,
@@ -150,6 +151,8 @@ enum {
150 MGT_CMD_SIGNATURE = 0xba, 151 MGT_CMD_SIGNATURE = 0xba,
151 152
152 INQUIRY_EVPD = 0x01, 153 INQUIRY_EVPD = 0x01,
154
155 ST_ADDITIONAL_MEM = 0x200000,
153}; 156};
154 157
155/* SCSI inquiry data */ 158/* SCSI inquiry data */
@@ -211,7 +214,9 @@ struct handshake_frame {
211 __le32 partner_ver_minor; 214 __le32 partner_ver_minor;
212 __le32 partner_ver_oem; 215 __le32 partner_ver_oem;
213 __le32 partner_ver_build; 216 __le32 partner_ver_build;
214 u32 reserved1[4]; 217 __le32 extra_offset; /* NEW */
218 __le32 extra_size; /* NEW */
219 u32 reserved1[2];
215}; 220};
216 221
217struct req_msg { 222struct req_msg {
@@ -302,6 +307,7 @@ struct st_hba {
302 void __iomem *mmio_base; /* iomapped PCI memory space */ 307 void __iomem *mmio_base; /* iomapped PCI memory space */
303 void *dma_mem; 308 void *dma_mem;
304 dma_addr_t dma_handle; 309 dma_addr_t dma_handle;
310 size_t dma_size;
305 311
306 struct Scsi_Host *host; 312 struct Scsi_Host *host;
307 struct pci_dev *pdev; 313 struct pci_dev *pdev;
@@ -507,6 +513,7 @@ static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
507 size_t count = sizeof(struct st_frame); 513 size_t count = sizeof(struct st_frame);
508 514
509 p = hba->copy_buffer; 515 p = hba->copy_buffer;
516 stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_FROM_CMD);
510 memset(p->base, 0, sizeof(u32)*6); 517 memset(p->base, 0, sizeof(u32)*6);
511 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); 518 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
512 p->rom_addr = 0; 519 p->rom_addr = 0;
@@ -901,27 +908,34 @@ static int stex_handshake(struct st_hba *hba)
901 void __iomem *base = hba->mmio_base; 908 void __iomem *base = hba->mmio_base;
902 struct handshake_frame *h; 909 struct handshake_frame *h;
903 dma_addr_t status_phys; 910 dma_addr_t status_phys;
904 int i; 911 u32 data;
912 unsigned long before;
905 913
906 if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { 914 if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
907 writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL); 915 writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
908 readl(base + IDBL); 916 readl(base + IDBL);
909 for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE 917 before = jiffies;
910 && i < MU_MAX_DELAY_TIME; i++) { 918 while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
919 if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
920 printk(KERN_ERR DRV_NAME
921 "(%s): no handshake signature\n",
922 pci_name(hba->pdev));
923 return -1;
924 }
911 rmb(); 925 rmb();
912 msleep(1); 926 msleep(1);
913 } 927 }
914
915 if (i == MU_MAX_DELAY_TIME) {
916 printk(KERN_ERR DRV_NAME
917 "(%s): no handshake signature\n",
918 pci_name(hba->pdev));
919 return -1;
920 }
921 } 928 }
922 929
923 udelay(10); 930 udelay(10);
924 931
932 data = readl(base + OMR1);
933 if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
934 data &= 0x0000ffff;
935 if (hba->host->can_queue > data)
936 hba->host->can_queue = data;
937 }
938
925 h = (struct handshake_frame *)(hba->dma_mem + MU_REQ_BUFFER_SIZE); 939 h = (struct handshake_frame *)(hba->dma_mem + MU_REQ_BUFFER_SIZE);
926 h->rb_phy = cpu_to_le32(hba->dma_handle); 940 h->rb_phy = cpu_to_le32(hba->dma_handle);
927 h->rb_phy_hi = cpu_to_le32((hba->dma_handle >> 16) >> 16); 941 h->rb_phy_hi = cpu_to_le32((hba->dma_handle >> 16) >> 16);
@@ -931,6 +945,11 @@ static int stex_handshake(struct st_hba *hba)
931 h->status_cnt = cpu_to_le16(MU_STATUS_COUNT); 945 h->status_cnt = cpu_to_le16(MU_STATUS_COUNT);
932 stex_gettime(&h->hosttime); 946 stex_gettime(&h->hosttime);
933 h->partner_type = HMU_PARTNER_TYPE; 947 h->partner_type = HMU_PARTNER_TYPE;
948 if (hba->dma_size > STEX_BUFFER_SIZE) {
949 h->extra_offset = cpu_to_le32(STEX_BUFFER_SIZE);
950 h->extra_size = cpu_to_le32(ST_ADDITIONAL_MEM);
951 } else
952 h->extra_offset = h->extra_size = 0;
934 953
935 status_phys = hba->dma_handle + MU_REQ_BUFFER_SIZE; 954 status_phys = hba->dma_handle + MU_REQ_BUFFER_SIZE;
936 writel(status_phys, base + IMR0); 955 writel(status_phys, base + IMR0);
@@ -944,19 +963,18 @@ static int stex_handshake(struct st_hba *hba)
944 readl(base + IDBL); /* flush */ 963 readl(base + IDBL); /* flush */
945 964
946 udelay(10); 965 udelay(10);
947 for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE 966 before = jiffies;
948 && i < MU_MAX_DELAY_TIME; i++) { 967 while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
968 if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
969 printk(KERN_ERR DRV_NAME
970 "(%s): no signature after handshake frame\n",
971 pci_name(hba->pdev));
972 return -1;
973 }
949 rmb(); 974 rmb();
950 msleep(1); 975 msleep(1);
951 } 976 }
952 977
953 if (i == MU_MAX_DELAY_TIME) {
954 printk(KERN_ERR DRV_NAME
955 "(%s): no signature after handshake frame\n",
956 pci_name(hba->pdev));
957 return -1;
958 }
959
960 writel(0, base + IMR0); 978 writel(0, base + IMR0);
961 readl(base + IMR0); 979 readl(base + IMR0);
962 writel(0, base + OMR0); 980 writel(0, base + OMR0);
@@ -1038,9 +1056,9 @@ static void stex_hard_reset(struct st_hba *hba)
1038 pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET; 1056 pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
1039 pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl); 1057 pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
1040 1058
1041 for (i = 0; i < MU_MAX_DELAY_TIME; i++) { 1059 for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
1042 pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd); 1060 pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
1043 if (pci_cmd & PCI_COMMAND_MASTER) 1061 if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))
1044 break; 1062 break;
1045 msleep(1); 1063 msleep(1);
1046 } 1064 }
@@ -1100,18 +1118,18 @@ static int stex_reset(struct scsi_cmnd *cmd)
1100static int stex_biosparam(struct scsi_device *sdev, 1118static int stex_biosparam(struct scsi_device *sdev,
1101 struct block_device *bdev, sector_t capacity, int geom[]) 1119 struct block_device *bdev, sector_t capacity, int geom[])
1102{ 1120{
1103 int heads = 255, sectors = 63, cylinders; 1121 int heads = 255, sectors = 63;
1104 1122
1105 if (capacity < 0x200000) { 1123 if (capacity < 0x200000) {
1106 heads = 64; 1124 heads = 64;
1107 sectors = 32; 1125 sectors = 32;
1108 } 1126 }
1109 1127
1110 cylinders = sector_div(capacity, heads * sectors); 1128 sector_div(capacity, heads * sectors);
1111 1129
1112 geom[0] = heads; 1130 geom[0] = heads;
1113 geom[1] = sectors; 1131 geom[1] = sectors;
1114 geom[2] = cylinders; 1132 geom[2] = capacity;
1115 1133
1116 return 0; 1134 return 0;
1117} 1135}
@@ -1193,8 +1211,13 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1193 goto out_iounmap; 1211 goto out_iounmap;
1194 } 1212 }
1195 1213
1214 hba->cardtype = (unsigned int) id->driver_data;
1215 if (hba->cardtype == st_vsc && (pdev->subsystem_device & 0xf) == 0x1)
1216 hba->cardtype = st_vsc1;
1217 hba->dma_size = (hba->cardtype == st_vsc1) ?
1218 (STEX_BUFFER_SIZE + ST_ADDITIONAL_MEM) : (STEX_BUFFER_SIZE);
1196 hba->dma_mem = dma_alloc_coherent(&pdev->dev, 1219 hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1197 STEX_BUFFER_SIZE, &hba->dma_handle, GFP_KERNEL); 1220 hba->dma_size, &hba->dma_handle, GFP_KERNEL);
1198 if (!hba->dma_mem) { 1221 if (!hba->dma_mem) {
1199 err = -ENOMEM; 1222 err = -ENOMEM;
1200 printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n", 1223 printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
@@ -1207,8 +1230,6 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1207 hba->copy_buffer = hba->dma_mem + MU_BUFFER_SIZE; 1230 hba->copy_buffer = hba->dma_mem + MU_BUFFER_SIZE;
1208 hba->mu_status = MU_STATE_STARTING; 1231 hba->mu_status = MU_STATE_STARTING;
1209 1232
1210 hba->cardtype = (unsigned int) id->driver_data;
1211
1212 /* firmware uses id/lun pair for a logical drive, but lun would be 1233 /* firmware uses id/lun pair for a logical drive, but lun would be
1213 always 0 if CONFIG_SCSI_MULTI_LUN not configured, so we use 1234 always 0 if CONFIG_SCSI_MULTI_LUN not configured, so we use
1214 channel to map lun here */ 1235 channel to map lun here */
@@ -1233,7 +1254,7 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1233 if (err) 1254 if (err)
1234 goto out_free_irq; 1255 goto out_free_irq;
1235 1256
1236 err = scsi_init_shared_tag_map(host, ST_CAN_QUEUE); 1257 err = scsi_init_shared_tag_map(host, host->can_queue);
1237 if (err) { 1258 if (err) {
1238 printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n", 1259 printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",
1239 pci_name(pdev)); 1260 pci_name(pdev));
@@ -1256,7 +1277,7 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1256out_free_irq: 1277out_free_irq:
1257 free_irq(pdev->irq, hba); 1278 free_irq(pdev->irq, hba);
1258out_pci_free: 1279out_pci_free:
1259 dma_free_coherent(&pdev->dev, STEX_BUFFER_SIZE, 1280 dma_free_coherent(&pdev->dev, hba->dma_size,
1260 hba->dma_mem, hba->dma_handle); 1281 hba->dma_mem, hba->dma_handle);
1261out_iounmap: 1282out_iounmap:
1262 iounmap(hba->mmio_base); 1283 iounmap(hba->mmio_base);
@@ -1317,7 +1338,7 @@ static void stex_hba_free(struct st_hba *hba)
1317 1338
1318 pci_release_regions(hba->pdev); 1339 pci_release_regions(hba->pdev);
1319 1340
1320 dma_free_coherent(&hba->pdev->dev, STEX_BUFFER_SIZE, 1341 dma_free_coherent(&hba->pdev->dev, hba->dma_size,
1321 hba->dma_mem, hba->dma_handle); 1342 hba->dma_mem, hba->dma_handle);
1322} 1343}
1323 1344
@@ -1346,15 +1367,32 @@ static void stex_shutdown(struct pci_dev *pdev)
1346} 1367}
1347 1368
1348static struct pci_device_id stex_pci_tbl[] = { 1369static struct pci_device_id stex_pci_tbl[] = {
1349 { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, 1370 /* st_shasta */
1350 { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, 1371 { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1351 { 0x105a, 0xf350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, 1372 st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
1352 { 0x105a, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, 1373 { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1353 { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, 1374 st_shasta }, /* SuperTrak EX12350 */
1354 { 0x105a, 0x8301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, 1375 { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1355 { 0x105a, 0x8302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, 1376 st_shasta }, /* SuperTrak EX4350 */
1356 { 0x1725, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc }, 1377 { 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1357 { 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yosemite }, 1378 st_shasta }, /* SuperTrak EX24350 */
1379
1380 /* st_vsc */
1381 { 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
1382
1383 /* st_yosemite */
1384 { 0x105a, 0x8650, PCI_ANY_ID, 0x4600, 0, 0,
1385 st_yosemite }, /* SuperTrak EX4650 */
1386 { 0x105a, 0x8650, PCI_ANY_ID, 0x4610, 0, 0,
1387 st_yosemite }, /* SuperTrak EX4650o */
1388 { 0x105a, 0x8650, PCI_ANY_ID, 0x8600, 0, 0,
1389 st_yosemite }, /* SuperTrak EX8650EL */
1390 { 0x105a, 0x8650, PCI_ANY_ID, 0x8601, 0, 0,
1391 st_yosemite }, /* SuperTrak EX8650 */
1392 { 0x105a, 0x8650, PCI_ANY_ID, 0x8602, 0, 0,
1393 st_yosemite }, /* SuperTrak EX8654 */
1394 { 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1395 st_yosemite }, /* generic st_yosemite */
1358 { } /* terminate list */ 1396 { } /* terminate list */
1359}; 1397};
1360MODULE_DEVICE_TABLE(pci, stex_pci_tbl); 1398MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
index 646e840266e2..76a069b7ac0b 100644
--- a/drivers/scsi/t128.h
+++ b/drivers/scsi/t128.h
@@ -8,20 +8,20 @@
8 * drew@colorado.edu 8 * drew@colorado.edu
9 * +1 (303) 440-4894 9 * +1 (303) 440-4894
10 * 10 *
11 * DISTRIBUTION RELEASE 3. 11 * DISTRIBUTION RELEASE 3.
12 * 12 *
13 * For more information, please consult 13 * For more information, please consult
14 * 14 *
15 * Trantor Systems, Ltd. 15 * Trantor Systems, Ltd.
16 * T128/T128F/T228 SCSI Host Adapter 16 * T128/T128F/T228 SCSI Host Adapter
17 * Hardware Specifications 17 * Hardware Specifications
18 * 18 *
19 * Trantor Systems, Ltd. 19 * Trantor Systems, Ltd.
20 * 5415 Randall Place 20 * 5415 Randall Place
21 * Fremont, CA 94538 21 * Fremont, CA 94538
22 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910 22 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
23 * 23 *
24 * and 24 * and
25 * 25 *
26 * NCR 5380 Family 26 * NCR 5380 Family
27 * SCSI Protocol Controller 27 * SCSI Protocol Controller
@@ -48,15 +48,15 @@
48#define TDEBUG_TRANSFER 0x2 48#define TDEBUG_TRANSFER 0x2
49 49
50/* 50/*
51 * The trantor boards are memory mapped. They use an NCR5380 or 51 * The trantor boards are memory mapped. They use an NCR5380 or
52 * equivalent (my sample board had part second sourced from ZILOG). 52 * equivalent (my sample board had part second sourced from ZILOG).
53 * NCR's recommended "Pseudo-DMA" architecture is used, where 53 * NCR's recommended "Pseudo-DMA" architecture is used, where
54 * a PAL drives the DMA signals on the 5380 allowing fast, blind 54 * a PAL drives the DMA signals on the 5380 allowing fast, blind
55 * transfers with proper handshaking. 55 * transfers with proper handshaking.
56 */ 56 */
57 57
58/* 58/*
59 * Note : a boot switch is provided for the purpose of informing the 59 * Note : a boot switch is provided for the purpose of informing the
60 * firmware to boot or not boot from attached SCSI devices. So, I imagine 60 * firmware to boot or not boot from attached SCSI devices. So, I imagine
61 * there are fewer people who've yanked the ROM like they do on the Seagate 61 * there are fewer people who've yanked the ROM like they do on the Seagate
62 * to make bootup faster, and I'll probably use this for autodetection. 62 * to make bootup faster, and I'll probably use this for autodetection.
@@ -92,19 +92,20 @@
92#define T_DATA_REG_OFFSET 0x1e00 /* rw 512 bytes long */ 92#define T_DATA_REG_OFFSET 0x1e00 /* rw 512 bytes long */
93 93
94#ifndef ASM 94#ifndef ASM
95static int t128_abort(Scsi_Cmnd *); 95static int t128_abort(struct scsi_cmnd *);
96static int t128_biosparam(struct scsi_device *, struct block_device *, 96static int t128_biosparam(struct scsi_device *, struct block_device *,
97 sector_t, int*); 97 sector_t, int*);
98static int t128_detect(struct scsi_host_template *); 98static int t128_detect(struct scsi_host_template *);
99static int t128_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); 99static int t128_queue_command(struct scsi_cmnd *,
100static int t128_bus_reset(Scsi_Cmnd *); 100 void (*done)(struct scsi_cmnd *));
101static int t128_bus_reset(struct scsi_cmnd *);
101 102
102#ifndef CMD_PER_LUN 103#ifndef CMD_PER_LUN
103#define CMD_PER_LUN 2 104#define CMD_PER_LUN 2
104#endif 105#endif
105 106
106#ifndef CAN_QUEUE 107#ifndef CAN_QUEUE
107#define CAN_QUEUE 32 108#define CAN_QUEUE 32
108#endif 109#endif
109 110
110#ifndef HOSTS_C 111#ifndef HOSTS_C
@@ -120,7 +121,7 @@ static int t128_bus_reset(Scsi_Cmnd *);
120 121
121#define T128_address(reg) (base + T_5380_OFFSET + ((reg) * 0x20)) 122#define T128_address(reg) (base + T_5380_OFFSET + ((reg) * 0x20))
122 123
123#if !(TDEBUG & TDEBUG_TRANSFER) 124#if !(TDEBUG & TDEBUG_TRANSFER)
124#define NCR5380_read(reg) readb(T128_address(reg)) 125#define NCR5380_read(reg) readb(T128_address(reg))
125#define NCR5380_write(reg, value) writeb((value),(T128_address(reg))) 126#define NCR5380_write(reg, value) writeb((value),(T128_address(reg)))
126#else 127#else
@@ -129,7 +130,7 @@ static int t128_bus_reset(Scsi_Cmnd *);
129 , instance->hostno, (reg), T128_address(reg))), readb(T128_address(reg))) 130 , instance->hostno, (reg), T128_address(reg))), readb(T128_address(reg)))
130 131
131#define NCR5380_write(reg, value) { \ 132#define NCR5380_write(reg, value) { \
132 printk("scsi%d : write %02x to register %d at address %08x\n", \ 133 printk("scsi%d : write %02x to register %d at address %08x\n", \
133 instance->hostno, (value), (reg), T128_address(reg)); \ 134 instance->hostno, (value), (reg), T128_address(reg)); \
134 writeb((value), (T128_address(reg))); \ 135 writeb((value), (T128_address(reg))); \
135} 136}
@@ -142,10 +143,10 @@ static int t128_bus_reset(Scsi_Cmnd *);
142#define NCR5380_bus_reset t128_bus_reset 143#define NCR5380_bus_reset t128_bus_reset
143#define NCR5380_proc_info t128_proc_info 144#define NCR5380_proc_info t128_proc_info
144 145
145/* 15 14 12 10 7 5 3 146/* 15 14 12 10 7 5 3
146 1101 0100 1010 1000 */ 147 1101 0100 1010 1000 */
147 148
148#define T128_IRQS 0xc4a8 149#define T128_IRQS 0xc4a8
149 150
150#endif /* else def HOSTS_C */ 151#endif /* else def HOSTS_C */
151#endif /* ndef ASM */ 152#endif /* ndef ASM */