Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/BusLogic.c | 73
-rw-r--r--  drivers/scsi/Kconfig | 28
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 402
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 76
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 286
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 4
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 112
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c | 36
-rw-r--r--  drivers/scsi/aacraid/linit.c | 65
-rw-r--r--  drivers/scsi/aacraid/nark.c | 3
-rw-r--r--  drivers/scsi/aacraid/rkt.c | 3
-rw-r--r--  drivers/scsi/aacraid/rx.c | 115
-rw-r--r--  drivers/scsi/aacraid/sa.c | 1
-rw-r--r--  drivers/scsi/aha1542.c | 1
-rw-r--r--  drivers/scsi/aic7xxx/Kconfig.aic79xx | 12
-rw-r--r--  drivers/scsi/aic7xxx/Kconfig.aic7xxx | 10
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.h | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx.h | 5
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_scb.c | 1
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_attr.c | 1
-rw-r--r--  drivers/scsi/atari_NCR5380.c | 4398
-rw-r--r--  drivers/scsi/atari_scsi.c | 377
-rw-r--r--  drivers/scsi/atari_scsi.h | 174
-rw-r--r--  drivers/scsi/constants.c | 274
-rw-r--r--  drivers/scsi/dpt/dpti_i2o.h | 48
-rw-r--r--  drivers/scsi/dpt/dpti_ioctl.h | 2
-rw-r--r--  drivers/scsi/dpt/dptsig.h | 4
-rw-r--r--  drivers/scsi/dpt_i2o.c | 2
-rw-r--r--  drivers/scsi/eata_generic.h | 7
-rw-r--r--  drivers/scsi/esp_scsi.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 80
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c | 37
-rw-r--r--  drivers/scsi/ipr.c | 290
-rw-r--r--  drivers/scsi/ipr.h | 45
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 21
-rw-r--r--  drivers/scsi/libiscsi.c | 29
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 1
-rw-r--r--  drivers/scsi/libsrp.c | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 2
-rw-r--r--  drivers/scsi/megaraid.c | 2
-rw-r--r--  drivers/scsi/osst.c | 1
-rw-r--r--  drivers/scsi/pci2000.h | 197
-rw-r--r--  drivers/scsi/pcmcia/Kconfig | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 177
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/scsi.c | 47
-rw-r--r--  drivers/scsi/scsi_error.c | 19
-rw-r--r--  drivers/scsi/scsi_lib.c | 8
-rw-r--r--  drivers/scsi/scsi_scan.c | 2
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 54
-rw-r--r--  drivers/scsi/scsi_tgt_if.c | 6
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c | 261
-rw-r--r--  drivers/scsi/scsi_tgt_priv.h | 5
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 6
-rw-r--r--  drivers/scsi/sd.c | 405
-rw-r--r--  drivers/scsi/sg.c | 13
-rw-r--r--  drivers/scsi/sr.c | 2
-rw-r--r--  drivers/scsi/st.c | 1
-rw-r--r--  drivers/scsi/sun_esp.c | 1
-rw-r--r--  drivers/scsi/tmscsim.c | 2
69 files changed, 4255 insertions(+), 4068 deletions(-)
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index e874b8944875..96f4cab07614 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -579,17 +579,17 @@ static void __init BusLogic_InitializeProbeInfoListISA(struct BusLogic_HostAdapt
579 /* 579 /*
580 Append the list of standard BusLogic MultiMaster ISA I/O Addresses. 580 Append the list of standard BusLogic MultiMaster ISA I/O Addresses.
581 */ 581 */
582 if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe330 : check_region(0x330, BusLogic_MultiMasterAddressCount) == 0) 582 if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe330)
583 BusLogic_AppendProbeAddressISA(0x330); 583 BusLogic_AppendProbeAddressISA(0x330);
584 if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe334 : check_region(0x334, BusLogic_MultiMasterAddressCount) == 0) 584 if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe334)
585 BusLogic_AppendProbeAddressISA(0x334); 585 BusLogic_AppendProbeAddressISA(0x334);
586 if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe230 : check_region(0x230, BusLogic_MultiMasterAddressCount) == 0) 586 if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe230)
587 BusLogic_AppendProbeAddressISA(0x230); 587 BusLogic_AppendProbeAddressISA(0x230);
588 if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe234 : check_region(0x234, BusLogic_MultiMasterAddressCount) == 0) 588 if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe234)
589 BusLogic_AppendProbeAddressISA(0x234); 589 BusLogic_AppendProbeAddressISA(0x234);
590 if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe130 : check_region(0x130, BusLogic_MultiMasterAddressCount) == 0) 590 if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe130)
591 BusLogic_AppendProbeAddressISA(0x130); 591 BusLogic_AppendProbeAddressISA(0x130);
592 if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe134 : check_region(0x134, BusLogic_MultiMasterAddressCount) == 0) 592 if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe134)
593 BusLogic_AppendProbeAddressISA(0x134); 593 BusLogic_AppendProbeAddressISA(0x134);
594} 594}
595 595
@@ -795,7 +795,9 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
795 host adapters are probed. 795 host adapters are probed.
796 */ 796 */
797 if (!BusLogic_ProbeOptions.NoProbeISA) 797 if (!BusLogic_ProbeOptions.NoProbeISA)
798 if (PrimaryProbeInfo->IO_Address == 0 && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe330 : check_region(0x330, BusLogic_MultiMasterAddressCount) == 0)) { 798 if (PrimaryProbeInfo->IO_Address == 0 &&
799 (!BusLogic_ProbeOptions.LimitedProbeISA ||
800 BusLogic_ProbeOptions.Probe330)) {
799 PrimaryProbeInfo->HostAdapterType = BusLogic_MultiMaster; 801 PrimaryProbeInfo->HostAdapterType = BusLogic_MultiMaster;
800 PrimaryProbeInfo->HostAdapterBusType = BusLogic_ISA_Bus; 802 PrimaryProbeInfo->HostAdapterBusType = BusLogic_ISA_Bus;
801 PrimaryProbeInfo->IO_Address = 0x330; 803 PrimaryProbeInfo->IO_Address = 0x330;
@@ -805,15 +807,25 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
805 omitting the Primary I/O Address which has already been handled. 807 omitting the Primary I/O Address which has already been handled.
806 */ 808 */
807 if (!BusLogic_ProbeOptions.NoProbeISA) { 809 if (!BusLogic_ProbeOptions.NoProbeISA) {
808 if (!StandardAddressSeen[1] && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe334 : check_region(0x334, BusLogic_MultiMasterAddressCount) == 0)) 810 if (!StandardAddressSeen[1] &&
811 (!BusLogic_ProbeOptions.LimitedProbeISA ||
812 BusLogic_ProbeOptions.Probe334))
809 BusLogic_AppendProbeAddressISA(0x334); 813 BusLogic_AppendProbeAddressISA(0x334);
810 if (!StandardAddressSeen[2] && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe230 : check_region(0x230, BusLogic_MultiMasterAddressCount) == 0)) 814 if (!StandardAddressSeen[2] &&
815 (!BusLogic_ProbeOptions.LimitedProbeISA ||
816 BusLogic_ProbeOptions.Probe230))
811 BusLogic_AppendProbeAddressISA(0x230); 817 BusLogic_AppendProbeAddressISA(0x230);
812 if (!StandardAddressSeen[3] && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe234 : check_region(0x234, BusLogic_MultiMasterAddressCount) == 0)) 818 if (!StandardAddressSeen[3] &&
819 (!BusLogic_ProbeOptions.LimitedProbeISA ||
820 BusLogic_ProbeOptions.Probe234))
813 BusLogic_AppendProbeAddressISA(0x234); 821 BusLogic_AppendProbeAddressISA(0x234);
814 if (!StandardAddressSeen[4] && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe130 : check_region(0x130, BusLogic_MultiMasterAddressCount) == 0)) 822 if (!StandardAddressSeen[4] &&
823 (!BusLogic_ProbeOptions.LimitedProbeISA ||
824 BusLogic_ProbeOptions.Probe130))
815 BusLogic_AppendProbeAddressISA(0x130); 825 BusLogic_AppendProbeAddressISA(0x130);
816 if (!StandardAddressSeen[5] && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe134 : check_region(0x134, BusLogic_MultiMasterAddressCount) == 0)) 826 if (!StandardAddressSeen[5] &&
827 (!BusLogic_ProbeOptions.LimitedProbeISA ||
828 BusLogic_ProbeOptions.Probe134))
817 BusLogic_AppendProbeAddressISA(0x134); 829 BusLogic_AppendProbeAddressISA(0x134);
818 } 830 }
819 /* 831 /*
@@ -2220,22 +2232,35 @@ static int __init BusLogic_init(void)
2220 HostAdapter->PCI_Device = ProbeInfo->PCI_Device; 2232 HostAdapter->PCI_Device = ProbeInfo->PCI_Device;
2221 HostAdapter->IRQ_Channel = ProbeInfo->IRQ_Channel; 2233 HostAdapter->IRQ_Channel = ProbeInfo->IRQ_Channel;
2222 HostAdapter->AddressCount = BusLogic_HostAdapterAddressCount[HostAdapter->HostAdapterType]; 2234 HostAdapter->AddressCount = BusLogic_HostAdapterAddressCount[HostAdapter->HostAdapterType];
2235
2236 /*
2237 Make sure region is free prior to probing.
2238 */
2239 if (!request_region(HostAdapter->IO_Address, HostAdapter->AddressCount,
2240 "BusLogic"))
2241 continue;
2223 /* 2242 /*
2224 Probe the Host Adapter. If unsuccessful, abort further initialization. 2243 Probe the Host Adapter. If unsuccessful, abort further initialization.
2225 */ 2244 */
2226 if (!BusLogic_ProbeHostAdapter(HostAdapter)) 2245 if (!BusLogic_ProbeHostAdapter(HostAdapter)) {
2246 release_region(HostAdapter->IO_Address, HostAdapter->AddressCount);
2227 continue; 2247 continue;
2248 }
2228 /* 2249 /*
2229 Hard Reset the Host Adapter. If unsuccessful, abort further 2250 Hard Reset the Host Adapter. If unsuccessful, abort further
2230 initialization. 2251 initialization.
2231 */ 2252 */
2232 if (!BusLogic_HardwareResetHostAdapter(HostAdapter, true)) 2253 if (!BusLogic_HardwareResetHostAdapter(HostAdapter, true)) {
2254 release_region(HostAdapter->IO_Address, HostAdapter->AddressCount);
2233 continue; 2255 continue;
2256 }
2234 /* 2257 /*
2235 Check the Host Adapter. If unsuccessful, abort further initialization. 2258 Check the Host Adapter. If unsuccessful, abort further initialization.
2236 */ 2259 */
2237 if (!BusLogic_CheckHostAdapter(HostAdapter)) 2260 if (!BusLogic_CheckHostAdapter(HostAdapter)) {
2261 release_region(HostAdapter->IO_Address, HostAdapter->AddressCount);
2238 continue; 2262 continue;
2263 }
2239 /* 2264 /*
2240 Initialize the Driver Options field if provided. 2265 Initialize the Driver Options field if provided.
2241 */ 2266 */
@@ -2247,16 +2272,6 @@ static int __init BusLogic_init(void)
2247 */ 2272 */
2248 BusLogic_AnnounceDriver(HostAdapter); 2273 BusLogic_AnnounceDriver(HostAdapter);
2249 /* 2274 /*
2250 Register usage of the I/O Address range. From this point onward, any
2251 failure will be assumed to be due to a problem with the Host Adapter,
2252 rather than due to having mistakenly identified this port as belonging
2253 to a BusLogic Host Adapter. The I/O Address range will not be
2254 released, thereby preventing it from being incorrectly identified as
2255 any other type of Host Adapter.
2256 */
2257 if (!request_region(HostAdapter->IO_Address, HostAdapter->AddressCount, "BusLogic"))
2258 continue;
2259 /*
2260 Register the SCSI Host structure. 2275 Register the SCSI Host structure.
2261 */ 2276 */
2262 2277
@@ -2280,6 +2295,12 @@ static int __init BusLogic_init(void)
2280 Acquire the System Resources necessary to use the Host Adapter, then 2295 Acquire the System Resources necessary to use the Host Adapter, then
2281 Create the Initial CCBs, Initialize the Host Adapter, and finally 2296 Create the Initial CCBs, Initialize the Host Adapter, and finally
2282 perform Target Device Inquiry. 2297 perform Target Device Inquiry.
2298
2299 From this point onward, any failure will be assumed to be due to a
2300 problem with the Host Adapter, rather than due to having mistakenly
2301 identified this port as belonging to a BusLogic Host Adapter. The
2302 I/O Address range will not be released, thereby preventing it from
2303 being incorrectly identified as any other type of Host Adapter.
2283 */ 2304 */
2284 if (BusLogic_ReadHostAdapterConfiguration(HostAdapter) && 2305 if (BusLogic_ReadHostAdapterConfiguration(HostAdapter) &&
2285 BusLogic_ReportHostAdapterConfiguration(HostAdapter) && 2306 BusLogic_ReportHostAdapterConfiguration(HostAdapter) &&
@@ -3598,6 +3619,7 @@ static void __exit BusLogic_exit(void)
3598 3619
3599__setup("BusLogic=", BusLogic_Setup); 3620__setup("BusLogic=", BusLogic_Setup);
3600 3621
3622#ifdef MODULE
3601static struct pci_device_id BusLogic_pci_tbl[] __devinitdata = { 3623static struct pci_device_id BusLogic_pci_tbl[] __devinitdata = {
3602 { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, 3624 { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
3603 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 3625 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -3607,6 +3629,7 @@ static struct pci_device_id BusLogic_pci_tbl[] __devinitdata = {
3607 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 3629 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
3608 { } 3630 { }
3609}; 3631};
3632#endif
3610MODULE_DEVICE_TABLE(pci, BusLogic_pci_tbl); 3633MODULE_DEVICE_TABLE(pci, BusLogic_pci_tbl);
3611 3634
3612module_init(BusLogic_init); 3635module_init(BusLogic_init);
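Taken together, the BusLogic.c hunks above drop the old check_region() probing in favour of reserving the I/O range with request_region() before the adapter is touched, and releasing it again on every failed probe path. A condensed, illustrative restatement of the resulting probe order (not the literal driver code, which keeps the three checks as separate if blocks) is:

	if (!request_region(HostAdapter->IO_Address,
			    HostAdapter->AddressCount, "BusLogic"))
		continue;	/* someone else already owns this port range */

	if (!BusLogic_ProbeHostAdapter(HostAdapter) ||
	    !BusLogic_HardwareResetHostAdapter(HostAdapter, true) ||
	    !BusLogic_CheckHostAdapter(HostAdapter)) {
		/* Not a usable BusLogic adapter after all: give the region
		   back so another driver can claim it. */
		release_region(HostAdapter->IO_Address,
			       HostAdapter->AddressCount);
		continue;
	}
	/* From here on the region stays reserved for this adapter. */
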
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index fcc4cb6c7f46..e62d23f65180 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -170,7 +170,7 @@ config CHR_DEV_SCH
170 170
171 If you want to compile this as a module ( = code which can be 171 If you want to compile this as a module ( = code which can be
172 inserted in and removed from the running kernel whenever you want), 172 inserted in and removed from the running kernel whenever you want),
173 say M here and read <file:Documentation/modules.txt> and 173 say M here and read <file:Documentation/kbuild/modules.txt> and
174 <file:Documentation/scsi.txt>. The module will be called ch.o. 174 <file:Documentation/scsi.txt>. The module will be called ch.o.
175 If unsure, say N. 175 If unsure, say N.
176 176
@@ -241,6 +241,12 @@ config SCSI_SCAN_ASYNC
241 You can override this choice by specifying "scsi_mod.scan=sync" 241 You can override this choice by specifying "scsi_mod.scan=sync"
242 or async on the kernel's command line. 242 or async on the kernel's command line.
243 243
244config SCSI_WAIT_SCAN
245 tristate
246 default m
247 depends on SCSI
248 depends on MODULES
249
244menu "SCSI Transports" 250menu "SCSI Transports"
245 depends on SCSI 251 depends on SCSI
246 252
@@ -1194,17 +1200,6 @@ config SCSI_NCR53C8XX_SYNC
1194 There is no safe option other than using good cabling, right 1200 There is no safe option other than using good cabling, right
1195 terminations and SCSI conformant devices. 1201 terminations and SCSI conformant devices.
1196 1202
1197config SCSI_NCR53C8XX_PROFILE
1198 bool "enable profiling"
1199 depends on SCSI_ZALON || SCSI_NCR_Q720
1200 help
1201 This option allows you to enable profiling information gathering.
1202 These statistics are not very accurate due to the low frequency
1203 of the kernel clock (100 Hz on i386) and have performance impact
1204 on systems that use very fast devices.
1205
1206 The normal answer therefore is N.
1207
1208config SCSI_NCR53C8XX_NO_DISCONNECT 1203config SCSI_NCR53C8XX_NO_DISCONNECT
1209 bool "not allow targets to disconnect" 1204 bool "not allow targets to disconnect"
1210 depends on (SCSI_ZALON || SCSI_NCR_Q720) && SCSI_NCR53C8XX_DEFAULT_TAGS=0 1205 depends on (SCSI_ZALON || SCSI_NCR_Q720) && SCSI_NCR53C8XX_DEFAULT_TAGS=0
@@ -1334,11 +1329,6 @@ config SCSI_SIM710
1334 1329
1335 It currently supports Compaq EISA cards and NCR MCA cards 1330 It currently supports Compaq EISA cards and NCR MCA cards
1336 1331
1337config 53C700_IO_MAPPED
1338 bool
1339 depends on SCSI_SIM710
1340 default y
1341
1342config SCSI_SYM53C416 1332config SCSI_SYM53C416
1343 tristate "Symbios 53c416 SCSI support" 1333 tristate "Symbios 53c416 SCSI support"
1344 depends on ISA && SCSI 1334 depends on ISA && SCSI
@@ -1649,7 +1639,7 @@ config OKTAGON_SCSI
1649 1639
1650config ATARI_SCSI 1640config ATARI_SCSI
1651 tristate "Atari native SCSI support" 1641 tristate "Atari native SCSI support"
1652 depends on ATARI && SCSI && BROKEN 1642 depends on ATARI && SCSI
1653 select SCSI_SPI_ATTRS 1643 select SCSI_SPI_ATTRS
1654 ---help--- 1644 ---help---
1655 If you have an Atari with built-in NCR5380 SCSI controller (TT, 1645 If you have an Atari with built-in NCR5380 SCSI controller (TT,
@@ -1793,7 +1783,7 @@ config ZFCP
1793 1783
1794 This driver is also available as a module. This module will be 1784 This driver is also available as a module. This module will be
1795 called zfcp. If you want to compile it as a module, say M here 1785 called zfcp. If you want to compile it as a module, say M here
1796 and read <file:Documentation/modules.txt>. 1786 and read <file:Documentation/kbuild/modules.txt>.
1797 1787
1798config SCSI_SRP 1788config SCSI_SRP
1799 tristate "SCSI RDMA Protocol helper library" 1789 tristate "SCSI RDMA Protocol helper library"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 70cff4c599d7..51e884fa10b0 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -146,7 +146,7 @@ obj-$(CONFIG_CHR_DEV_SCH) += ch.o
146# This goes last, so that "real" scsi devices probe earlier 146# This goes last, so that "real" scsi devices probe earlier
147obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o 147obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
148 148
149obj-$(CONFIG_SCSI) += scsi_wait_scan.o 149obj-$(CONFIG_SCSI_WAIT_SCAN) += scsi_wait_scan.o
150 150
151scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \ 151scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \
152 scsicam.o scsi_error.o scsi_lib.o \ 152 scsicam.o scsi_error.o scsi_lib.o \
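The Kconfig and Makefile hunks above stop building scsi_wait_scan.o unconditionally with CONFIG_SCSI and hang it off a new CONFIG_SCSI_WAIT_SCAN tristate instead, which defaults to m and only exists when module support is enabled, since the helper is only useful as a loadable module. For orientation, a wait-scan module of this kind is essentially a one-call wrapper around scsi_complete_async_scans(); a minimal sketch, with the private header name assumed, looks like:

	#include <linux/module.h>
	#include "scsi_priv.h"		/* declares scsi_complete_async_scans(); header name assumed */

	static int __init wait_scan_init(void)
	{
		/* Returns only once all asynchronous SCSI scans have finished,
		   so "modprobe scsi_wait_scan" acts as a synchronisation point
		   for initrd/initramfs scripts. */
		scsi_complete_async_scans();
		return 0;
	}

	static void __exit wait_scan_exit(void)
	{
	}

	MODULE_DESCRIPTION("SCSI wait for asynchronous scans");
	MODULE_LICENSE("GPL");

	module_init(wait_scan_init);
	module_exit(wait_scan_exit);
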
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index d789e61bdc49..1e82c69b36b0 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -5,7 +5,7 @@
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com) 8 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -172,6 +172,30 @@ MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size.
172int expose_physicals = -1; 172int expose_physicals = -1;
173module_param(expose_physicals, int, S_IRUGO|S_IWUSR); 173module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
174MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on"); 174MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on");
175
176
177static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
178 struct fib *fibptr) {
179 struct scsi_device *device;
180
181 if (unlikely(!scsicmd || !scsicmd->scsi_done )) {
182 dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"))
183;
184 aac_fib_complete(fibptr);
185 aac_fib_free(fibptr);
186 return 0;
187 }
188 scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
189 device = scsicmd->device;
190 if (unlikely(!device || !scsi_device_online(device))) {
191 dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
192 aac_fib_complete(fibptr);
193 aac_fib_free(fibptr);
194 return 0;
195 }
196 return 1;
197}
198
175/** 199/**
176 * aac_get_config_status - check the adapter configuration 200 * aac_get_config_status - check the adapter configuration
177 * @common: adapter to query 201 * @common: adapter to query
@@ -258,13 +282,10 @@ int aac_get_containers(struct aac_dev *dev)
258 u32 index; 282 u32 index;
259 int status = 0; 283 int status = 0;
260 struct fib * fibptr; 284 struct fib * fibptr;
261 unsigned instance;
262 struct aac_get_container_count *dinfo; 285 struct aac_get_container_count *dinfo;
263 struct aac_get_container_count_resp *dresp; 286 struct aac_get_container_count_resp *dresp;
264 int maximum_num_containers = MAXIMUM_NUM_CONTAINERS; 287 int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
265 288
266 instance = dev->scsi_host_ptr->unique_id;
267
268 if (!(fibptr = aac_fib_alloc(dev))) 289 if (!(fibptr = aac_fib_alloc(dev)))
269 return -ENOMEM; 290 return -ENOMEM;
270 291
@@ -284,88 +305,35 @@ int aac_get_containers(struct aac_dev *dev)
284 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); 305 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
285 aac_fib_complete(fibptr); 306 aac_fib_complete(fibptr);
286 } 307 }
308 aac_fib_free(fibptr);
287 309
288 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) 310 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
289 maximum_num_containers = MAXIMUM_NUM_CONTAINERS; 311 maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
290 fsa_dev_ptr = kmalloc( 312 fsa_dev_ptr = kmalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
291 sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL); 313 GFP_KERNEL);
292 if (!fsa_dev_ptr) { 314 if (!fsa_dev_ptr)
293 aac_fib_free(fibptr);
294 return -ENOMEM; 315 return -ENOMEM;
295 }
296 memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers); 316 memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);
297 317
298 dev->fsa_dev = fsa_dev_ptr; 318 dev->fsa_dev = fsa_dev_ptr;
299 dev->maximum_num_containers = maximum_num_containers; 319 dev->maximum_num_containers = maximum_num_containers;
300 320
301 for (index = 0; index < dev->maximum_num_containers; index++) { 321 for (index = 0; index < dev->maximum_num_containers; ) {
302 struct aac_query_mount *dinfo;
303 struct aac_mount *dresp;
304
305 fsa_dev_ptr[index].devname[0] = '\0'; 322 fsa_dev_ptr[index].devname[0] = '\0';
306 323
307 aac_fib_init(fibptr); 324 status = aac_probe_container(dev, index);
308 dinfo = (struct aac_query_mount *) fib_data(fibptr);
309
310 dinfo->command = cpu_to_le32(VM_NameServe);
311 dinfo->count = cpu_to_le32(index);
312 dinfo->type = cpu_to_le32(FT_FILESYS);
313 325
314 status = aac_fib_send(ContainerCommand, 326 if (status < 0) {
315 fibptr,
316 sizeof (struct aac_query_mount),
317 FsaNormal,
318 1, 1,
319 NULL, NULL);
320 if (status < 0 ) {
321 printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n"); 327 printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
322 break; 328 break;
323 } 329 }
324 dresp = (struct aac_mount *)fib_data(fibptr);
325 330
326 if ((le32_to_cpu(dresp->status) == ST_OK) &&
327 (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
328 dinfo->command = cpu_to_le32(VM_NameServe64);
329 dinfo->count = cpu_to_le32(index);
330 dinfo->type = cpu_to_le32(FT_FILESYS);
331
332 if (aac_fib_send(ContainerCommand,
333 fibptr,
334 sizeof(struct aac_query_mount),
335 FsaNormal,
336 1, 1,
337 NULL, NULL) < 0)
338 continue;
339 } else
340 dresp->mnt[0].capacityhigh = 0;
341
342 dprintk ((KERN_DEBUG
343 "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%llu\n",
344 (int)index, (int)le32_to_cpu(dresp->status),
345 (int)le32_to_cpu(dresp->mnt[0].vol),
346 (int)le32_to_cpu(dresp->mnt[0].state),
347 ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
348 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32)));
349 if ((le32_to_cpu(dresp->status) == ST_OK) &&
350 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
351 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
352 fsa_dev_ptr[index].valid = 1;
353 fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
354 fsa_dev_ptr[index].size
355 = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
356 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
357 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
358 fsa_dev_ptr[index].ro = 1;
359 }
360 aac_fib_complete(fibptr);
361 /* 331 /*
362 * If there are no more containers, then stop asking. 332 * If there are no more containers, then stop asking.
363 */ 333 */
364 if ((index + 1) >= le32_to_cpu(dresp->count)){ 334 if (++index >= status)
365 break; 335 break;
366 }
367 } 336 }
368 aac_fib_free(fibptr);
369 return status; 337 return status;
370} 338}
371 339
@@ -382,8 +350,9 @@ static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigne
382 buf = scsicmd->request_buffer; 350 buf = scsicmd->request_buffer;
383 transfer_len = min(scsicmd->request_bufflen, len + offset); 351 transfer_len = min(scsicmd->request_bufflen, len + offset);
384 } 352 }
385 353 transfer_len -= offset;
386 memcpy(buf + offset, data, transfer_len - offset); 354 if (buf && transfer_len)
355 memcpy(buf + offset, data, transfer_len);
387 356
388 if (scsicmd->use_sg) 357 if (scsicmd->use_sg)
389 kunmap_atomic(buf - sg->offset, KM_IRQ0); 358 kunmap_atomic(buf - sg->offset, KM_IRQ0);
@@ -396,7 +365,9 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
396 struct scsi_cmnd * scsicmd; 365 struct scsi_cmnd * scsicmd;
397 366
398 scsicmd = (struct scsi_cmnd *) context; 367 scsicmd = (struct scsi_cmnd *) context;
399 scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL; 368
369 if (!aac_valid_context(scsicmd, fibptr))
370 return;
400 371
401 dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies)); 372 dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
402 BUG_ON(fibptr == NULL); 373 BUG_ON(fibptr == NULL);
@@ -431,7 +402,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
431/** 402/**
432 * aac_get_container_name - get container name, none blocking. 403 * aac_get_container_name - get container name, none blocking.
433 */ 404 */
434static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid) 405static int aac_get_container_name(struct scsi_cmnd * scsicmd)
435{ 406{
436 int status; 407 int status;
437 struct aac_get_name *dinfo; 408 struct aac_get_name *dinfo;
@@ -448,7 +419,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
448 419
449 dinfo->command = cpu_to_le32(VM_ContainerConfig); 420 dinfo->command = cpu_to_le32(VM_ContainerConfig);
450 dinfo->type = cpu_to_le32(CT_READ_NAME); 421 dinfo->type = cpu_to_le32(CT_READ_NAME);
451 dinfo->cid = cpu_to_le32(cid); 422 dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
452 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data)); 423 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
453 424
454 status = aac_fib_send(ContainerCommand, 425 status = aac_fib_send(ContainerCommand,
@@ -473,85 +444,192 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
473 return -1; 444 return -1;
474} 445}
475 446
476/** 447static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
477 * aac_probe_container - query a logical volume 448{
478 * @dev: device to query 449 struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
479 * @cid: container identifier 450
480 * 451 if (fsa_dev_ptr[scmd_id(scsicmd)].valid)
481 * Queries the controller about the given volume. The volume information 452 return aac_scsi_cmd(scsicmd);
482 * is updated in the struct fsa_dev_info structure rather than returned. 453
483 */ 454 scsicmd->result = DID_NO_CONNECT << 16;
484 455 scsicmd->scsi_done(scsicmd);
485int aac_probe_container(struct aac_dev *dev, int cid) 456 return 0;
457}
458
459static int _aac_probe_container2(void * context, struct fib * fibptr)
486{ 460{
487 struct fsa_dev_info *fsa_dev_ptr; 461 struct fsa_dev_info *fsa_dev_ptr;
488 int status; 462 int (*callback)(struct scsi_cmnd *);
463 struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
464
465 if (!aac_valid_context(scsicmd, fibptr))
466 return 0;
467
468 fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
469
470 scsicmd->SCp.Status = 0;
471 if (fsa_dev_ptr) {
472 struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
473 fsa_dev_ptr += scmd_id(scsicmd);
474
475 if ((le32_to_cpu(dresp->status) == ST_OK) &&
476 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
477 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
478 fsa_dev_ptr->valid = 1;
479 fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
480 fsa_dev_ptr->size
481 = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
482 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
483 fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
484 }
485 if ((fsa_dev_ptr->valid & 1) == 0)
486 fsa_dev_ptr->valid = 0;
487 scsicmd->SCp.Status = le32_to_cpu(dresp->count);
488 }
489 aac_fib_complete(fibptr);
490 aac_fib_free(fibptr);
491 callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
492 scsicmd->SCp.ptr = NULL;
493 return (*callback)(scsicmd);
494}
495
496static int _aac_probe_container1(void * context, struct fib * fibptr)
497{
498 struct scsi_cmnd * scsicmd;
499 struct aac_mount * dresp;
489 struct aac_query_mount *dinfo; 500 struct aac_query_mount *dinfo;
490 struct aac_mount *dresp; 501 int status;
491 struct fib * fibptr;
492 unsigned instance;
493 502
494 fsa_dev_ptr = dev->fsa_dev; 503 dresp = (struct aac_mount *) fib_data(fibptr);
495 if (!fsa_dev_ptr) 504 dresp->mnt[0].capacityhigh = 0;
496 return -ENOMEM; 505 if ((le32_to_cpu(dresp->status) != ST_OK) ||
497 instance = dev->scsi_host_ptr->unique_id; 506 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE))
507 return _aac_probe_container2(context, fibptr);
508 scsicmd = (struct scsi_cmnd *) context;
509 scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
498 510
499 if (!(fibptr = aac_fib_alloc(dev))) 511 if (!aac_valid_context(scsicmd, fibptr))
500 return -ENOMEM; 512 return 0;
501 513
502 aac_fib_init(fibptr); 514 aac_fib_init(fibptr);
503 515
504 dinfo = (struct aac_query_mount *)fib_data(fibptr); 516 dinfo = (struct aac_query_mount *)fib_data(fibptr);
505 517
506 dinfo->command = cpu_to_le32(VM_NameServe); 518 dinfo->command = cpu_to_le32(VM_NameServe64);
507 dinfo->count = cpu_to_le32(cid); 519 dinfo->count = cpu_to_le32(scmd_id(scsicmd));
508 dinfo->type = cpu_to_le32(FT_FILESYS); 520 dinfo->type = cpu_to_le32(FT_FILESYS);
509 521
510 status = aac_fib_send(ContainerCommand, 522 status = aac_fib_send(ContainerCommand,
511 fibptr, 523 fibptr,
512 sizeof(struct aac_query_mount), 524 sizeof(struct aac_query_mount),
513 FsaNormal, 525 FsaNormal,
514 1, 1, 526 0, 1,
515 NULL, NULL); 527 (fib_callback) _aac_probe_container2,
528 (void *) scsicmd);
529 /*
530 * Check that the command queued to the controller
531 */
532 if (status == -EINPROGRESS) {
533 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
534 return 0;
535 }
516 if (status < 0) { 536 if (status < 0) {
517 printk(KERN_WARNING "aacraid: aac_probe_container query failed.\n"); 537 /* Inherit results from VM_NameServe, if any */
518 goto error; 538 dresp->status = cpu_to_le32(ST_OK);
539 return _aac_probe_container2(context, fibptr);
519 } 540 }
541 return 0;
542}
520 543
521 dresp = (struct aac_mount *) fib_data(fibptr); 544static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
545{
546 struct fib * fibptr;
547 int status = -ENOMEM;
522 548
523 if ((le32_to_cpu(dresp->status) == ST_OK) && 549 if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) {
524 (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) { 550 struct aac_query_mount *dinfo;
525 dinfo->command = cpu_to_le32(VM_NameServe64);
526 dinfo->count = cpu_to_le32(cid);
527 dinfo->type = cpu_to_le32(FT_FILESYS);
528 551
529 if (aac_fib_send(ContainerCommand, 552 aac_fib_init(fibptr);
530 fibptr, 553
531 sizeof(struct aac_query_mount), 554 dinfo = (struct aac_query_mount *)fib_data(fibptr);
532 FsaNormal, 555
533 1, 1, 556 dinfo->command = cpu_to_le32(VM_NameServe);
534 NULL, NULL) < 0) 557 dinfo->count = cpu_to_le32(scmd_id(scsicmd));
535 goto error; 558 dinfo->type = cpu_to_le32(FT_FILESYS);
536 } else 559 scsicmd->SCp.ptr = (char *)callback;
537 dresp->mnt[0].capacityhigh = 0;
538 560
539 if ((le32_to_cpu(dresp->status) == ST_OK) && 561 status = aac_fib_send(ContainerCommand,
540 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && 562 fibptr,
541 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { 563 sizeof(struct aac_query_mount),
542 fsa_dev_ptr[cid].valid = 1; 564 FsaNormal,
543 fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol); 565 0, 1,
544 fsa_dev_ptr[cid].size 566 (fib_callback) _aac_probe_container1,
545 = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) + 567 (void *) scsicmd);
546 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32); 568 /*
547 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) 569 * Check that the command queued to the controller
548 fsa_dev_ptr[cid].ro = 1; 570 */
571 if (status == -EINPROGRESS) {
572 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
573 return 0;
574 }
575 if (status < 0) {
576 scsicmd->SCp.ptr = NULL;
577 aac_fib_complete(fibptr);
578 aac_fib_free(fibptr);
579 }
549 } 580 }
581 if (status < 0) {
582 struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
583 if (fsa_dev_ptr) {
584 fsa_dev_ptr += scmd_id(scsicmd);
585 if ((fsa_dev_ptr->valid & 1) == 0) {
586 fsa_dev_ptr->valid = 0;
587 return (*callback)(scsicmd);
588 }
589 }
590 }
591 return status;
592}
550 593
551error: 594/**
552 aac_fib_complete(fibptr); 595 * aac_probe_container - query a logical volume
553 aac_fib_free(fibptr); 596 * @dev: device to query
597 * @cid: container identifier
598 *
599 * Queries the controller about the given volume. The volume information
600 * is updated in the struct fsa_dev_info structure rather than returned.
601 */
602static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
603{
604 scsicmd->device = NULL;
605 return 0;
606}
607
608int aac_probe_container(struct aac_dev *dev, int cid)
609{
610 struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
611 struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
612 int status;
554 613
614 if (!scsicmd || !scsidev) {
615 kfree(scsicmd);
616 kfree(scsidev);
617 return -ENOMEM;
618 }
619 scsicmd->list.next = NULL;
620 scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))_aac_probe_container1;
621
622 scsicmd->device = scsidev;
623 scsidev->sdev_state = 0;
624 scsidev->id = cid;
625 scsidev->host = dev->scsi_host_ptr;
626
627 if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0)
628 while (scsicmd->device == scsidev)
629 schedule();
630 kfree(scsidev);
631 status = scsicmd->SCp.Status;
632 kfree(scsicmd);
555 return status; 633 return status;
556} 634}
557 635
@@ -1115,6 +1193,12 @@ int aac_get_adapter_info(struct aac_dev* dev)
1115 printk(KERN_INFO "%s%d: serial %x\n", 1193 printk(KERN_INFO "%s%d: serial %x\n",
1116 dev->name, dev->id, 1194 dev->name, dev->id,
1117 le32_to_cpu(dev->adapter_info.serial[0])); 1195 le32_to_cpu(dev->adapter_info.serial[0]));
1196 if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) {
1197 printk(KERN_INFO "%s%d: TSID %.*s\n",
1198 dev->name, dev->id,
1199 (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
1200 dev->supplement_adapter_info.VpdInfo.Tsid);
1201 }
1118 } 1202 }
1119 1203
1120 dev->nondasd_support = 0; 1204 dev->nondasd_support = 0;
@@ -1241,7 +1325,9 @@ static void io_callback(void *context, struct fib * fibptr)
1241 u32 cid; 1325 u32 cid;
1242 1326
1243 scsicmd = (struct scsi_cmnd *) context; 1327 scsicmd = (struct scsi_cmnd *) context;
1244 scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL; 1328
1329 if (!aac_valid_context(scsicmd, fibptr))
1330 return;
1245 1331
1246 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 1332 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1247 cid = scmd_id(scsicmd); 1333 cid = scmd_id(scsicmd);
@@ -1317,7 +1403,7 @@ static void io_callback(void *context, struct fib * fibptr)
1317 scsicmd->scsi_done(scsicmd); 1403 scsicmd->scsi_done(scsicmd);
1318} 1404}
1319 1405
1320static int aac_read(struct scsi_cmnd * scsicmd, int cid) 1406static int aac_read(struct scsi_cmnd * scsicmd)
1321{ 1407{
1322 u64 lba; 1408 u64 lba;
1323 u32 count; 1409 u32 count;
@@ -1331,7 +1417,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1331 */ 1417 */
1332 switch (scsicmd->cmnd[0]) { 1418 switch (scsicmd->cmnd[0]) {
1333 case READ_6: 1419 case READ_6:
1334 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid)); 1420 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd)));
1335 1421
1336 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | 1422 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
1337 (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; 1423 (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
@@ -1341,7 +1427,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1341 count = 256; 1427 count = 256;
1342 break; 1428 break;
1343 case READ_16: 1429 case READ_16:
1344 dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", cid)); 1430 dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd)));
1345 1431
1346 lba = ((u64)scsicmd->cmnd[2] << 56) | 1432 lba = ((u64)scsicmd->cmnd[2] << 56) |
1347 ((u64)scsicmd->cmnd[3] << 48) | 1433 ((u64)scsicmd->cmnd[3] << 48) |
@@ -1355,7 +1441,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1355 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13]; 1441 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
1356 break; 1442 break;
1357 case READ_12: 1443 case READ_12:
1358 dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", cid)); 1444 dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd)));
1359 1445
1360 lba = ((u64)scsicmd->cmnd[2] << 24) | 1446 lba = ((u64)scsicmd->cmnd[2] << 24) |
1361 (scsicmd->cmnd[3] << 16) | 1447 (scsicmd->cmnd[3] << 16) |
@@ -1365,7 +1451,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1365 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; 1451 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1366 break; 1452 break;
1367 default: 1453 default:
1368 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid)); 1454 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd)));
1369 1455
1370 lba = ((u64)scsicmd->cmnd[2] << 24) | 1456 lba = ((u64)scsicmd->cmnd[2] << 24) |
1371 (scsicmd->cmnd[3] << 16) | 1457 (scsicmd->cmnd[3] << 16) |
@@ -1405,7 +1491,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1405 return 0; 1491 return 0;
1406} 1492}
1407 1493
1408static int aac_write(struct scsi_cmnd * scsicmd, int cid) 1494static int aac_write(struct scsi_cmnd * scsicmd)
1409{ 1495{
1410 u64 lba; 1496 u64 lba;
1411 u32 count; 1497 u32 count;
@@ -1424,7 +1510,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1424 if (count == 0) 1510 if (count == 0)
1425 count = 256; 1511 count = 256;
1426 } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */ 1512 } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
1427 dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", cid)); 1513 dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
1428 1514
1429 lba = ((u64)scsicmd->cmnd[2] << 56) | 1515 lba = ((u64)scsicmd->cmnd[2] << 56) |
1430 ((u64)scsicmd->cmnd[3] << 48) | 1516 ((u64)scsicmd->cmnd[3] << 48) |
@@ -1436,14 +1522,14 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1436 count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) | 1522 count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
1437 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13]; 1523 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
1438 } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */ 1524 } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
1439 dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", cid)); 1525 dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));
1440 1526
1441 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) 1527 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
1442 | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 1528 | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1443 count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16) 1529 count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
1444 | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; 1530 | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1445 } else { 1531 } else {
1446 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid)); 1532 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
1447 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 1533 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1448 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; 1534 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
1449 } 1535 }
@@ -1488,7 +1574,9 @@ static void synchronize_callback(void *context, struct fib *fibptr)
1488 struct scsi_cmnd *cmd; 1574 struct scsi_cmnd *cmd;
1489 1575
1490 cmd = context; 1576 cmd = context;
1491 cmd->SCp.phase = AAC_OWNER_MIDLEVEL; 1577
1578 if (!aac_valid_context(cmd, fibptr))
1579 return;
1492 1580
1493 dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n", 1581 dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
1494 smp_processor_id(), jiffies)); 1582 smp_processor_id(), jiffies));
@@ -1523,7 +1611,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
1523 cmd->scsi_done(cmd); 1611 cmd->scsi_done(cmd);
1524} 1612}
1525 1613
1526static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid) 1614static int aac_synchronize(struct scsi_cmnd *scsicmd)
1527{ 1615{
1528 int status; 1616 int status;
1529 struct fib *cmd_fibcontext; 1617 struct fib *cmd_fibcontext;
@@ -1568,7 +1656,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1568 synchronizecmd = fib_data(cmd_fibcontext); 1656 synchronizecmd = fib_data(cmd_fibcontext);
1569 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig); 1657 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
1570 synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE); 1658 synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
1571 synchronizecmd->cid = cpu_to_le32(cid); 1659 synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
1572 synchronizecmd->count = 1660 synchronizecmd->count =
1573 cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data)); 1661 cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
1574 1662
@@ -1646,29 +1734,12 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1646 case TEST_UNIT_READY: 1734 case TEST_UNIT_READY:
1647 if (dev->in_reset) 1735 if (dev->in_reset)
1648 return -1; 1736 return -1;
1649 spin_unlock_irq(host->host_lock); 1737 return _aac_probe_container(scsicmd,
1650 aac_probe_container(dev, cid); 1738 aac_probe_container_callback2);
1651 if ((fsa_dev_ptr[cid].valid & 1) == 0)
1652 fsa_dev_ptr[cid].valid = 0;
1653 spin_lock_irq(host->host_lock);
1654 if (fsa_dev_ptr[cid].valid == 0) {
1655 scsicmd->result = DID_NO_CONNECT << 16;
1656 scsicmd->scsi_done(scsicmd);
1657 return 0;
1658 }
1659 default: 1739 default:
1660 break; 1740 break;
1661 } 1741 }
1662 } 1742 }
1663 /*
1664 * If the target container still doesn't exist,
1665 * return failure
1666 */
1667 if (fsa_dev_ptr[cid].valid == 0) {
1668 scsicmd->result = DID_BAD_TARGET << 16;
1669 scsicmd->scsi_done(scsicmd);
1670 return 0;
1671 }
1672 } else { /* check for physical non-dasd devices */ 1743 } else { /* check for physical non-dasd devices */
1673 if ((dev->nondasd_support == 1) || expose_physicals) { 1744 if ((dev->nondasd_support == 1) || expose_physicals) {
1674 if (dev->in_reset) 1745 if (dev->in_reset)
@@ -1733,7 +1804,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1733 setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type); 1804 setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
1734 inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */ 1805 inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
1735 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data)); 1806 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
1736 return aac_get_container_name(scsicmd, cid); 1807 return aac_get_container_name(scsicmd);
1737 } 1808 }
1738 case SERVICE_ACTION_IN: 1809 case SERVICE_ACTION_IN:
1739 if (!(dev->raw_io_interface) || 1810 if (!(dev->raw_io_interface) ||
@@ -1899,7 +1970,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1899 min(sizeof(fsa_dev_ptr[cid].devname), 1970 min(sizeof(fsa_dev_ptr[cid].devname),
1900 sizeof(scsicmd->request->rq_disk->disk_name) + 1)); 1971 sizeof(scsicmd->request->rq_disk->disk_name) + 1));
1901 1972
1902 return aac_read(scsicmd, cid); 1973 return aac_read(scsicmd);
1903 1974
1904 case WRITE_6: 1975 case WRITE_6:
1905 case WRITE_10: 1976 case WRITE_10:
@@ -1907,11 +1978,11 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1907 case WRITE_16: 1978 case WRITE_16:
1908 if (dev->in_reset) 1979 if (dev->in_reset)
1909 return -1; 1980 return -1;
1910 return aac_write(scsicmd, cid); 1981 return aac_write(scsicmd);
1911 1982
1912 case SYNCHRONIZE_CACHE: 1983 case SYNCHRONIZE_CACHE:
1913 /* Issue FIB to tell Firmware to flush it's cache */ 1984 /* Issue FIB to tell Firmware to flush it's cache */
1914 return aac_synchronize(scsicmd, cid); 1985 return aac_synchronize(scsicmd);
1915 1986
1916 default: 1987 default:
1917 /* 1988 /*
@@ -2058,7 +2129,10 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
2058 struct scsi_cmnd *scsicmd; 2129 struct scsi_cmnd *scsicmd;
2059 2130
2060 scsicmd = (struct scsi_cmnd *) context; 2131 scsicmd = (struct scsi_cmnd *) context;
2061 scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL; 2132
2133 if (!aac_valid_context(scsicmd, fibptr))
2134 return;
2135
2062 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 2136 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2063 2137
2064 BUG_ON(fibptr == NULL); 2138 BUG_ON(fibptr == NULL);
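The net effect of the aachba.c changes above is that container probing no longer blocks inside aac_probe_container(): the VM_NameServe/VM_NameServe64 queries are sent with a completion callback, the follow-up action is parked in scsicmd->SCp.ptr, and every callback first runs the new aac_valid_context() guard so commands torn down during an adapter reset are dropped safely. A condensed restatement of the completion side (see _aac_probe_container2() above; this is not additional driver code) is:

	static int probe_completion(void *context, struct fib *fibptr)
	{
		struct scsi_cmnd *scsicmd = context;
		int (*callback)(struct scsi_cmnd *);

		if (!aac_valid_context(scsicmd, fibptr))	/* command or device gone */
			return 0;

		/* ... copy the aac_mount reply into dev->fsa_dev[scmd_id(scsicmd)] ... */

		aac_fib_complete(fibptr);
		aac_fib_free(fibptr);

		/* The continuation was stashed in SCp.ptr by _aac_probe_container(). */
		callback = (int (*)(struct scsi_cmnd *))scsicmd->SCp.ptr;
		scsicmd->SCp.ptr = NULL;
		return callback(scsicmd);
	}
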
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 39ecd0d22eb0..45ca3e801619 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,8 +12,8 @@
12 *----------------------------------------------------------------------------*/ 12 *----------------------------------------------------------------------------*/
13 13
14#ifndef AAC_DRIVER_BUILD 14#ifndef AAC_DRIVER_BUILD
15# define AAC_DRIVER_BUILD 2423 15# define AAC_DRIVER_BUILD 2437
16# define AAC_DRIVER_BRANCH "-mh3" 16# define AAC_DRIVER_BRANCH "-mh4"
17#endif 17#endif
18#define MAXIMUM_NUM_CONTAINERS 32 18#define MAXIMUM_NUM_CONTAINERS 32
19 19
@@ -48,49 +48,13 @@ struct diskparm
48 48
49 49
50/* 50/*
51 * DON'T CHANGE THE ORDER, this is set by the firmware 51 * Firmware constants
52 */ 52 */
53 53
54#define CT_NONE 0 54#define CT_NONE 0
55#define CT_VOLUME 1
56#define CT_MIRROR 2
57#define CT_STRIPE 3
58#define CT_RAID5 4
59#define CT_SSRW 5
60#define CT_SSRO 6
61#define CT_MORPH 7
62#define CT_PASSTHRU 8
63#define CT_RAID4 9
64#define CT_RAID10 10 /* stripe of mirror */
65#define CT_RAID00 11 /* stripe of stripe */
66#define CT_VOLUME_OF_MIRRORS 12 /* volume of mirror */
67#define CT_PSEUDO_RAID 13 /* really raid4 */
68#define CT_LAST_VOLUME_TYPE 14
69#define CT_OK 218 55#define CT_OK 218
70
71/*
72 * Types of objects addressable in some fashion by the client.
73 * This is a superset of those objects handled just by the filesystem
74 * and includes "raw" objects that an administrator would use to
75 * configure containers and filesystems.
76 */
77
78#define FT_REG 1 /* regular file */
79#define FT_DIR 2 /* directory */
80#define FT_BLK 3 /* "block" device - reserved */
81#define FT_CHR 4 /* "character special" device - reserved */
82#define FT_LNK 5 /* symbolic link */
83#define FT_SOCK 6 /* socket */
84#define FT_FIFO 7 /* fifo */
85#define FT_FILESYS 8 /* ADAPTEC's "FSA"(tm) filesystem */ 56#define FT_FILESYS 8 /* ADAPTEC's "FSA"(tm) filesystem */
86#define FT_DRIVE 9 /* physical disk - addressable in scsi by bus/id/lun */ 57#define FT_DRIVE 9 /* physical disk - addressable in scsi by bus/id/lun */
87#define FT_SLICE 10 /* virtual disk - raw volume - slice */
88#define FT_PARTITION 11 /* FSA partition - carved out of a slice - building block for containers */
89#define FT_VOLUME 12 /* Container - Volume Set */
90#define FT_STRIPE 13 /* Container - Stripe Set */
91#define FT_MIRROR 14 /* Container - Mirror Set */
92#define FT_RAID5 15 /* Container - Raid 5 Set */
93#define FT_DATABASE 16 /* Storage object with "foreign" content manager */
94 58
95/* 59/*
96 * Host side memory scatter gather list 60 * Host side memory scatter gather list
@@ -497,6 +461,7 @@ struct adapter_ops
497 void (*adapter_enable_int)(struct aac_dev *dev); 461 void (*adapter_enable_int)(struct aac_dev *dev);
498 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4); 462 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
499 int (*adapter_check_health)(struct aac_dev *dev); 463 int (*adapter_check_health)(struct aac_dev *dev);
464 int (*adapter_restart)(struct aac_dev *dev, int bled);
500 /* Transport operations */ 465 /* Transport operations */
501 int (*adapter_ioremap)(struct aac_dev * dev, u32 size); 466 int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
502 irqreturn_t (*adapter_intr)(int irq, void *dev_id); 467 irqreturn_t (*adapter_intr)(int irq, void *dev_id);
@@ -833,7 +798,7 @@ struct fib {
833 */ 798 */
834 struct list_head fiblink; 799 struct list_head fiblink;
835 void *data; 800 void *data;
836 struct hw_fib *hw_fib; /* Actual shared object */ 801 struct hw_fib *hw_fib_va; /* Actual shared object */
837 dma_addr_t hw_fib_pa; /* physical address of hw_fib*/ 802 dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
838}; 803};
839 804
@@ -878,10 +843,25 @@ struct aac_supplement_adapter_info
878 __le32 Version; 843 __le32 Version;
879 __le32 FeatureBits; 844 __le32 FeatureBits;
880 u8 SlotNumber; 845 u8 SlotNumber;
881 u8 ReservedPad0[0]; 846 u8 ReservedPad0[3];
882 u8 BuildDate[12]; 847 u8 BuildDate[12];
883 __le32 CurrentNumberPorts; 848 __le32 CurrentNumberPorts;
884 __le32 ReservedGrowth[24]; 849 struct {
850 u8 AssemblyPn[8];
851 u8 FruPn[8];
852 u8 BatteryFruPn[8];
853 u8 EcVersionString[8];
854 u8 Tsid[12];
855 } VpdInfo;
856 __le32 FlashFirmwareRevision;
857 __le32 FlashFirmwareBuild;
858 __le32 RaidTypeMorphOptions;
859 __le32 FlashFirmwareBootRevision;
860 __le32 FlashFirmwareBootBuild;
861 u8 MfgPcbaSerialNo[12];
862 u8 MfgWWNName[8];
863 __le32 MoreFeatureBits;
864 __le32 ReservedGrowth[1];
885}; 865};
886#define AAC_FEATURE_FALCON 0x00000010 866#define AAC_FEATURE_FALCON 0x00000010
887#define AAC_SIS_VERSION_V3 3 867#define AAC_SIS_VERSION_V3 3
@@ -970,7 +950,6 @@ struct aac_dev
970 struct fib *fibs; 950 struct fib *fibs;
971 951
972 struct fib *free_fib; 952 struct fib *free_fib;
973 struct fib *timeout_fib;
974 spinlock_t fib_lock; 953 spinlock_t fib_lock;
975 954
976 struct aac_queue_block *queues; 955 struct aac_queue_block *queues;
@@ -1060,6 +1039,9 @@ struct aac_dev
1060#define aac_adapter_check_health(dev) \ 1039#define aac_adapter_check_health(dev) \
1061 (dev)->a_ops.adapter_check_health(dev) 1040 (dev)->a_ops.adapter_check_health(dev)
1062 1041
1042#define aac_adapter_restart(dev,bled) \
1043 (dev)->a_ops.adapter_restart(dev,bled)
1044
1063#define aac_adapter_ioremap(dev, size) \ 1045#define aac_adapter_ioremap(dev, size) \
1064 (dev)->a_ops.adapter_ioremap(dev, size) 1046 (dev)->a_ops.adapter_ioremap(dev, size)
1065 1047
@@ -1516,8 +1498,7 @@ struct aac_mntent {
1516 struct creation_info create_info; /* if applicable */ 1498 struct creation_info create_info; /* if applicable */
1517 __le32 capacity; 1499 __le32 capacity;
1518 __le32 vol; /* substrate structure */ 1500 __le32 vol; /* substrate structure */
1519 __le32 obj; /* FT_FILESYS, 1501 __le32 obj; /* FT_FILESYS, etc. */
1520 FT_DATABASE, etc. */
1521 __le32 state; /* unready for mounting, 1502 __le32 state; /* unready for mounting,
1522 readonly, etc. */ 1503 readonly, etc. */
1523 union aac_contentinfo fileinfo; /* Info specific to content 1504 union aac_contentinfo fileinfo; /* Info specific to content
@@ -1817,7 +1798,7 @@ int aac_fib_send(u16 command, struct fib * context, unsigned long size, int prio
1817int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry); 1798int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
1818void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); 1799void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
1819int aac_fib_complete(struct fib * context); 1800int aac_fib_complete(struct fib * context);
1820#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data) 1801#define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data)
1821struct aac_dev *aac_init_adapter(struct aac_dev *dev); 1802struct aac_dev *aac_init_adapter(struct aac_dev *dev);
1822int aac_get_config_status(struct aac_dev *dev, int commit_flag); 1803int aac_get_config_status(struct aac_dev *dev, int commit_flag);
1823int aac_get_containers(struct aac_dev *dev); 1804int aac_get_containers(struct aac_dev *dev);
@@ -1840,8 +1821,11 @@ struct aac_driver_ident* aac_get_driver_ident(int devtype);
1840int aac_get_adapter_info(struct aac_dev* dev); 1821int aac_get_adapter_info(struct aac_dev* dev);
1841int aac_send_shutdown(struct aac_dev *dev); 1822int aac_send_shutdown(struct aac_dev *dev);
1842int aac_probe_container(struct aac_dev *dev, int cid); 1823int aac_probe_container(struct aac_dev *dev, int cid);
1824int _aac_rx_init(struct aac_dev *dev);
1825int aac_rx_select_comm(struct aac_dev *dev, int comm);
1843extern int numacb; 1826extern int numacb;
1844extern int acbsize; 1827extern int acbsize;
1845extern char aac_driver_version[]; 1828extern char aac_driver_version[];
1846extern int startup_timeout; 1829extern int startup_timeout;
1847extern int aif_timeout; 1830extern int aif_timeout;
1831extern int expose_physicals;
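Besides the hw_fib to hw_fib_va rename and the expanded aac_supplement_adapter_info layout, the header now adds an adapter_restart method to struct adapter_ops plus a matching aac_adapter_restart() wrapper macro. A hypothetical usage sketch (the wrapper function name below is illustrative only; the real call sites live in the per-adapter and commsup code) is:

	static int aac_check_and_recover(struct aac_dev *dev)
	{
		/* check_health returns a positive BlinkLED code when the
		   firmware has faulted, 0 when the adapter looks healthy. */
		int bled = aac_adapter_check_health(dev);

		if (bled > 0)
			bled = aac_adapter_restart(dev, bled);	/* new op added above */
		return bled;
	}
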
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index e21070f4eac1..72b0393b4596 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -5,7 +5,7 @@
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com) 8 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -64,12 +64,15 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
64 unsigned size; 64 unsigned size;
65 int retval; 65 int retval;
66 66
67 if (dev->in_reset) {
68 return -EBUSY;
69 }
67 fibptr = aac_fib_alloc(dev); 70 fibptr = aac_fib_alloc(dev);
68 if(fibptr == NULL) { 71 if(fibptr == NULL) {
69 return -ENOMEM; 72 return -ENOMEM;
70 } 73 }
71 74
72 kfib = fibptr->hw_fib; 75 kfib = fibptr->hw_fib_va;
73 /* 76 /*
74 * First copy in the header so that we can check the size field. 77 * First copy in the header so that we can check the size field.
75 */ 78 */
@@ -91,9 +94,9 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
91 goto cleanup; 94 goto cleanup;
92 } 95 }
93 /* Highjack the hw_fib */ 96 /* Highjack the hw_fib */
94 hw_fib = fibptr->hw_fib; 97 hw_fib = fibptr->hw_fib_va;
95 hw_fib_pa = fibptr->hw_fib_pa; 98 hw_fib_pa = fibptr->hw_fib_pa;
96 fibptr->hw_fib = kfib = pci_alloc_consistent(dev->pdev, size, &fibptr->hw_fib_pa); 99 fibptr->hw_fib_va = kfib = pci_alloc_consistent(dev->pdev, size, &fibptr->hw_fib_pa);
97 memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size); 100 memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
98 memcpy(kfib, hw_fib, dev->max_fib_size); 101 memcpy(kfib, hw_fib, dev->max_fib_size);
99 } 102 }
@@ -137,7 +140,7 @@ cleanup:
137 if (hw_fib) { 140 if (hw_fib) {
138 pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa); 141 pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa);
139 fibptr->hw_fib_pa = hw_fib_pa; 142 fibptr->hw_fib_pa = hw_fib_pa;
140 fibptr->hw_fib = hw_fib; 143 fibptr->hw_fib_va = hw_fib;
141 } 144 }
142 if (retval != -EINTR) 145 if (retval != -EINTR)
143 aac_fib_free(fibptr); 146 aac_fib_free(fibptr);
@@ -282,15 +285,15 @@ return_fib:
282 fib = list_entry(entry, struct fib, fiblink); 285 fib = list_entry(entry, struct fib, fiblink);
283 fibctx->count--; 286 fibctx->count--;
284 spin_unlock_irqrestore(&dev->fib_lock, flags); 287 spin_unlock_irqrestore(&dev->fib_lock, flags);
285 if (copy_to_user(f.fib, fib->hw_fib, sizeof(struct hw_fib))) { 288 if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
286 kfree(fib->hw_fib); 289 kfree(fib->hw_fib_va);
287 kfree(fib); 290 kfree(fib);
288 return -EFAULT; 291 return -EFAULT;
289 } 292 }
290 /* 293 /*
291 * Free the space occupied by this copy of the fib. 294 * Free the space occupied by this copy of the fib.
292 */ 295 */
293 kfree(fib->hw_fib); 296 kfree(fib->hw_fib_va);
294 kfree(fib); 297 kfree(fib);
295 status = 0; 298 status = 0;
296 } else { 299 } else {
@@ -340,7 +343,7 @@ int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
340 /* 343 /*
341 * Free the space occupied by this copy of the fib. 344 * Free the space occupied by this copy of the fib.
342 */ 345 */
343 kfree(fib->hw_fib); 346 kfree(fib->hw_fib_va);
344 kfree(fib); 347 kfree(fib);
345 } 348 }
346 /* 349 /*
@@ -388,10 +391,8 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
388 /* 391 /*
389 * Extract the fibctx from the input parameters 392 * Extract the fibctx from the input parameters
390 */ 393 */
391 if (fibctx->unique == (u32)(unsigned long)arg) { 394 if (fibctx->unique == (u32)(ptrdiff_t)arg) /* We found a winner */
392 /* We found a winner */
393 break; 395 break;
394 }
395 entry = entry->next; 396 entry = entry->next;
396 fibctx = NULL; 397 fibctx = NULL;
397 } 398 }
@@ -465,16 +466,20 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
465 void *sg_list[32]; 466 void *sg_list[32];
466 u32 sg_indx = 0; 467 u32 sg_indx = 0;
467 u32 byte_count = 0; 468 u32 byte_count = 0;
468 u32 actual_fibsize = 0; 469 u32 actual_fibsize64, actual_fibsize = 0;
469 int i; 470 int i;
470 471
471 472
473 if (dev->in_reset) {
474 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
475 return -EBUSY;
476 }
472 if (!capable(CAP_SYS_ADMIN)){ 477 if (!capable(CAP_SYS_ADMIN)){
473 dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n")); 478 dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
474 return -EPERM; 479 return -EPERM;
475 } 480 }
476 /* 481 /*
477 * Allocate and initialize a Fib then setup a BlockWrite command 482 * Allocate and initialize a Fib then setup a SRB command
478 */ 483 */
479 if (!(srbfib = aac_fib_alloc(dev))) { 484 if (!(srbfib = aac_fib_alloc(dev))) {
480 return -ENOMEM; 485 return -ENOMEM;
@@ -541,129 +546,183 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
541 rcode = -EINVAL; 546 rcode = -EINVAL;
542 goto cleanup; 547 goto cleanup;
543 } 548 }
544 if (dev->dac_support == 1) { 549 actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
550 ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
551 actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
552 (sizeof(struct sgentry64) - sizeof(struct sgentry));
553 /* User made a mistake - should not continue */
554 if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
555 dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
556 "Raw SRB command calculated fibsize=%lu;%lu "
557 "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
558 "issued fibsize=%d\n",
559 actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
560 sizeof(struct aac_srb), sizeof(struct sgentry),
561 sizeof(struct sgentry64), fibsize));
562 rcode = -EINVAL;
563 goto cleanup;
564 }
565 if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
566 dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
567 rcode = -EINVAL;
568 goto cleanup;
569 }
570 byte_count = 0;
571 if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
545 struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg; 572 struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
546 struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg; 573 struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
547 struct user_sgmap* usg;
548 byte_count = 0;
549 574
550 /* 575 /*
551 * This should also catch if user used the 32 bit sgmap 576 * This should also catch if user used the 32 bit sgmap
552 */ 577 */
553 actual_fibsize = sizeof(struct aac_srb) - 578 if (actual_fibsize64 == fibsize) {
554 sizeof(struct sgentry) + 579 actual_fibsize = actual_fibsize64;
555 ((upsg->count & 0xff) * 580 for (i = 0; i < upsg->count; i++) {
556 sizeof(struct sgentry)); 581 u64 addr;
557 if(actual_fibsize != fibsize){ // User made a mistake - should not continue 582 void* p;
558 dprintk((KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n")); 583 /* Does this really need to be GFP_DMA? */
559 rcode = -EINVAL; 584 p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
560 goto cleanup; 585 if(p == 0) {
561 } 586 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
562 usg = kmalloc(actual_fibsize - sizeof(struct aac_srb) 587 upsg->sg[i].count,i,upsg->count));
563 + sizeof(struct sgmap), GFP_KERNEL); 588 rcode = -ENOMEM;
564 if (!usg) { 589 goto cleanup;
565 dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n")); 590 }
566 rcode = -ENOMEM; 591 addr = (u64)upsg->sg[i].addr[0];
567 goto cleanup; 592 addr += ((u64)upsg->sg[i].addr[1]) << 32;
568 } 593 sg_user[i] = (void __user *)(ptrdiff_t)addr;
569 memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb) 594 sg_list[i] = p; // save so we can clean up later
570 + sizeof(struct sgmap)); 595 sg_indx = i;
571 actual_fibsize = sizeof(struct aac_srb) - 596
572 sizeof(struct sgentry) + ((usg->count & 0xff) * 597 if( flags & SRB_DataOut ){
573 sizeof(struct sgentry64)); 598 if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
574 if ((data_dir == DMA_NONE) && upsg->count) { 599 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
575 kfree (usg); 600 rcode = -EFAULT;
576 dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n")); 601 goto cleanup;
577 rcode = -EINVAL; 602 }
578 goto cleanup; 603 }
579 } 604 addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir);
580 605
581 for (i = 0; i < usg->count; i++) { 606 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
582 u64 addr; 607 psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
583 void* p; 608 byte_count += upsg->sg[i].count;
584 /* Does this really need to be GFP_DMA? */ 609 psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
585 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); 610 }
586 if(p == 0) { 611 } else {
587 kfree (usg); 612 struct user_sgmap* usg;
588 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", 613 usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
589 usg->sg[i].count,i,usg->count)); 614 + sizeof(struct sgmap), GFP_KERNEL);
615 if (!usg) {
616 dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
590 rcode = -ENOMEM; 617 rcode = -ENOMEM;
591 goto cleanup; 618 goto cleanup;
592 } 619 }
593 sg_user[i] = (void __user *)(long)usg->sg[i].addr; 620 memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb)
594 sg_list[i] = p; // save so we can clean up later 621 + sizeof(struct sgmap));
595 sg_indx = i; 622 actual_fibsize = actual_fibsize64;
596 623
597 if( flags & SRB_DataOut ){ 624 for (i = 0; i < usg->count; i++) {
598 if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){ 625 u64 addr;
626 void* p;
627 /* Does this really need to be GFP_DMA? */
628 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
629 if(p == 0) {
599 kfree (usg); 630 kfree (usg);
600 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); 631 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
601 rcode = -EFAULT; 632 usg->sg[i].count,i,usg->count));
633 rcode = -ENOMEM;
602 goto cleanup; 634 goto cleanup;
603 } 635 }
604 } 636 sg_user[i] = (void __user *)(ptrdiff_t)usg->sg[i].addr;
605 addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir); 637 sg_list[i] = p; // save so we can clean up later
638 sg_indx = i;
639
640 if( flags & SRB_DataOut ){
641 if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
642 kfree (usg);
643 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
644 rcode = -EFAULT;
645 goto cleanup;
646 }
647 }
648 addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
606 649
607 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); 650 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
608 psg->sg[i].addr[1] = cpu_to_le32(addr>>32); 651 psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
609 psg->sg[i].count = cpu_to_le32(usg->sg[i].count); 652 byte_count += usg->sg[i].count;
610 byte_count += usg->sg[i].count; 653 psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
654 }
655 kfree (usg);
611 } 656 }
612 kfree (usg);
613
614 srbcmd->count = cpu_to_le32(byte_count); 657 srbcmd->count = cpu_to_le32(byte_count);
615 psg->count = cpu_to_le32(sg_indx+1); 658 psg->count = cpu_to_le32(sg_indx+1);
616 status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); 659 status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
617 } else { 660 } else {
618 struct user_sgmap* upsg = &user_srbcmd->sg; 661 struct user_sgmap* upsg = &user_srbcmd->sg;
619 struct sgmap* psg = &srbcmd->sg; 662 struct sgmap* psg = &srbcmd->sg;
620 byte_count = 0; 663
621 664 if (actual_fibsize64 == fibsize) {
622 actual_fibsize = sizeof (struct aac_srb) + (((user_srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry)); 665 struct user_sgmap64* usg = (struct user_sgmap64 *)upsg;
623 if(actual_fibsize != fibsize){ // User made a mistake - should not continue 666 for (i = 0; i < upsg->count; i++) {
624 dprintk((KERN_DEBUG"aacraid: Bad Size specified in " 667 u64 addr;
625 "Raw SRB command calculated fibsize=%d " 668 void* p;
626 "user_srbcmd->sg.count=%d aac_srb=%d sgentry=%d " 669 /* Does this really need to be GFP_DMA? */
627 "issued fibsize=%d\n", 670 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
628 actual_fibsize, user_srbcmd->sg.count, 671 if(p == 0) {
629 sizeof(struct aac_srb), sizeof(struct sgentry), 672 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
630 fibsize)); 673 usg->sg[i].count,i,usg->count));
631 rcode = -EINVAL; 674 rcode = -ENOMEM;
632 goto cleanup;
633 }
634 if ((data_dir == DMA_NONE) && upsg->count) {
635 dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
636 rcode = -EINVAL;
637 goto cleanup;
638 }
639 for (i = 0; i < upsg->count; i++) {
640 dma_addr_t addr;
641 void* p;
642 p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
643 if(p == 0) {
644 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
645 upsg->sg[i].count, i, upsg->count));
646 rcode = -ENOMEM;
647 goto cleanup;
648 }
649 sg_user[i] = (void __user *)(long)upsg->sg[i].addr;
650 sg_list[i] = p; // save so we can clean up later
651 sg_indx = i;
652
653 if( flags & SRB_DataOut ){
654 if(copy_from_user(p, sg_user[i],
655 upsg->sg[i].count)) {
656 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
657 rcode = -EFAULT;
658 goto cleanup; 675 goto cleanup;
659 } 676 }
677 addr = (u64)usg->sg[i].addr[0];
678 addr += ((u64)usg->sg[i].addr[1]) << 32;
679 sg_user[i] = (void __user *)(ptrdiff_t)addr;
680 sg_list[i] = p; // save so we can clean up later
681 sg_indx = i;
682
683 if( flags & SRB_DataOut ){
684 if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
685 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
686 rcode = -EFAULT;
687 goto cleanup;
688 }
689 }
690 addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
691
692 psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
693 byte_count += usg->sg[i].count;
694 psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
660 } 695 }
661 addr = pci_map_single(dev->pdev, p, 696 } else {
662 upsg->sg[i].count, data_dir); 697 for (i = 0; i < upsg->count; i++) {
698 dma_addr_t addr;
699 void* p;
700 p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
701 if(p == 0) {
702 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
703 upsg->sg[i].count, i, upsg->count));
704 rcode = -ENOMEM;
705 goto cleanup;
706 }
707 sg_user[i] = (void __user *)(ptrdiff_t)upsg->sg[i].addr;
708 sg_list[i] = p; // save so we can clean up later
709 sg_indx = i;
710
711 if( flags & SRB_DataOut ){
712 if(copy_from_user(p, sg_user[i],
713 upsg->sg[i].count)) {
714 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
715 rcode = -EFAULT;
716 goto cleanup;
717 }
718 }
719 addr = pci_map_single(dev->pdev, p,
720 upsg->sg[i].count, data_dir);
663 721
664 psg->sg[i].addr = cpu_to_le32(addr); 722 psg->sg[i].addr = cpu_to_le32(addr);
665 psg->sg[i].count = cpu_to_le32(upsg->sg[i].count); 723 byte_count += upsg->sg[i].count;
666 byte_count += upsg->sg[i].count; 724 psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
725 }
667 } 726 }
668 srbcmd->count = cpu_to_le32(byte_count); 727 srbcmd->count = cpu_to_le32(byte_count);
669 psg->count = cpu_to_le32(sg_indx+1); 728 psg->count = cpu_to_le32(sg_indx+1);
@@ -682,7 +741,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
682 741
683 if( flags & SRB_DataIn ) { 742 if( flags & SRB_DataIn ) {
684 for(i = 0 ; i <= sg_indx; i++){ 743 for(i = 0 ; i <= sg_indx; i++){
685 byte_count = le32_to_cpu((dev->dac_support == 1) 744 byte_count = le32_to_cpu(
745 (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
686 ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count 746 ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
687 : srbcmd->sg.sg[i].count); 747 : srbcmd->sg.sg[i].count);
688 if(copy_to_user(sg_user[i], sg_list[i], byte_count)){ 748 if(copy_to_user(sg_user[i], sg_list[i], byte_count)){
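For reference, the size check introduced in the raw SRB path above can be modelled outside the driver. The sketch below is a minimal, standalone illustration of the arithmetic only; the structure layouts (aac_srb, sgentry, sgentry64, sgmap) are simplified stand-ins, not the real firmware definitions.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-ins for the firmware structures (illustration only). */
struct sgentry   { uint32_t addr;    uint32_t count; };
struct sgentry64 { uint32_t addr[2]; uint32_t count; };
struct sgmap     { uint32_t count; struct sgentry sg[1]; };
struct aac_srb   { uint32_t function; uint32_t flags; struct sgmap sg; };

/*
 * A user-supplied fibsize is accepted only if it matches either the
 * 32-bit or the 64-bit scatter/gather layout for the declared sg.count.
 */
static int fibsize_is_valid(size_t fibsize, unsigned sg_count)
{
	size_t actual_fibsize   = sizeof(struct aac_srb) - sizeof(struct sgentry) +
				  (sg_count & 0xff) * sizeof(struct sgentry);
	size_t actual_fibsize64 = actual_fibsize +
				  (sg_count & 0xff) *
				  (sizeof(struct sgentry64) - sizeof(struct sgentry));

	return fibsize == actual_fibsize || fibsize == actual_fibsize64;
}

int main(void)
{
	unsigned count = 3;
	size_t sz32 = sizeof(struct aac_srb) - sizeof(struct sgentry) +
		      count * sizeof(struct sgentry);

	printf("32-bit layout for %u entries: %zu bytes -> %s\n",
	       count, sz32, fibsize_is_valid(sz32, count) ? "ok" : "rejected");
	printf("off-by-four size: %zu bytes -> %s\n",
	       sz32 + 4, fibsize_is_valid(sz32 + 4, count) ? "ok" : "rejected");
	return 0;
}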
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index ae34768987a4..33682ce96a5d 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -5,7 +5,7 @@
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com) 8 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -110,7 +110,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
110 /* 110 /*
111 * Align the beginning of Headers to commalign 111 * Align the beginning of Headers to commalign
112 */ 112 */
113 align = (commalign - ((unsigned long)(base) & (commalign - 1))); 113 align = (commalign - ((ptrdiff_t)(base) & (commalign - 1)));
114 base = base + align; 114 base = base + align;
115 phys = phys + align; 115 phys = phys + align;
116 /* 116 /*
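The alignment step touched in this hunk is plain pointer arithmetic and can be reproduced standalone. In the sketch below the buffer and the 2 KB commalign value are made-up stand-ins (and uintptr_t is used for the mask instead of ptrdiff_t); the point is only how the offset is derived from the low bits of the address.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	const uintptr_t commalign = 2048;          /* must be a power of two */
	unsigned char *raw = malloc(8192);         /* stand-in for the comm area */
	unsigned char *base = raw + 100;           /* deliberately misaligned   */

	/*
	 * Same expression as the driver: distance from 'base' up to the next
	 * commalign boundary.  Note that an already-aligned base is advanced
	 * by a full commalign, exactly as the original code does.
	 */
	uintptr_t align = commalign - ((uintptr_t)base & (commalign - 1));
	unsigned char *headers = base + align;

	printf("base    = %p\n", (void *)base);
	printf("align   = %lu bytes\n", (unsigned long)align);
	printf("headers = %p (low bits now 0x%lx)\n",
	       (void *)headers,
	       (unsigned long)((uintptr_t)headers & (commalign - 1)));
	free(raw);
	return 0;
}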
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 1b97f60652ba..5824a757a753 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -5,7 +5,7 @@
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com) 8 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -94,7 +94,7 @@ void aac_fib_map_free(struct aac_dev *dev)
94int aac_fib_setup(struct aac_dev * dev) 94int aac_fib_setup(struct aac_dev * dev)
95{ 95{
96 struct fib *fibptr; 96 struct fib *fibptr;
97 struct hw_fib *hw_fib_va; 97 struct hw_fib *hw_fib;
98 dma_addr_t hw_fib_pa; 98 dma_addr_t hw_fib_pa;
99 int i; 99 int i;
100 100
@@ -106,24 +106,24 @@ int aac_fib_setup(struct aac_dev * dev)
106 if (i<0) 106 if (i<0)
107 return -ENOMEM; 107 return -ENOMEM;
108 108
109 hw_fib_va = dev->hw_fib_va; 109 hw_fib = dev->hw_fib_va;
110 hw_fib_pa = dev->hw_fib_pa; 110 hw_fib_pa = dev->hw_fib_pa;
111 memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); 111 memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
112 /* 112 /*
113 * Initialise the fibs 113 * Initialise the fibs
114 */ 114 */
115 for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++) 115 for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
116 { 116 {
117 fibptr->dev = dev; 117 fibptr->dev = dev;
118 fibptr->hw_fib = hw_fib_va; 118 fibptr->hw_fib_va = hw_fib;
119 fibptr->data = (void *) fibptr->hw_fib->data; 119 fibptr->data = (void *) fibptr->hw_fib_va->data;
120 fibptr->next = fibptr+1; /* Forward chain the fibs */ 120 fibptr->next = fibptr+1; /* Forward chain the fibs */
121 init_MUTEX_LOCKED(&fibptr->event_wait); 121 init_MUTEX_LOCKED(&fibptr->event_wait);
122 spin_lock_init(&fibptr->event_lock); 122 spin_lock_init(&fibptr->event_lock);
123 hw_fib_va->header.XferState = cpu_to_le32(0xffffffff); 123 hw_fib->header.XferState = cpu_to_le32(0xffffffff);
124 hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size); 124 hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
125 fibptr->hw_fib_pa = hw_fib_pa; 125 fibptr->hw_fib_pa = hw_fib_pa;
126 hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size); 126 hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size);
127 hw_fib_pa = hw_fib_pa + dev->max_fib_size; 127 hw_fib_pa = hw_fib_pa + dev->max_fib_size;
128 } 128 }
129 /* 129 /*
@@ -166,7 +166,7 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
166 * Null out fields that depend on being zero at the start of 166 * Null out fields that depend on being zero at the start of
167 * each I/O 167 * each I/O
168 */ 168 */
169 fibptr->hw_fib->header.XferState = 0; 169 fibptr->hw_fib_va->header.XferState = 0;
170 fibptr->callback = NULL; 170 fibptr->callback = NULL;
171 fibptr->callback_data = NULL; 171 fibptr->callback_data = NULL;
172 172
@@ -178,7 +178,6 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
178 * @fibptr: fib to free up 178 * @fibptr: fib to free up
179 * 179 *
180 * Frees up a fib and places it on the appropriate queue 180 * Frees up a fib and places it on the appropriate queue
181 * (either free or timed out)
182 */ 181 */
183 182
184void aac_fib_free(struct fib *fibptr) 183void aac_fib_free(struct fib *fibptr)
@@ -186,19 +185,15 @@ void aac_fib_free(struct fib *fibptr)
186 unsigned long flags; 185 unsigned long flags;
187 186
188 spin_lock_irqsave(&fibptr->dev->fib_lock, flags); 187 spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
189 if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) { 188 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
190 aac_config.fib_timeouts++; 189 aac_config.fib_timeouts++;
191 fibptr->next = fibptr->dev->timeout_fib; 190 if (fibptr->hw_fib_va->header.XferState != 0) {
192 fibptr->dev->timeout_fib = fibptr; 191 printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
193 } else { 192 (void*)fibptr,
194 if (fibptr->hw_fib->header.XferState != 0) { 193 le32_to_cpu(fibptr->hw_fib_va->header.XferState));
195 printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 194 }
196 (void*)fibptr, 195 fibptr->next = fibptr->dev->free_fib;
197 le32_to_cpu(fibptr->hw_fib->header.XferState)); 196 fibptr->dev->free_fib = fibptr;
198 }
199 fibptr->next = fibptr->dev->free_fib;
200 fibptr->dev->free_fib = fibptr;
201 }
202 spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags); 197 spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
203} 198}
204 199
@@ -211,7 +206,7 @@ void aac_fib_free(struct fib *fibptr)
211 206
212void aac_fib_init(struct fib *fibptr) 207void aac_fib_init(struct fib *fibptr)
213{ 208{
214 struct hw_fib *hw_fib = fibptr->hw_fib; 209 struct hw_fib *hw_fib = fibptr->hw_fib_va;
215 210
216 hw_fib->header.StructType = FIB_MAGIC; 211 hw_fib->header.StructType = FIB_MAGIC;
217 hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size); 212 hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
@@ -231,7 +226,7 @@ void aac_fib_init(struct fib *fibptr)
231 226
232static void fib_dealloc(struct fib * fibptr) 227static void fib_dealloc(struct fib * fibptr)
233{ 228{
234 struct hw_fib *hw_fib = fibptr->hw_fib; 229 struct hw_fib *hw_fib = fibptr->hw_fib_va;
235 BUG_ON(hw_fib->header.StructType != FIB_MAGIC); 230 BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
236 hw_fib->header.XferState = 0; 231 hw_fib->header.XferState = 0;
237} 232}
@@ -386,7 +381,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
386 void *callback_data) 381 void *callback_data)
387{ 382{
388 struct aac_dev * dev = fibptr->dev; 383 struct aac_dev * dev = fibptr->dev;
389 struct hw_fib * hw_fib = fibptr->hw_fib; 384 struct hw_fib * hw_fib = fibptr->hw_fib_va;
390 unsigned long flags = 0; 385 unsigned long flags = 0;
391 unsigned long qflags; 386 unsigned long qflags;
392 387
@@ -430,7 +425,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
430 */ 425 */
431 hw_fib->header.Command = cpu_to_le16(command); 426 hw_fib->header.Command = cpu_to_le16(command);
432 hw_fib->header.XferState |= cpu_to_le32(SentFromHost); 427 hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
433 fibptr->hw_fib->header.Flags = 0; /* 0 the flags field - internal only*/ 428 fibptr->hw_fib_va->header.Flags = 0; /* 0 the flags field - internal only*/
434 /* 429 /*
435 * Set the size of the Fib we want to send to the adapter 430 * Set the size of the Fib we want to send to the adapter
436 */ 431 */
@@ -462,7 +457,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
462 dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command))); 457 dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
463 dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command))); 458 dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
464 dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState))); 459 dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
465 dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib)); 460 dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib_va));
466 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa)); 461 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
467 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr)); 462 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
468 463
@@ -513,22 +508,20 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
513 } 508 }
514 udelay(5); 509 udelay(5);
515 } 510 }
516 } else if (down_interruptible(&fibptr->event_wait)) { 511 } else
517 spin_lock_irqsave(&fibptr->event_lock, flags); 512 (void)down_interruptible(&fibptr->event_wait);
518 if (fibptr->done == 0) { 513 spin_lock_irqsave(&fibptr->event_lock, flags);
519 fibptr->done = 2; /* Tell interrupt we aborted */ 514 if (fibptr->done == 0) {
520 spin_unlock_irqrestore(&fibptr->event_lock, flags); 515 fibptr->done = 2; /* Tell interrupt we aborted */
521 return -EINTR;
522 }
523 spin_unlock_irqrestore(&fibptr->event_lock, flags); 516 spin_unlock_irqrestore(&fibptr->event_lock, flags);
517 return -EINTR;
524 } 518 }
519 spin_unlock_irqrestore(&fibptr->event_lock, flags);
525 BUG_ON(fibptr->done == 0); 520 BUG_ON(fibptr->done == 0);
526 521
527 if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){ 522 if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
528 return -ETIMEDOUT; 523 return -ETIMEDOUT;
529 } else { 524 return 0;
530 return 0;
531 }
532 } 525 }
533 /* 526 /*
534 * If the user does not want a response than return success otherwise 527 * If the user does not want a response than return success otherwise
@@ -624,7 +617,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
624 617
625int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) 618int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
626{ 619{
627 struct hw_fib * hw_fib = fibptr->hw_fib; 620 struct hw_fib * hw_fib = fibptr->hw_fib_va;
628 struct aac_dev * dev = fibptr->dev; 621 struct aac_dev * dev = fibptr->dev;
629 struct aac_queue * q; 622 struct aac_queue * q;
630 unsigned long nointr = 0; 623 unsigned long nointr = 0;
@@ -688,7 +681,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
688 681
689int aac_fib_complete(struct fib *fibptr) 682int aac_fib_complete(struct fib *fibptr)
690{ 683{
691 struct hw_fib * hw_fib = fibptr->hw_fib; 684 struct hw_fib * hw_fib = fibptr->hw_fib_va;
692 685
693 /* 686 /*
694 * Check for a fib which has already been completed 687 * Check for a fib which has already been completed
@@ -774,9 +767,8 @@ void aac_printf(struct aac_dev *dev, u32 val)
774#define AIF_SNIFF_TIMEOUT (30*HZ) 767#define AIF_SNIFF_TIMEOUT (30*HZ)
775static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) 768static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
776{ 769{
777 struct hw_fib * hw_fib = fibptr->hw_fib; 770 struct hw_fib * hw_fib = fibptr->hw_fib_va;
778 struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data; 771 struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
779 int busy;
780 u32 container; 772 u32 container;
781 struct scsi_device *device; 773 struct scsi_device *device;
782 enum { 774 enum {
@@ -988,9 +980,6 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
988 * behind you. 980 * behind you.
989 */ 981 */
990 982
991 busy = 0;
992
993
994 /* 983 /*
995 * Find the scsi_device associated with the SCSI address, 984 * Find the scsi_device associated with the SCSI address,
996 * and mark it as changed, invalidating the cache. This deals 985 * and mark it as changed, invalidating the cache. This deals
@@ -1035,7 +1024,6 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
1035static int _aac_reset_adapter(struct aac_dev *aac) 1024static int _aac_reset_adapter(struct aac_dev *aac)
1036{ 1025{
1037 int index, quirks; 1026 int index, quirks;
1038 u32 ret;
1039 int retval; 1027 int retval;
1040 struct Scsi_Host *host; 1028 struct Scsi_Host *host;
1041 struct scsi_device *dev; 1029 struct scsi_device *dev;
@@ -1059,35 +1047,29 @@ static int _aac_reset_adapter(struct aac_dev *aac)
1059 * If a positive health, means in a known DEAD PANIC 1047 * If a positive health, means in a known DEAD PANIC
1060 * state and the adapter could be reset to `try again'. 1048 * state and the adapter could be reset to `try again'.
1061 */ 1049 */
1062 retval = aac_adapter_check_health(aac); 1050 retval = aac_adapter_restart(aac, aac_adapter_check_health(aac));
1063 if (retval == 0)
1064 retval = aac_adapter_sync_cmd(aac, IOP_RESET_ALWAYS,
1065 0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
1066 if (retval)
1067 retval = aac_adapter_sync_cmd(aac, IOP_RESET,
1068 0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
1069 1051
1070 if (retval) 1052 if (retval)
1071 goto out; 1053 goto out;
1072 if (ret != 0x00000001) {
1073 retval = -ENODEV;
1074 goto out;
1075 }
1076 1054
1077 /* 1055 /*
1078 * Loop through the fibs, close the synchronous FIBS 1056 * Loop through the fibs, close the synchronous FIBS
1079 */ 1057 */
1080 for (index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) { 1058 for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
1081 struct fib *fib = &aac->fibs[index]; 1059 struct fib *fib = &aac->fibs[index];
1082 if (!(fib->hw_fib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) && 1060 if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1083 (fib->hw_fib->header.XferState & cpu_to_le32(ResponseExpected))) { 1061 (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
1084 unsigned long flagv; 1062 unsigned long flagv;
1085 spin_lock_irqsave(&fib->event_lock, flagv); 1063 spin_lock_irqsave(&fib->event_lock, flagv);
1086 up(&fib->event_wait); 1064 up(&fib->event_wait);
1087 spin_unlock_irqrestore(&fib->event_lock, flagv); 1065 spin_unlock_irqrestore(&fib->event_lock, flagv);
1088 schedule(); 1066 schedule();
1067 retval = 0;
1089 } 1068 }
1090 } 1069 }
1070 /* Give some extra time for ioctls to complete. */
1071 if (retval == 0)
1072 ssleep(2);
1091 index = aac->cardtype; 1073 index = aac->cardtype;
1092 1074
1093 /* 1075 /*
@@ -1248,7 +1230,7 @@ int aac_check_health(struct aac_dev * aac)
1248 1230
1249 memset(hw_fib, 0, sizeof(struct hw_fib)); 1231 memset(hw_fib, 0, sizeof(struct hw_fib));
1250 memset(fib, 0, sizeof(struct fib)); 1232 memset(fib, 0, sizeof(struct fib));
1251 fib->hw_fib = hw_fib; 1233 fib->hw_fib_va = hw_fib;
1252 fib->dev = aac; 1234 fib->dev = aac;
1253 aac_fib_init(fib); 1235 aac_fib_init(fib);
1254 fib->type = FSAFS_NTC_FIB_CONTEXT; 1236 fib->type = FSAFS_NTC_FIB_CONTEXT;
@@ -1354,11 +1336,11 @@ int aac_command_thread(void *data)
1354 * do anything at this point since we don't have 1336 * do anything at this point since we don't have
1355 * anything defined for this thread to do. 1337 * anything defined for this thread to do.
1356 */ 1338 */
1357 hw_fib = fib->hw_fib; 1339 hw_fib = fib->hw_fib_va;
1358 memset(fib, 0, sizeof(struct fib)); 1340 memset(fib, 0, sizeof(struct fib));
1359 fib->type = FSAFS_NTC_FIB_CONTEXT; 1341 fib->type = FSAFS_NTC_FIB_CONTEXT;
1360 fib->size = sizeof( struct fib ); 1342 fib->size = sizeof( struct fib );
1361 fib->hw_fib = hw_fib; 1343 fib->hw_fib_va = hw_fib;
1362 fib->data = hw_fib->data; 1344 fib->data = hw_fib->data;
1363 fib->dev = dev; 1345 fib->dev = dev;
1364 /* 1346 /*
@@ -1485,7 +1467,7 @@ int aac_command_thread(void *data)
1485 */ 1467 */
1486 memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib)); 1468 memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
1487 memcpy(newfib, fib, sizeof(struct fib)); 1469 memcpy(newfib, fib, sizeof(struct fib));
1488 newfib->hw_fib = hw_newfib; 1470 newfib->hw_fib_va = hw_newfib;
1489 /* 1471 /*
1490 * Put the FIB onto the 1472 * Put the FIB onto the
1491 * fibctx's fibs 1473 * fibctx's fibs
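The aac_fib_free() rework above drops the separate timed-out list: a timed-out fib now only bumps a counter and goes back on the single free list, with a warning if its XferState is still set. A toy, single-threaded model of that push (locking omitted) might look like the following; struct names, fields and the flag value are simplified stand-ins.

#include <stdio.h>

#define FIB_CONTEXT_FLAG_TIMED_OUT 0x01   /* stand-in flag value */

struct toy_fib {
	unsigned flags;
	unsigned xfer_state;                /* nonzero == still marked in flight */
	struct toy_fib *next;
};

struct toy_dev {
	struct toy_fib *free_fib;           /* single free list, no timeout list */
	unsigned long fib_timeouts;
};

/* Model of the reworked free path: count timeouts, warn, always free. */
static void toy_fib_free(struct toy_dev *dev, struct toy_fib *fib)
{
	if (fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)
		dev->fib_timeouts++;
	if (fib->xfer_state != 0)
		printf("warning: freeing fib %p with XferState 0x%x\n",
		       (void *)fib, fib->xfer_state);
	fib->next = dev->free_fib;
	dev->free_fib = fib;
}

int main(void)
{
	struct toy_dev dev = { 0 };
	struct toy_fib a = { .flags = FIB_CONTEXT_FLAG_TIMED_OUT, .xfer_state = 0 };
	struct toy_fib b = { .flags = 0, .xfer_state = 0 };

	toy_fib_free(&dev, &a);
	toy_fib_free(&dev, &b);
	printf("timeouts seen: %lu, head of free list: %p\n",
	       dev.fib_timeouts, (void *)dev.free_fib);
	return 0;
}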
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index d38b628be1ad..42c7dcda6d9b 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -5,7 +5,7 @@
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com) 8 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -32,7 +32,6 @@
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/pci.h>
36#include <linux/spinlock.h> 35#include <linux/spinlock.h>
37#include <linux/slab.h> 36#include <linux/slab.h>
38#include <linux/completion.h> 37#include <linux/completion.h>
@@ -73,7 +72,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
73 u32 index = le32_to_cpu(entry->addr); 72 u32 index = le32_to_cpu(entry->addr);
74 fast = index & 0x01; 73 fast = index & 0x01;
75 fib = &dev->fibs[index >> 2]; 74 fib = &dev->fibs[index >> 2];
76 hwfib = fib->hw_fib; 75 hwfib = fib->hw_fib_va;
77 76
78 aac_consumer_free(dev, q, HostNormRespQueue); 77 aac_consumer_free(dev, q, HostNormRespQueue);
79 /* 78 /*
@@ -84,11 +83,13 @@ unsigned int aac_response_normal(struct aac_queue * q)
84 * continue. The caller has already been notified that 83 * continue. The caller has already been notified that
85 * the fib timed out. 84 * the fib timed out.
86 */ 85 */
87 if (!(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) 86 dev->queues->queue[AdapNormCmdQueue].numpending--;
88 dev->queues->queue[AdapNormCmdQueue].numpending--; 87
89 else { 88 if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
90 printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags); 89 spin_unlock_irqrestore(q->lock, flags);
91 printk(KERN_DEBUG"aacraid: hwfib=%p fib index=%i fib=%p\n",hwfib, hwfib->header.SenderData,fib); 90 aac_fib_complete(fib);
91 aac_fib_free(fib);
92 spin_lock_irqsave(q->lock, flags);
92 continue; 93 continue;
93 } 94 }
94 spin_unlock_irqrestore(q->lock, flags); 95 spin_unlock_irqrestore(q->lock, flags);
@@ -193,7 +194,7 @@ unsigned int aac_command_normal(struct aac_queue *q)
193 INIT_LIST_HEAD(&fib->fiblink); 194 INIT_LIST_HEAD(&fib->fiblink);
194 fib->type = FSAFS_NTC_FIB_CONTEXT; 195 fib->type = FSAFS_NTC_FIB_CONTEXT;
195 fib->size = sizeof(struct fib); 196 fib->size = sizeof(struct fib);
196 fib->hw_fib = hw_fib; 197 fib->hw_fib_va = hw_fib;
197 fib->data = hw_fib->data; 198 fib->data = hw_fib->data;
198 fib->dev = dev; 199 fib->dev = dev;
199 200
@@ -254,12 +255,13 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
254 return 1; 255 return 1;
255 } 256 }
256 memset(hw_fib, 0, sizeof(struct hw_fib)); 257 memset(hw_fib, 0, sizeof(struct hw_fib));
257 memcpy(hw_fib, (struct hw_fib *)(((unsigned long)(dev->regs.sa)) + (index & ~0x00000002L)), sizeof(struct hw_fib)); 258 memcpy(hw_fib, (struct hw_fib *)(((ptrdiff_t)(dev->regs.sa)) +
259 (index & ~0x00000002L)), sizeof(struct hw_fib));
258 memset(fib, 0, sizeof(struct fib)); 260 memset(fib, 0, sizeof(struct fib));
259 INIT_LIST_HEAD(&fib->fiblink); 261 INIT_LIST_HEAD(&fib->fiblink);
260 fib->type = FSAFS_NTC_FIB_CONTEXT; 262 fib->type = FSAFS_NTC_FIB_CONTEXT;
261 fib->size = sizeof(struct fib); 263 fib->size = sizeof(struct fib);
262 fib->hw_fib = hw_fib; 264 fib->hw_fib_va = hw_fib;
263 fib->data = hw_fib->data; 265 fib->data = hw_fib->data;
264 fib->dev = dev; 266 fib->dev = dev;
265 267
@@ -271,7 +273,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
271 } else { 273 } else {
272 int fast = index & 0x01; 274 int fast = index & 0x01;
273 struct fib * fib = &dev->fibs[index >> 2]; 275 struct fib * fib = &dev->fibs[index >> 2];
274 struct hw_fib * hwfib = fib->hw_fib; 276 struct hw_fib * hwfib = fib->hw_fib_va;
275 277
276 /* 278 /*
277 * Remove this fib from the Outstanding I/O queue. 279 * Remove this fib from the Outstanding I/O queue.
@@ -281,14 +283,14 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
281 * continue. The caller has already been notified that 283 * continue. The caller has already been notified that
282 * the fib timed out. 284 * the fib timed out.
283 */ 285 */
284 if ((fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) { 286 dev->queues->queue[AdapNormCmdQueue].numpending--;
285 printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags); 287
286 printk(KERN_DEBUG"aacraid: hwfib=%p index=%i fib=%p\n",hwfib, hwfib->header.SenderData,fib); 288 if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
289 aac_fib_complete(fib);
290 aac_fib_free(fib);
287 return 0; 291 return 0;
288 } 292 }
289 293
290 dev->queues->queue[AdapNormCmdQueue].numpending--;
291
292 if (fast) { 294 if (fast) {
293 /* 295 /*
294 * Doctor the fib 296 * Doctor the fib
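Both response paths above decode the same packed index word: bit 0 marks a fast-path completion and the remaining bits, shifted right by two, select the fib slot. A standalone round-trip of that encoding, with made-up values, is shown below.

#include <stdio.h>
#include <stdint.h>

/* Pack a fib slot number and a fast-path bit the way the response queue does. */
static uint32_t pack_index(uint32_t slot, int fast)
{
	return (slot << 2) | (fast ? 0x01u : 0);
}

int main(void)
{
	uint32_t index = pack_index(57, 1);      /* hypothetical queue entry */

	int      fast = index & 0x01;            /* bit 0: fast completion   */
	uint32_t slot = index >> 2;              /* upper bits: fibs[] index */

	printf("index word 0x%08x -> slot %u, fast=%d\n", index, slot, fast);
	return 0;
}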
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 0f948c2fb609..350ea7feb61d 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -5,7 +5,7 @@
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com) 8 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -82,8 +82,6 @@ static LIST_HEAD(aac_devices);
82static int aac_cfg_major = -1; 82static int aac_cfg_major = -1;
83char aac_driver_version[] = AAC_DRIVER_FULL_VERSION; 83char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
84 84
85extern int expose_physicals;
86
87/* 85/*
88 * Because of the way Linux names scsi devices, the order in this table has 86 * Because of the way Linux names scsi devices, the order in this table has
89 * become important. Check for on-board Raid first, add-in cards second. 87 * become important. Check for on-board Raid first, add-in cards second.
@@ -247,7 +245,19 @@ static struct aac_driver_ident aac_drivers[] = {
247 245
248static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 246static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
249{ 247{
248 struct Scsi_Host *host = cmd->device->host;
249 struct aac_dev *dev = (struct aac_dev *)host->hostdata;
250 u32 count = 0;
250 cmd->scsi_done = done; 251 cmd->scsi_done = done;
252 for (; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
253 struct fib * fib = &dev->fibs[count];
254 struct scsi_cmnd * command;
255 if (fib->hw_fib_va->header.XferState &&
256 ((command = fib->callback_data)) &&
257 (command == cmd) &&
258 (cmd->SCp.phase == AAC_OWNER_FIRMWARE))
259 return 0; /* Already owned by Adapter */
260 }
251 cmd->SCp.phase = AAC_OWNER_LOWLEVEL; 261 cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
252 return (aac_scsi_cmd(cmd) ? FAILED : 0); 262 return (aac_scsi_cmd(cmd) ? FAILED : 0);
253} 263}
@@ -446,6 +456,40 @@ static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
446 return aac_do_ioctl(dev, cmd, arg); 456 return aac_do_ioctl(dev, cmd, arg);
447} 457}
448 458
459static int aac_eh_abort(struct scsi_cmnd* cmd)
460{
461 struct scsi_device * dev = cmd->device;
462 struct Scsi_Host * host = dev->host;
463 struct aac_dev * aac = (struct aac_dev *)host->hostdata;
464 int count;
465 int ret = FAILED;
466
467 printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%d)\n",
468 AAC_DRIVERNAME,
469 host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun);
470 switch (cmd->cmnd[0]) {
471 case SERVICE_ACTION_IN:
472 if (!(aac->raw_io_interface) ||
473 !(aac->raw_io_64) ||
474 ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
475 break;
476 case INQUIRY:
477 case READ_CAPACITY:
478 case TEST_UNIT_READY:
479 /* Mark associated FIB to not complete, eh handler does this */
480 for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
481 struct fib * fib = &aac->fibs[count];
482 if (fib->hw_fib_va->header.XferState &&
483 (fib->callback_data == cmd)) {
484 fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
485 cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
486 ret = SUCCESS;
487 }
488 }
489 }
490 return ret;
491}
492
449/* 493/*
450 * aac_eh_reset - Reset command handling 494 * aac_eh_reset - Reset command handling
451 * @scsi_cmd: SCSI command block causing the reset 495 * @scsi_cmd: SCSI command block causing the reset
@@ -457,12 +501,20 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
457 struct Scsi_Host * host = dev->host; 501 struct Scsi_Host * host = dev->host;
458 struct scsi_cmnd * command; 502 struct scsi_cmnd * command;
459 int count; 503 int count;
460 struct aac_dev * aac; 504 struct aac_dev * aac = (struct aac_dev *)host->hostdata;
461 unsigned long flags; 505 unsigned long flags;
462 506
507 /* Mark the associated FIB to not complete, eh handler does this */
508 for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
509 struct fib * fib = &aac->fibs[count];
510 if (fib->hw_fib_va->header.XferState &&
511 (fib->callback_data == cmd)) {
512 fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
513 cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
514 }
515 }
463 printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n", 516 printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
464 AAC_DRIVERNAME); 517 AAC_DRIVERNAME);
465 aac = (struct aac_dev *)host->hostdata;
466 518
467 if ((count = aac_check_health(aac))) 519 if ((count = aac_check_health(aac)))
468 return count; 520 return count;
@@ -496,7 +548,7 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
496 ssleep(1); 548 ssleep(1);
497 } 549 }
498 printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME); 550 printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
499 return -ETIMEDOUT; 551 return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
500} 552}
501 553
502/** 554/**
@@ -796,6 +848,7 @@ static struct scsi_host_template aac_driver_template = {
796 .bios_param = aac_biosparm, 848 .bios_param = aac_biosparm,
797 .shost_attrs = aac_attrs, 849 .shost_attrs = aac_attrs,
798 .slave_configure = aac_slave_configure, 850 .slave_configure = aac_slave_configure,
851 .eh_abort_handler = aac_eh_abort,
799 .eh_host_reset_handler = aac_eh_reset, 852 .eh_host_reset_handler = aac_eh_reset,
800 .can_queue = AAC_NUM_IO_FIB, 853 .can_queue = AAC_NUM_IO_FIB,
801 .this_id = MAXIMUM_NUM_CONTAINERS, 854 .this_id = MAXIMUM_NUM_CONTAINERS,
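The new abort handler added above walks every fib, and any outstanding fib whose callback_data points at the command being aborted is flagged so its eventual completion is discarded. A minimal model of that scan is sketched below; the array size, field names and flag value are illustrative only.

#include <stdio.h>
#include <stddef.h>

#define FIB_CONTEXT_FLAG_TIMED_OUT 0x01   /* stand-in flag value */
#define NUM_FIBS 8                        /* stand-in for can_queue + AAC_NUM_MGT_FIB */

struct toy_cmd { int id; };

struct toy_fib {
	unsigned xfer_state;                /* nonzero == outstanding at the adapter */
	void *callback_data;                /* the command this fib carries          */
	unsigned flags;
};

/* Flag every outstanding fib that belongs to 'cmd'; return how many were hit. */
static int mark_cmd_timed_out(struct toy_fib *fibs, size_t n, struct toy_cmd *cmd)
{
	int hits = 0;
	for (size_t i = 0; i < n; i++) {
		if (fibs[i].xfer_state && fibs[i].callback_data == cmd) {
			fibs[i].flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
			hits++;
		}
	}
	return hits;
}

int main(void)
{
	struct toy_cmd cmd = { 42 };
	struct toy_fib fibs[NUM_FIBS] = { 0 };

	fibs[3].xfer_state = 1;
	fibs[3].callback_data = &cmd;       /* this fib carries our command */
	fibs[5].xfer_state = 1;             /* unrelated outstanding fib    */

	printf("fibs flagged for cmd %d: %d\n", cmd.id,
	       mark_cmd_timed_out(fibs, NUM_FIBS, &cmd));
	return 0;
}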
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
index c76b611b6afb..a8ace5677813 100644
--- a/drivers/scsi/aacraid/nark.c
+++ b/drivers/scsi/aacraid/nark.c
@@ -74,9 +74,6 @@ static int aac_nark_ioremap(struct aac_dev * dev, u32 size)
74 74
75int aac_nark_init(struct aac_dev * dev) 75int aac_nark_init(struct aac_dev * dev)
76{ 76{
77 extern int _aac_rx_init(struct aac_dev *dev);
78 extern int aac_rx_select_comm(struct aac_dev *dev, int comm);
79
80 /* 77 /*
81 * Fill in the function dispatch table. 78 * Fill in the function dispatch table.
82 */ 79 */
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index d953c3fe998a..9c5fcfb398c2 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -45,7 +45,6 @@
45static int aac_rkt_select_comm(struct aac_dev *dev, int comm) 45static int aac_rkt_select_comm(struct aac_dev *dev, int comm)
46{ 46{
47 int retval; 47 int retval;
48 extern int aac_rx_select_comm(struct aac_dev *dev, int comm);
49 retval = aac_rx_select_comm(dev, comm); 48 retval = aac_rx_select_comm(dev, comm);
50 if (comm == AAC_COMM_MESSAGE) { 49 if (comm == AAC_COMM_MESSAGE) {
51 /* 50 /*
@@ -97,8 +96,6 @@ static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
97 96
98int aac_rkt_init(struct aac_dev *dev) 97int aac_rkt_init(struct aac_dev *dev)
99{ 98{
100 extern int _aac_rx_init(struct aac_dev *dev);
101
102 /* 99 /*
103 * Fill in the function dispatch table. 100 * Fill in the function dispatch table.
104 */ 101 */
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index d242e2611d67..0c71315cbf1a 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -5,7 +5,7 @@
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com) 8 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -57,25 +57,25 @@ static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
57 * been enabled. 57 * been enabled.
58 * Check to see if this is our interrupt. If it isn't just return 58 * Check to see if this is our interrupt. If it isn't just return
59 */ 59 */
60 if (intstat & ~(dev->OIMR)) { 60 if (likely(intstat & ~(dev->OIMR))) {
61 bellbits = rx_readl(dev, OutboundDoorbellReg); 61 bellbits = rx_readl(dev, OutboundDoorbellReg);
62 if (bellbits & DoorBellPrintfReady) { 62 if (unlikely(bellbits & DoorBellPrintfReady)) {
63 aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5])); 63 aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
64 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady); 64 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
65 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone); 65 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
66 } 66 }
67 else if (bellbits & DoorBellAdapterNormCmdReady) { 67 else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) {
68 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady); 68 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
69 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); 69 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
70 } 70 }
71 else if (bellbits & DoorBellAdapterNormRespReady) { 71 else if (likely(bellbits & DoorBellAdapterNormRespReady)) {
72 rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady); 72 rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
73 aac_response_normal(&dev->queues->queue[HostNormRespQueue]); 73 aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
74 } 74 }
75 else if (bellbits & DoorBellAdapterNormCmdNotFull) { 75 else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) {
76 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); 76 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
77 } 77 }
78 else if (bellbits & DoorBellAdapterNormRespNotFull) { 78 else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) {
79 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); 79 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
80 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull); 80 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
81 } 81 }
@@ -88,11 +88,11 @@ static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
88{ 88{
89 struct aac_dev *dev = dev_id; 89 struct aac_dev *dev = dev_id;
90 u32 Index = rx_readl(dev, MUnit.OutboundQueue); 90 u32 Index = rx_readl(dev, MUnit.OutboundQueue);
91 if (Index == 0xFFFFFFFFL) 91 if (unlikely(Index == 0xFFFFFFFFL))
92 Index = rx_readl(dev, MUnit.OutboundQueue); 92 Index = rx_readl(dev, MUnit.OutboundQueue);
93 if (Index != 0xFFFFFFFFL) { 93 if (likely(Index != 0xFFFFFFFFL)) {
94 do { 94 do {
95 if (aac_intr_normal(dev, Index)) { 95 if (unlikely(aac_intr_normal(dev, Index))) {
96 rx_writel(dev, MUnit.OutboundQueue, Index); 96 rx_writel(dev, MUnit.OutboundQueue, Index);
97 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady); 97 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
98 } 98 }
@@ -204,7 +204,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
204 */ 204 */
205 msleep(1); 205 msleep(1);
206 } 206 }
207 if (ok != 1) { 207 if (unlikely(ok != 1)) {
208 /* 208 /*
209 * Restore interrupt mask even though we timed out 209 * Restore interrupt mask even though we timed out
210 */ 210 */
@@ -294,7 +294,7 @@ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
294 * Start up processing on an i960 based AAC adapter 294 * Start up processing on an i960 based AAC adapter
295 */ 295 */
296 296
297void aac_rx_start_adapter(struct aac_dev *dev) 297static void aac_rx_start_adapter(struct aac_dev *dev)
298{ 298{
299 struct aac_init *init; 299 struct aac_init *init;
300 300
@@ -319,12 +319,12 @@ static int aac_rx_check_health(struct aac_dev *dev)
319 /* 319 /*
320 * Check to see if the board failed any self tests. 320 * Check to see if the board failed any self tests.
321 */ 321 */
322 if (status & SELF_TEST_FAILED) 322 if (unlikely(status & SELF_TEST_FAILED))
323 return -1; 323 return -1;
324 /* 324 /*
325 * Check to see if the board panic'd. 325 * Check to see if the board panic'd.
326 */ 326 */
327 if (status & KERNEL_PANIC) { 327 if (unlikely(status & KERNEL_PANIC)) {
328 char * buffer; 328 char * buffer;
329 struct POSTSTATUS { 329 struct POSTSTATUS {
330 __le32 Post_Command; 330 __le32 Post_Command;
@@ -333,15 +333,15 @@ static int aac_rx_check_health(struct aac_dev *dev)
333 dma_addr_t paddr, baddr; 333 dma_addr_t paddr, baddr;
334 int ret; 334 int ret;
335 335
336 if ((status & 0xFF000000L) == 0xBC000000L) 336 if (likely((status & 0xFF000000L) == 0xBC000000L))
337 return (status >> 16) & 0xFF; 337 return (status >> 16) & 0xFF;
338 buffer = pci_alloc_consistent(dev->pdev, 512, &baddr); 338 buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
339 ret = -2; 339 ret = -2;
340 if (buffer == NULL) 340 if (unlikely(buffer == NULL))
341 return ret; 341 return ret;
342 post = pci_alloc_consistent(dev->pdev, 342 post = pci_alloc_consistent(dev->pdev,
343 sizeof(struct POSTSTATUS), &paddr); 343 sizeof(struct POSTSTATUS), &paddr);
344 if (post == NULL) { 344 if (unlikely(post == NULL)) {
345 pci_free_consistent(dev->pdev, 512, buffer, baddr); 345 pci_free_consistent(dev->pdev, 512, buffer, baddr);
346 return ret; 346 return ret;
347 } 347 }
@@ -353,7 +353,7 @@ static int aac_rx_check_health(struct aac_dev *dev)
353 NULL, NULL, NULL, NULL, NULL); 353 NULL, NULL, NULL, NULL, NULL);
354 pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS), 354 pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
355 post, paddr); 355 post, paddr);
356 if ((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X'))) { 356 if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
357 ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10); 357 ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
358 ret <<= 4; 358 ret <<= 4;
359 ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10); 359 ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
@@ -364,7 +364,7 @@ static int aac_rx_check_health(struct aac_dev *dev)
364 /* 364 /*
365 * Wait for the adapter to be up and running. 365 * Wait for the adapter to be up and running.
366 */ 366 */
367 if (!(status & KERNEL_UP_AND_RUNNING)) 367 if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
368 return -3; 368 return -3;
369 /* 369 /*
370 * Everything is OK 370 * Everything is OK
@@ -387,7 +387,7 @@ static int aac_rx_deliver_producer(struct fib * fib)
387 unsigned long nointr = 0; 387 unsigned long nointr = 0;
388 388
389 spin_lock_irqsave(q->lock, qflags); 389 spin_lock_irqsave(q->lock, qflags);
390 aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib, 1, fib, &nointr); 390 aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);
391 391
392 q->numpending++; 392 q->numpending++;
393 *(q->headers.producer) = cpu_to_le32(Index + 1); 393 *(q->headers.producer) = cpu_to_le32(Index + 1);
@@ -419,9 +419,9 @@ static int aac_rx_deliver_message(struct fib * fib)
419 spin_unlock_irqrestore(q->lock, qflags); 419 spin_unlock_irqrestore(q->lock, qflags);
420 for(;;) { 420 for(;;) {
421 Index = rx_readl(dev, MUnit.InboundQueue); 421 Index = rx_readl(dev, MUnit.InboundQueue);
422 if (Index == 0xFFFFFFFFL) 422 if (unlikely(Index == 0xFFFFFFFFL))
423 Index = rx_readl(dev, MUnit.InboundQueue); 423 Index = rx_readl(dev, MUnit.InboundQueue);
424 if (Index != 0xFFFFFFFFL) 424 if (likely(Index != 0xFFFFFFFFL))
425 break; 425 break;
426 if (--count == 0) { 426 if (--count == 0) {
427 spin_lock_irqsave(q->lock, qflags); 427 spin_lock_irqsave(q->lock, qflags);
@@ -437,7 +437,7 @@ static int aac_rx_deliver_message(struct fib * fib)
437 device += sizeof(u32); 437 device += sizeof(u32);
438 writel((u32)(addr >> 32), device); 438 writel((u32)(addr >> 32), device);
439 device += sizeof(u32); 439 device += sizeof(u32);
440 writel(le16_to_cpu(fib->hw_fib->header.Size), device); 440 writel(le16_to_cpu(fib->hw_fib_va->header.Size), device);
441 rx_writel(dev, MUnit.InboundQueue, Index); 441 rx_writel(dev, MUnit.InboundQueue, Index);
442 return 0; 442 return 0;
443} 443}
@@ -460,22 +460,34 @@ static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
460 return 0; 460 return 0;
461} 461}
462 462
463static int aac_rx_restart_adapter(struct aac_dev *dev) 463static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
464{ 464{
465 u32 var; 465 u32 var;
466 466
467 printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", 467 if (bled)
468 dev->name, dev->id); 468 printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
469 469 dev->name, dev->id, bled);
470 if (aac_rx_check_health(dev) <= 0) 470 else {
471 return 1; 471 bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
472 if (rx_sync_cmd(dev, IOP_RESET, 0, 0, 0, 0, 0, 0, 472 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
473 &var, NULL, NULL, NULL, NULL)) 473 if (!bled && (var != 0x00000001))
474 return 1; 474 bled = -EINVAL;
475 }
476 if (bled && (bled != -ETIMEDOUT))
477 bled = aac_adapter_sync_cmd(dev, IOP_RESET,
478 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
479
480 if (bled && (bled != -ETIMEDOUT))
481 return -EINVAL;
482 if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */
483 rx_writel(dev, MUnit.reserved2, 3);
484 msleep(5000); /* Delay 5 seconds */
485 var = 0x00000001;
486 }
475 if (var != 0x00000001) 487 if (var != 0x00000001)
476 return 1; 488 return -EINVAL;
477 if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) 489 if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
478 return 1; 490 return -ENODEV;
479 return 0; 491 return 0;
480} 492}
481 493
@@ -517,24 +529,29 @@ int _aac_rx_init(struct aac_dev *dev)
517{ 529{
518 unsigned long start; 530 unsigned long start;
519 unsigned long status; 531 unsigned long status;
520 int instance; 532 int restart = 0;
521 const char * name; 533 int instance = dev->id;
522 534 const char * name = dev->name;
523 instance = dev->id;
524 name = dev->name;
525 535
526 if (aac_adapter_ioremap(dev, dev->base_size)) { 536 if (aac_adapter_ioremap(dev, dev->base_size)) {
527 printk(KERN_WARNING "%s: unable to map adapter.\n", name); 537 printk(KERN_WARNING "%s: unable to map adapter.\n", name);
528 goto error_iounmap; 538 goto error_iounmap;
529 } 539 }
530 540
541 /* Failure to reset here is an option ... */
542 dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
543 if ((((status & 0xff) != 0xff) || reset_devices) &&
544 !aac_rx_restart_adapter(dev, 0))
545 ++restart;
531 /* 546 /*
532 * Check to see if the board panic'd while booting. 547 * Check to see if the board panic'd while booting.
533 */ 548 */
534 status = rx_readl(dev, MUnit.OMRx[0]); 549 status = rx_readl(dev, MUnit.OMRx[0]);
535 if (status & KERNEL_PANIC) 550 if (status & KERNEL_PANIC) {
536 if (aac_rx_restart_adapter(dev)) 551 if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))
537 goto error_iounmap; 552 goto error_iounmap;
553 ++restart;
554 }
538 /* 555 /*
539 * Check to see if the board failed any self tests. 556 * Check to see if the board failed any self tests.
540 */ 557 */
@@ -556,12 +573,23 @@ int _aac_rx_init(struct aac_dev *dev)
556 */ 573 */
557 while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING)) 574 while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
558 { 575 {
559 if(time_after(jiffies, start+startup_timeout*HZ)) 576 if ((restart &&
560 { 577 (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
578 time_after(jiffies, start+HZ*startup_timeout)) {
561 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", 579 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
562 dev->name, instance, status); 580 dev->name, instance, status);
563 goto error_iounmap; 581 goto error_iounmap;
564 } 582 }
583 if (!restart &&
584 ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
585 time_after(jiffies, start + HZ *
586 ((startup_timeout > 60)
587 ? (startup_timeout - 60)
588 : (startup_timeout / 2))))) {
589 if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev))))
590 start = jiffies;
591 ++restart;
592 }
565 msleep(1); 593 msleep(1);
566 } 594 }
567 /* 595 /*
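The rewritten wait loop distinguishes a first boot from a post-restart boot: once a restart has happened, any panic or self-test flag (or the full startup_timeout) aborts immediately, while on the first pass the driver gets one shot at restarting the adapter after a shorter deadline. A sketch of that earlier deadline, using the constants from the hunk above:

	/* Early-retry deadline carved out of startup_timeout: keep roughly a
	 * minute in reserve for the second attempt, or half the budget when
	 * startup_timeout itself is short. */
	unsigned long soft_deadline = start + HZ *
		((startup_timeout > 60) ? (startup_timeout - 60)
					: (startup_timeout / 2));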
@@ -572,6 +600,7 @@ int _aac_rx_init(struct aac_dev *dev)
572 dev->a_ops.adapter_notify = aac_rx_notify_adapter; 600 dev->a_ops.adapter_notify = aac_rx_notify_adapter;
573 dev->a_ops.adapter_sync_cmd = rx_sync_cmd; 601 dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
574 dev->a_ops.adapter_check_health = aac_rx_check_health; 602 dev->a_ops.adapter_check_health = aac_rx_check_health;
603 dev->a_ops.adapter_restart = aac_rx_restart_adapter;
575 604
576 /* 605 /*
577 * First clear out all interrupts. Then enable the one's that we 606 * First clear out all interrupts. Then enable the one's that we
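Registering aac_rx_restart_adapter in a_ops lets common aacraid code reset the controller without knowing it is an Rx-class board. A sketch of how such a hook would be reached; the accessor macro below is an assumption, written in the style of the driver's existing a_ops wrappers:

/* assumed accessor, in the style of aac_adapter_check_health() */
#define aac_adapter_restart(dev, bled) \
	((dev)->a_ops.adapter_restart(dev, bled))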
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 6f1a1780efce..f4b5e9742ab0 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -31,7 +31,6 @@
31#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/types.h> 33#include <linux/types.h>
34#include <linux/pci.h>
35#include <linux/spinlock.h> 34#include <linux/spinlock.h>
36#include <linux/slab.h> 35#include <linux/slab.h>
37#include <linux/blkdev.h> 36#include <linux/blkdev.h>
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 1d239f6c0103..cbbfbc9f3e0f 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -35,7 +35,6 @@
35#include <linux/proc_fs.h> 35#include <linux/proc_fs.h>
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/pci.h>
39#include <linux/isapnp.h> 38#include <linux/isapnp.h>
40#include <linux/blkdev.h> 39#include <linux/blkdev.h>
41#include <linux/mca.h> 40#include <linux/mca.h>
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx
index 911ea1756e55..5e6620f8dabc 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic79xx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx
@@ -57,18 +57,6 @@ config AIC79XX_BUILD_FIRMWARE
57 or modify the assembler Makefile or the files it includes if your 57 or modify the assembler Makefile or the files it includes if your
58 build environment is different than that of the author. 58 build environment is different than that of the author.
59 59
60config AIC79XX_ENABLE_RD_STRM
61 bool "Enable Read Streaming for All Targets"
62 depends on SCSI_AIC79XX
63 default n
64 help
65 Read Streaming is a U320 protocol option that should enhance
66 performance. Early U320 drive firmware actually performs slower
67 with read streaming enabled so it is disabled by default. Read
68 Streaming can be configured in much the same way as tagged queueing
69 using the "rd_strm" command line option. See
70 drivers/scsi/aic7xxx/README.aic79xx for details.
71
72config AIC79XX_DEBUG_ENABLE 60config AIC79XX_DEBUG_ENABLE
73 bool "Compile in Debugging Code" 61 bool "Compile in Debugging Code"
74 depends on SCSI_AIC79XX 62 depends on SCSI_AIC79XX
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
index cd93f9a8611f..88da670a7915 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
@@ -50,16 +50,6 @@ config AIC7XXX_RESET_DELAY_MS
50 50
51 Default: 5000 (5 seconds) 51 Default: 5000 (5 seconds)
52 52
53config AIC7XXX_PROBE_EISA_VL
54 bool "Probe for EISA and VL AIC7XXX Adapters"
55 depends on SCSI_AIC7XXX && EISA
56 help
57 Probe for EISA and VLB Aic7xxx controllers. In many newer systems,
58 the invasive probes necessary to detect these controllers can cause
59 other devices to fail. For this reason, the non-PCI probe code is
60 disabled by default. The current value of this option can be "toggled"
61 via the no_probe kernel command line option.
62
63config AIC7XXX_BUILD_FIRMWARE 53config AIC7XXX_BUILD_FIRMWARE
64 bool "Build Adapter Firmware with Kernel Build" 54 bool "Build Adapter Firmware with Kernel Build"
65 depends on SCSI_AIC7XXX && !PREVENT_FIRMWARE_BUILD 55 depends on SCSI_AIC7XXX && !PREVENT_FIRMWARE_BUILD
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 2be03e975d97..6054881f21f1 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -363,6 +363,8 @@ static int ahd_linux_run_command(struct ahd_softc*,
363 struct scsi_cmnd *); 363 struct scsi_cmnd *);
364static void ahd_linux_setup_tag_info_global(char *p); 364static void ahd_linux_setup_tag_info_global(char *p);
365static int aic79xx_setup(char *c); 365static int aic79xx_setup(char *c);
366static void ahd_freeze_simq(struct ahd_softc *ahd);
367static void ahd_release_simq(struct ahd_softc *ahd);
366 368
367static int ahd_linux_unit; 369static int ahd_linux_unit;
368 370
@@ -2016,13 +2018,13 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
2016 cmd->scsi_done(cmd); 2018 cmd->scsi_done(cmd);
2017} 2019}
2018 2020
2019void 2021static void
2020ahd_freeze_simq(struct ahd_softc *ahd) 2022ahd_freeze_simq(struct ahd_softc *ahd)
2021{ 2023{
2022 scsi_block_requests(ahd->platform_data->host); 2024 scsi_block_requests(ahd->platform_data->host);
2023} 2025}
2024 2026
2025void 2027static void
2026ahd_release_simq(struct ahd_softc *ahd) 2028ahd_release_simq(struct ahd_softc *ahd)
2027{ 2029{
2028 scsi_unblock_requests(ahd->platform_data->host); 2030 scsi_unblock_requests(ahd->platform_data->host);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 147c83c456a5..9218f29314fa 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -837,8 +837,6 @@ int ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg);
837void ahd_platform_free(struct ahd_softc *ahd); 837void ahd_platform_free(struct ahd_softc *ahd);
838void ahd_platform_init(struct ahd_softc *ahd); 838void ahd_platform_init(struct ahd_softc *ahd);
839void ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb); 839void ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb);
840void ahd_freeze_simq(struct ahd_softc *ahd);
841void ahd_release_simq(struct ahd_softc *ahd);
842 840
843static __inline void 841static __inline void
844ahd_freeze_scb(struct scb *scb) 842ahd_freeze_scb(struct scb *scb)
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index 954c7c24501d..e1bd57b9f23d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -1278,11 +1278,6 @@ typedef enum {
1278 AHC_QUEUE_TAGGED 1278 AHC_QUEUE_TAGGED
1279} ahc_queue_alg; 1279} ahc_queue_alg;
1280 1280
1281void ahc_set_tags(struct ahc_softc *ahc,
1282 struct scsi_cmnd *cmd,
1283 struct ahc_devinfo *devinfo,
1284 ahc_queue_alg alg);
1285
1286/**************************** Target Mode *************************************/ 1281/**************************** Target Mode *************************************/
1287#ifdef AHC_TARGET_MODE 1282#ifdef AHC_TARGET_MODE
1288void ahc_send_lstate_events(struct ahc_softc *, 1283void ahc_send_lstate_events(struct ahc_softc *,
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 50ef785224de..75733b09f27a 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -2073,7 +2073,7 @@ ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2073/* 2073/*
2074 * Update the current state of tagged queuing for a given target. 2074 * Update the current state of tagged queuing for a given target.
2075 */ 2075 */
2076void 2076static void
2077ahc_set_tags(struct ahc_softc *ahc, struct scsi_cmnd *cmd, 2077ahc_set_tags(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
2078 struct ahc_devinfo *devinfo, ahc_queue_alg alg) 2078 struct ahc_devinfo *devinfo, ahc_queue_alg alg)
2079{ 2079{
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 8f43ff772f23..db6ab1a3b81e 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -24,7 +24,6 @@
24 * 24 *
25 */ 25 */
26 26
27#include <linux/pci.h>
28#include <scsi/scsi_host.h> 27#include <scsi/scsi_host.h>
29 28
30#include "aic94xx.h" 29#include "aic94xx.h"
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 12497da5529d..03bfed61bffc 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -49,7 +49,6 @@
49#include <linux/init.h> 49#include <linux/init.h>
50#include <linux/errno.h> 50#include <linux/errno.h>
51#include <linux/delay.h> 51#include <linux/delay.h>
52#include <linux/pci.h>
53 52
54#include <scsi/scsi_cmnd.h> 53#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_device.h> 54#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 0f920c84ac0f..eff846ae0aff 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -1,19 +1,19 @@
1/* 1/*
2 * NCR 5380 generic driver routines. These should make it *trivial* 2 * NCR 5380 generic driver routines. These should make it *trivial*
3 * to implement 5380 SCSI drivers under Linux with a non-trantor 3 * to implement 5380 SCSI drivers under Linux with a non-trantor
4 * architecture. 4 * architecture.
5 * 5 *
6 * Note that these routines also work with NR53c400 family chips. 6 * Note that these routines also work with NR53c400 family chips.
7 * 7 *
8 * Copyright 1993, Drew Eckhardt 8 * Copyright 1993, Drew Eckhardt
9 * Visionary Computing 9 * Visionary Computing
10 * (Unix and Linux consulting and custom programming) 10 * (Unix and Linux consulting and custom programming)
11 * drew@colorado.edu 11 * drew@colorado.edu
12 * +1 (303) 666-5836 12 * +1 (303) 666-5836
13 * 13 *
14 * DISTRIBUTION RELEASE 6. 14 * DISTRIBUTION RELEASE 6.
15 * 15 *
16 * For more information, please consult 16 * For more information, please consult
17 * 17 *
18 * NCR 5380 Family 18 * NCR 5380 Family
19 * SCSI Protocol Controller 19 * SCSI Protocol Controller
@@ -57,7 +57,7 @@
57 * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA 57 * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA
58 * and USLEEP, because these were messing up readability and will never be 58 * and USLEEP, because these were messing up readability and will never be
59 * needed for Atari SCSI. 59 * needed for Atari SCSI.
60 * 60 *
61 * - I've revised the NCR5380_main() calling scheme (relax the 'main_running' 61 * - I've revised the NCR5380_main() calling scheme (relax the 'main_running'
62 * stuff), and 'main' is executed in a bottom half if awoken by an 62 * stuff), and 'main' is executed in a bottom half if awoken by an
63 * interrupt. 63 * interrupt.
@@ -69,21 +69,29 @@
69 */ 69 */
70 70
71/* 71/*
72 * Further development / testing that should be done : 72 * Further development / testing that should be done :
73 * 1. Test linked command handling code after Eric is ready with 73 * 1. Test linked command handling code after Eric is ready with
74 * the high level code. 74 * the high level code.
75 */ 75 */
76#include <scsi/scsi_dbg.h> 76#include <scsi/scsi_dbg.h>
77#include <scsi/scsi_transport_spi.h> 77#include <scsi/scsi_transport_spi.h>
78 78
79#if (NDEBUG & NDEBUG_LISTS) 79#if (NDEBUG & NDEBUG_LISTS)
80#define LIST(x,y) \ 80#define LIST(x, y) \
81 { printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); \ 81 do { \
82 if ((x)==(y)) udelay(5); } 82 printk("LINE:%d Adding %p to %p\n", \
83#define REMOVE(w,x,y,z) \ 83 __LINE__, (void*)(x), (void*)(y)); \
84 { printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, \ 84 if ((x) == (y)) \
85 (void*)(w), (void*)(x), (void*)(y), (void*)(z)); \ 85 udelay(5); \
86 if ((x)==(y)) udelay(5); } 86 } while (0)
87#define REMOVE(w, x, y, z) \
88 do { \
89 printk("LINE:%d Removing: %p->%p %p->%p \n", \
90 __LINE__, (void*)(w), (void*)(x), \
91 (void*)(y), (void*)(z)); \
92 if ((x) == (y)) \
93 udelay(5); \
94 } while (0)
87#else 95#else
88#define LIST(x,y) 96#define LIST(x,y)
89#define REMOVE(w,x,y,z) 97#define REMOVE(w,x,y,z)
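Wrapping LIST() and REMOVE() in do { ... } while (0) is the standard way to make a multi-statement macro behave as a single statement, so it can sit under an if without swallowing a following else. A minimal illustration with a hypothetical TRACE() macro:

#define TRACE(x, y)						\
	do {							\
		printk("LINE:%d %p -> %p\n", __LINE__,		\
		       (void *)(x), (void *)(y));		\
		if ((x) == (y))					\
			udelay(5);				\
	} while (0)

	/* Composes correctly; with bare braces the 'else' below would
	 * attach to the if() hidden inside the macro body. */
	if (debug)
		TRACE(a, b);
	else
		return;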
@@ -103,62 +111,62 @@
103 * more difficult than it has to be. 111 * more difficult than it has to be.
104 * 112 *
105 * Also, many of the SCSI drivers were written before the command queuing 113 * Also, many of the SCSI drivers were written before the command queuing
106 * routines were implemented, meaning their implementations of queued 114 * routines were implemented, meaning their implementations of queued
107 * commands were hacked on rather than designed in from the start. 115 * commands were hacked on rather than designed in from the start.
108 * 116 *
109 * When I designed the Linux SCSI drivers I figured that 117 * When I designed the Linux SCSI drivers I figured that
110 * while having two different SCSI boards in a system might be useful 118 * while having two different SCSI boards in a system might be useful
111 * for debugging things, two of the same type wouldn't be used. 119 * for debugging things, two of the same type wouldn't be used.
112 * Well, I was wrong and a number of users have mailed me about running 120 * Well, I was wrong and a number of users have mailed me about running
113 * multiple high-performance SCSI boards in a server. 121 * multiple high-performance SCSI boards in a server.
114 * 122 *
115 * Finally, when I get questions from users, I have no idea what 123 * Finally, when I get questions from users, I have no idea what
116 * revision of my driver they are running. 124 * revision of my driver they are running.
117 * 125 *
118 * This driver attempts to address these problems : 126 * This driver attempts to address these problems :
119 * This is a generic 5380 driver. To use it on a different platform, 127 * This is a generic 5380 driver. To use it on a different platform,
120 * one simply writes appropriate system specific macros (ie, data 128 * one simply writes appropriate system specific macros (ie, data
121 * transfer - some PC's will use the I/O bus, 68K's must use 129 * transfer - some PC's will use the I/O bus, 68K's must use
122 * memory mapped) and drops this file in their 'C' wrapper. 130 * memory mapped) and drops this file in their 'C' wrapper.
123 * 131 *
124 * As far as command queueing, two queues are maintained for 132 * As far as command queueing, two queues are maintained for
125 * each 5380 in the system - commands that haven't been issued yet, 133 * each 5380 in the system - commands that haven't been issued yet,
126 * and commands that are currently executing. This means that an 134 * and commands that are currently executing. This means that an
127 * unlimited number of commands may be queued, letting 135 * unlimited number of commands may be queued, letting
128 * more commands propagate from the higher driver levels giving higher 136 * more commands propagate from the higher driver levels giving higher
129 * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported, 137 * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported,
130 * allowing multiple commands to propagate all the way to a SCSI-II device 138 * allowing multiple commands to propagate all the way to a SCSI-II device
131 * while a command is already executing. 139 * while a command is already executing.
132 * 140 *
133 * To solve the multiple-boards-in-the-same-system problem, 141 * To solve the multiple-boards-in-the-same-system problem,
134 * there is a separate instance structure for each instance 142 * there is a separate instance structure for each instance
135 * of a 5380 in the system. So, multiple NCR5380 drivers will 143 * of a 5380 in the system. So, multiple NCR5380 drivers will
136 * be able to coexist with appropriate changes to the high level 144 * be able to coexist with appropriate changes to the high level
137 * SCSI code. 145 * SCSI code.
138 * 146 *
139 * A NCR5380_PUBLIC_REVISION macro is provided, with the release 147 * A NCR5380_PUBLIC_REVISION macro is provided, with the release
140 * number (updated for each public release) printed by the 148 * number (updated for each public release) printed by the
141 * NCR5380_print_options command, which should be called from the 149 * NCR5380_print_options command, which should be called from the
142 * wrapper detect function, so that I know what release of the driver 150 * wrapper detect function, so that I know what release of the driver
143 * users are using. 151 * users are using.
144 * 152 *
145 * Issues specific to the NCR5380 : 153 * Issues specific to the NCR5380 :
146 * 154 *
147 * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead 155 * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead
148 * piece of hardware that requires you to sit in a loop polling for 156 * piece of hardware that requires you to sit in a loop polling for
149 * the REQ signal as long as you are connected. Some devices are 157 * the REQ signal as long as you are connected. Some devices are
150 * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect 158 * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect
151 * while doing long seek operations. 159 * while doing long seek operations.
152 * 160 *
153 * The workaround for this is to keep track of devices that have 161 * The workaround for this is to keep track of devices that have
154 * disconnected. If the device hasn't disconnected, for commands that 162 * disconnected. If the device hasn't disconnected, for commands that
155 * should disconnect, we do something like 163 * should disconnect, we do something like
156 * 164 *
157 * while (!REQ is asserted) { sleep for N usecs; poll for M usecs } 165 * while (!REQ is asserted) { sleep for N usecs; poll for M usecs }
158 * 166 *
159 * Some tweaking of N and M needs to be done. An algorithm based 167 * Some tweaking of N and M needs to be done. An algorithm based
160 * on "time to data" would give the best results as long as short time 168 * on "time to data" would give the best results as long as short time
161 * to datas (ie, on the same track) were considered, however these 169 * to datas (ie, on the same track) were considered, however these
162 * broken devices are the exception rather than the rule and I'd rather 170 * broken devices are the exception rather than the rule and I'd rather
163 * spend my time optimizing for the normal case. 171 * spend my time optimizing for the normal case.
164 * 172 *
@@ -167,9 +175,9 @@
167 * At the heart of the design is a coroutine, NCR5380_main, 175 * At the heart of the design is a coroutine, NCR5380_main,
168 * which is started when not running by the interrupt handler, 176 * which is started when not running by the interrupt handler,
169 * timer, and queue command function. It attempts to establish 177 * timer, and queue command function. It attempts to establish
170 * I_T_L or I_T_L_Q nexuses by removing the commands from the 178 * I_T_L or I_T_L_Q nexuses by removing the commands from the
171 * issue queue and calling NCR5380_select() if a nexus 179 * issue queue and calling NCR5380_select() if a nexus
172 * is not established. 180 * is not established.
173 * 181 *
174 * Once a nexus is established, the NCR5380_information_transfer() 182 * Once a nexus is established, the NCR5380_information_transfer()
175 * phase goes through the various phases as instructed by the target. 183 * phase goes through the various phases as instructed by the target.
@@ -183,10 +191,10 @@
183 * calling NCR5380_intr() which will in turn call NCR5380_reselect 191 * calling NCR5380_intr() which will in turn call NCR5380_reselect
184 * to reestablish a nexus. This will run main if necessary. 192 * to reestablish a nexus. This will run main if necessary.
185 * 193 *
186 * On command termination, the done function will be called as 194 * On command termination, the done function will be called as
187 * appropriate. 195 * appropriate.
188 * 196 *
189 * SCSI pointers are maintained in the SCp field of SCSI command 197 * SCSI pointers are maintained in the SCp field of SCSI command
190 * structures, being initialized after the command is connected 198 * structures, being initialized after the command is connected
191 * in NCR5380_select, and set as appropriate in NCR5380_information_transfer. 199 * in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
192 * Note that in violation of the standard, an implicit SAVE POINTERS operation 200 * Note that in violation of the standard, an implicit SAVE POINTERS operation
@@ -196,12 +204,12 @@
196/* 204/*
197 * Using this file : 205 * Using this file :
198 * This file a skeleton Linux SCSI driver for the NCR 5380 series 206 * This file a skeleton Linux SCSI driver for the NCR 5380 series
199 * of chips. To use it, you write an architecture specific functions 207 * of chips. To use it, you write an architecture specific functions
200 * and macros and include this file in your driver. 208 * and macros and include this file in your driver.
201 * 209 *
202 * These macros control options : 210 * These macros control options :
203 * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically 211 * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
204 * for commands that return with a CHECK CONDITION status. 212 * for commands that return with a CHECK CONDITION status.
205 * 213 *
206 * LINKED - if defined, linked commands are supported. 214 * LINKED - if defined, linked commands are supported.
207 * 215 *
@@ -210,18 +218,18 @@
210 * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible 218 * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible
211 * 219 *
212 * These macros MUST be defined : 220 * These macros MUST be defined :
213 * 221 *
214 * NCR5380_read(register) - read from the specified register 222 * NCR5380_read(register) - read from the specified register
215 * 223 *
216 * NCR5380_write(register, value) - write to the specific register 224 * NCR5380_write(register, value) - write to the specific register
217 * 225 *
218 * Either real DMA *or* pseudo DMA may be implemented 226 * Either real DMA *or* pseudo DMA may be implemented
219 * REAL functions : 227 * REAL functions :
220 * NCR5380_REAL_DMA should be defined if real DMA is to be used. 228 * NCR5380_REAL_DMA should be defined if real DMA is to be used.
221 * Note that the DMA setup functions should return the number of bytes 229 * Note that the DMA setup functions should return the number of bytes
222 * that they were able to program the controller for. 230 * that they were able to program the controller for.
223 * 231 *
224 * Also note that generic i386/PC versions of these macros are 232 * Also note that generic i386/PC versions of these macros are
225 * available as NCR5380_i386_dma_write_setup, 233 * available as NCR5380_i386_dma_write_setup,
226 * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. 234 * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
227 * 235 *
@@ -234,14 +242,14 @@
234 * NCR5380_pread(instance, dst, count); 242 * NCR5380_pread(instance, dst, count);
235 * 243 *
236 * If nothing specific to this implementation needs doing (ie, with external 244 * If nothing specific to this implementation needs doing (ie, with external
237 * hardware), you must also define 245 * hardware), you must also define
238 * 246 *
239 * NCR5380_queue_command 247 * NCR5380_queue_command
240 * NCR5380_reset 248 * NCR5380_reset
241 * NCR5380_abort 249 * NCR5380_abort
242 * NCR5380_proc_info 250 * NCR5380_proc_info
243 * 251 *
244 * to be the global entry points into the specific driver, ie 252 * to be the global entry points into the specific driver, ie
245 * #define NCR5380_queue_command t128_queue_command. 253 * #define NCR5380_queue_command t128_queue_command.
246 * 254 *
247 * If this is not done, the routines will be defined as static functions 255 * If this is not done, the routines will be defined as static functions
@@ -249,7 +257,7 @@
249 * accessible wrapper function. 257 * accessible wrapper function.
250 * 258 *
251 * The generic driver is initialized by calling NCR5380_init(instance), 259 * The generic driver is initialized by calling NCR5380_init(instance),
252 * after setting the appropriate host specific fields and ID. If the 260 * after setting the appropriate host specific fields and ID. If the
253 * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, 261 * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
254 * possible) function may be used. Before the specific driver initialization 262 * possible) function may be used. Before the specific driver initialization
255 * code finishes, NCR5380_print_options should be called. 263 * code finishes, NCR5380_print_options should be called.
@@ -264,8 +272,9 @@ static struct scsi_host_template *the_template = NULL;
264 (struct NCR5380_hostdata *)(in)->hostdata 272 (struct NCR5380_hostdata *)(in)->hostdata
265#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata) 273#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata)
266 274
267#define NEXT(cmd) ((Scsi_Cmnd *)((cmd)->host_scribble)) 275#define NEXT(cmd) ((Scsi_Cmnd *)(cmd)->host_scribble)
268#define NEXTADDR(cmd) ((Scsi_Cmnd **)&((cmd)->host_scribble)) 276#define SET_NEXT(cmd,next) ((cmd)->host_scribble = (void *)(next))
277#define NEXTADDR(cmd) ((Scsi_Cmnd **)&(cmd)->host_scribble)
269 278
270#define HOSTNO instance->host_no 279#define HOSTNO instance->host_no
271#define H_NO(cmd) (cmd)->device->host->host_no 280#define H_NO(cmd) (cmd)->device->host->host_no
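The new SET_NEXT() macro completes the pair: commands are chained into the issue and disconnected queues through the opaque host_scribble pointer, and the cast now lives in one place instead of being open-coded at every enqueue. A small sketch of queue manipulation with these macros; the function names are illustrative, not from the driver:

static void enqueue_head(Scsi_Cmnd **queue, Scsi_Cmnd *cmd)
{
	SET_NEXT(cmd, *queue);		/* cmd->host_scribble = (void *)*queue */
	*queue = cmd;
}

static Scsi_Cmnd *dequeue_head(Scsi_Cmnd **queue)
{
	Scsi_Cmnd *cmd = *queue;

	if (cmd) {
		*queue = NEXT(cmd);	/* follow the host_scribble link */
		SET_NEXT(cmd, NULL);
	}
	return cmd;
}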
@@ -312,34 +321,34 @@ static struct scsi_host_template *the_template = NULL;
312#define TAG_NONE 0xff 321#define TAG_NONE 0xff
313 322
314typedef struct { 323typedef struct {
315 DECLARE_BITMAP(allocated, MAX_TAGS); 324 DECLARE_BITMAP(allocated, MAX_TAGS);
316 int nr_allocated; 325 int nr_allocated;
317 int queue_size; 326 int queue_size;
318} TAG_ALLOC; 327} TAG_ALLOC;
319 328
320static TAG_ALLOC TagAlloc[8][8]; /* 8 targets and 8 LUNs */ 329static TAG_ALLOC TagAlloc[8][8]; /* 8 targets and 8 LUNs */
321 330
322 331
323static void __init init_tags( void ) 332static void __init init_tags(void)
324{ 333{
325 int target, lun; 334 int target, lun;
326 TAG_ALLOC *ta; 335 TAG_ALLOC *ta;
327 336
328 if (!setup_use_tagged_queuing) 337 if (!setup_use_tagged_queuing)
329 return; 338 return;
330 339
331 for( target = 0; target < 8; ++target ) { 340 for (target = 0; target < 8; ++target) {
332 for( lun = 0; lun < 8; ++lun ) { 341 for (lun = 0; lun < 8; ++lun) {
333 ta = &TagAlloc[target][lun]; 342 ta = &TagAlloc[target][lun];
334 bitmap_zero(ta->allocated, MAX_TAGS); 343 bitmap_zero(ta->allocated, MAX_TAGS);
335 ta->nr_allocated = 0; 344 ta->nr_allocated = 0;
336 /* At the beginning, assume the maximum queue size we could 345 /* At the beginning, assume the maximum queue size we could
337 * support (MAX_TAGS). This value will be decreased if the target 346 * support (MAX_TAGS). This value will be decreased if the target
338 * returns QUEUE_FULL status. 347 * returns QUEUE_FULL status.
339 */ 348 */
340 ta->queue_size = MAX_TAGS; 349 ta->queue_size = MAX_TAGS;
350 }
341 } 351 }
342 }
343} 352}
344 353
345 354
@@ -348,24 +357,24 @@ static void __init init_tags( void )
348 * check that there is a free tag and the target's queue won't overflow. This 357 * check that there is a free tag and the target's queue won't overflow. This
349 * function should be called with interrupts disabled to avoid race 358 * function should be called with interrupts disabled to avoid race
350 * conditions. 359 * conditions.
351 */ 360 */
352 361
353static int is_lun_busy( Scsi_Cmnd *cmd, int should_be_tagged ) 362static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged)
354{ 363{
355 SETUP_HOSTDATA(cmd->device->host); 364 SETUP_HOSTDATA(cmd->device->host);
356 365
357 if (hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)) 366 if (hostdata->busy[cmd->device->id] & (1 << cmd->device->lun))
358 return( 1 ); 367 return 1;
359 if (!should_be_tagged || 368 if (!should_be_tagged ||
360 !setup_use_tagged_queuing || !cmd->device->tagged_supported) 369 !setup_use_tagged_queuing || !cmd->device->tagged_supported)
361 return( 0 ); 370 return 0;
362 if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >= 371 if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=
363 TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) { 372 TagAlloc[cmd->device->id][cmd->device->lun].queue_size) {
364 TAG_PRINTK( "scsi%d: target %d lun %d: no free tags\n", 373 TAG_PRINTK("scsi%d: target %d lun %d: no free tags\n",
365 H_NO(cmd), cmd->device->id, cmd->device->lun ); 374 H_NO(cmd), cmd->device->id, cmd->device->lun);
366 return( 1 ); 375 return 1;
367 } 376 }
368 return( 0 ); 377 return 0;
369} 378}
370 379
371 380
@@ -374,31 +383,30 @@ static int is_lun_busy( Scsi_Cmnd *cmd, int should_be_tagged )
374 * untagged. 383 * untagged.
375 */ 384 */
376 385
377static void cmd_get_tag( Scsi_Cmnd *cmd, int should_be_tagged ) 386static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
378{ 387{
379 SETUP_HOSTDATA(cmd->device->host); 388 SETUP_HOSTDATA(cmd->device->host);
380 389
381 /* If we or the target don't support tagged queuing, allocate the LUN for 390 /* If we or the target don't support tagged queuing, allocate the LUN for
382 * an untagged command. 391 * an untagged command.
383 */ 392 */
384 if (!should_be_tagged || 393 if (!should_be_tagged ||
385 !setup_use_tagged_queuing || !cmd->device->tagged_supported) { 394 !setup_use_tagged_queuing || !cmd->device->tagged_supported) {
386 cmd->tag = TAG_NONE; 395 cmd->tag = TAG_NONE;
387 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); 396 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
388 TAG_PRINTK( "scsi%d: target %d lun %d now allocated by untagged " 397 TAG_PRINTK("scsi%d: target %d lun %d now allocated by untagged "
389 "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun ); 398 "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun);
390 } 399 } else {
391 else { 400 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
392 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; 401
393 402 cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
394 cmd->tag = find_first_zero_bit( ta->allocated, MAX_TAGS ); 403 set_bit(cmd->tag, ta->allocated);
395 set_bit( cmd->tag, ta->allocated ); 404 ta->nr_allocated++;
396 ta->nr_allocated++; 405 TAG_PRINTK("scsi%d: using tag %d for target %d lun %d "
397 TAG_PRINTK( "scsi%d: using tag %d for target %d lun %d " 406 "(now %d tags in use)\n",
398 "(now %d tags in use)\n", 407 H_NO(cmd), cmd->tag, cmd->device->id,
399 H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun, 408 cmd->device->lun, ta->nr_allocated);
400 ta->nr_allocated ); 409 }
401 }
402} 410}
403 411
404 412
@@ -406,44 +414,42 @@ static void cmd_get_tag( Scsi_Cmnd *cmd, int should_be_tagged )
406 * unlock the LUN. 414 * unlock the LUN.
407 */ 415 */
408 416
409static void cmd_free_tag( Scsi_Cmnd *cmd ) 417static void cmd_free_tag(Scsi_Cmnd *cmd)
410{ 418{
411 SETUP_HOSTDATA(cmd->device->host); 419 SETUP_HOSTDATA(cmd->device->host);
412 420
413 if (cmd->tag == TAG_NONE) { 421 if (cmd->tag == TAG_NONE) {
414 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); 422 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
415 TAG_PRINTK( "scsi%d: target %d lun %d untagged cmd finished\n", 423 TAG_PRINTK("scsi%d: target %d lun %d untagged cmd finished\n",
416 H_NO(cmd), cmd->device->id, cmd->device->lun ); 424 H_NO(cmd), cmd->device->id, cmd->device->lun);
417 } 425 } else if (cmd->tag >= MAX_TAGS) {
418 else if (cmd->tag >= MAX_TAGS) { 426 printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
419 printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", 427 H_NO(cmd), cmd->tag);
420 H_NO(cmd), cmd->tag ); 428 } else {
421 } 429 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
422 else { 430 clear_bit(cmd->tag, ta->allocated);
423 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; 431 ta->nr_allocated--;
424 clear_bit( cmd->tag, ta->allocated ); 432 TAG_PRINTK("scsi%d: freed tag %d for target %d lun %d\n",
425 ta->nr_allocated--; 433 H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun);
426 TAG_PRINTK( "scsi%d: freed tag %d for target %d lun %d\n", 434 }
427 H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun );
428 }
429} 435}
430 436
431 437
432static void free_all_tags( void ) 438static void free_all_tags(void)
433{ 439{
434 int target, lun; 440 int target, lun;
435 TAG_ALLOC *ta; 441 TAG_ALLOC *ta;
436 442
437 if (!setup_use_tagged_queuing) 443 if (!setup_use_tagged_queuing)
438 return; 444 return;
439 445
440 for( target = 0; target < 8; ++target ) { 446 for (target = 0; target < 8; ++target) {
441 for( lun = 0; lun < 8; ++lun ) { 447 for (lun = 0; lun < 8; ++lun) {
442 ta = &TagAlloc[target][lun]; 448 ta = &TagAlloc[target][lun];
443 bitmap_zero(ta->allocated, MAX_TAGS); 449 bitmap_zero(ta->allocated, MAX_TAGS);
444 ta->nr_allocated = 0; 450 ta->nr_allocated = 0;
451 }
445 } 452 }
446 }
447} 453}
448 454
449#endif /* SUPPORT_TAGS */ 455#endif /* SUPPORT_TAGS */
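The SUPPORT_TAGS code above keeps one small bitmap per target/LUN and hands out SCSI-2 tags with the generic bitmap helpers. A condensed, kernel-style sketch of the allocate/free cycle built on the same TAG_ALLOC fields; the helper names are illustrative:

static int tag_alloc(TAG_ALLOC *ta)
{
	unsigned int tag;

	if (ta->nr_allocated >= ta->queue_size)
		return -1;			/* target reported QUEUE_FULL at this depth */
	tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
	set_bit(tag, ta->allocated);
	ta->nr_allocated++;
	return tag;
}

static void tag_free(TAG_ALLOC *ta, unsigned int tag)
{
	clear_bit(tag, ta->allocated);
	ta->nr_allocated--;
}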
@@ -461,89 +467,94 @@ static void free_all_tags( void )
461 * assumed to be already transfered into ptr/this_residual. 467 * assumed to be already transfered into ptr/this_residual.
462 */ 468 */
463 469
464static void merge_contiguous_buffers( Scsi_Cmnd *cmd ) 470static void merge_contiguous_buffers(Scsi_Cmnd *cmd)
465{ 471{
466 unsigned long endaddr; 472 unsigned long endaddr;
467#if (NDEBUG & NDEBUG_MERGING) 473#if (NDEBUG & NDEBUG_MERGING)
468 unsigned long oldlen = cmd->SCp.this_residual; 474 unsigned long oldlen = cmd->SCp.this_residual;
469 int cnt = 1; 475 int cnt = 1;
470#endif 476#endif
471 477
472 for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1; 478 for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
473 cmd->SCp.buffers_residual && 479 cmd->SCp.buffers_residual &&
474 virt_to_phys(page_address(cmd->SCp.buffer[1].page)+ 480 virt_to_phys(page_address(cmd->SCp.buffer[1].page) +
475 cmd->SCp.buffer[1].offset) == endaddr; ) { 481 cmd->SCp.buffer[1].offset) == endaddr;) {
476 MER_PRINTK("VTOP(%p) == %08lx -> merging\n", 482 MER_PRINTK("VTOP(%p) == %08lx -> merging\n",
477 cmd->SCp.buffer[1].address, endaddr); 483 page_address(cmd->SCp.buffer[1].page), endaddr);
478#if (NDEBUG & NDEBUG_MERGING) 484#if (NDEBUG & NDEBUG_MERGING)
479 ++cnt; 485 ++cnt;
480#endif 486#endif
481 ++cmd->SCp.buffer; 487 ++cmd->SCp.buffer;
482 --cmd->SCp.buffers_residual; 488 --cmd->SCp.buffers_residual;
483 cmd->SCp.this_residual += cmd->SCp.buffer->length; 489 cmd->SCp.this_residual += cmd->SCp.buffer->length;
484 endaddr += cmd->SCp.buffer->length; 490 endaddr += cmd->SCp.buffer->length;
485 } 491 }
486#if (NDEBUG & NDEBUG_MERGING) 492#if (NDEBUG & NDEBUG_MERGING)
487 if (oldlen != cmd->SCp.this_residual) 493 if (oldlen != cmd->SCp.this_residual)
488 MER_PRINTK("merged %d buffers from %p, new length %08x\n", 494 MER_PRINTK("merged %d buffers from %p, new length %08x\n",
489 cnt, cmd->SCp.ptr, cmd->SCp.this_residual); 495 cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
490#endif 496#endif
491} 497}
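merge_contiguous_buffers() collapses neighbouring scatterlist entries whose physical addresses happen to be adjacent, so the DMA engine can be programmed for one long transfer instead of several short ones. The same idea in a self-contained, user-space form, using a hypothetical extent type purely for illustration:

#include <stddef.h>

struct extent { unsigned long start; unsigned long len; };

/* Collapse adjacent extents whose end meets the next start; returns the
 * new number of extents. */
static size_t merge_contiguous(struct extent *e, size_t n)
{
	size_t out = 0, i;

	if (n == 0)
		return 0;
	for (i = 1; i < n; i++) {
		if (e[out].start + e[out].len == e[i].start)
			e[out].len += e[i].len;	/* contiguous: grow current extent */
		else
			e[++out] = e[i];	/* gap: keep it as a separate extent */
	}
	return out + 1;
}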
492 498
493/* 499/*
494 * Function : void initialize_SCp(Scsi_Cmnd *cmd) 500 * Function : void initialize_SCp(Scsi_Cmnd *cmd)
495 * 501 *
496 * Purpose : initialize the saved data pointers for cmd to point to the 502 * Purpose : initialize the saved data pointers for cmd to point to the
497 * start of the buffer. 503 * start of the buffer.
498 * 504 *
499 * Inputs : cmd - Scsi_Cmnd structure to have pointers reset. 505 * Inputs : cmd - Scsi_Cmnd structure to have pointers reset.
500 */ 506 */
501 507
502static __inline__ void initialize_SCp(Scsi_Cmnd *cmd) 508static inline void initialize_SCp(Scsi_Cmnd *cmd)
503{ 509{
504 /* 510 /*
505 * Initialize the Scsi Pointer field so that all of the commands in the 511 * Initialize the Scsi Pointer field so that all of the commands in the
506 * various queues are valid. 512 * various queues are valid.
507 */
508
509 if (cmd->use_sg) {
510 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
511 cmd->SCp.buffers_residual = cmd->use_sg - 1;
512 cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page)+
513 cmd->SCp.buffer->offset;
514 cmd->SCp.this_residual = cmd->SCp.buffer->length;
515 /* ++roman: Try to merge some scatter-buffers if they are at
516 * contiguous physical addresses.
517 */ 513 */
518 merge_contiguous_buffers( cmd ); 514
519 } else { 515 if (cmd->use_sg) {
520 cmd->SCp.buffer = NULL; 516 cmd->SCp.buffer = (struct scatterlist *)cmd->request_buffer;
521 cmd->SCp.buffers_residual = 0; 517 cmd->SCp.buffers_residual = cmd->use_sg - 1;
522 cmd->SCp.ptr = (char *) cmd->request_buffer; 518 cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page) +
523 cmd->SCp.this_residual = cmd->request_bufflen; 519 cmd->SCp.buffer->offset;
524 } 520 cmd->SCp.this_residual = cmd->SCp.buffer->length;
521 /* ++roman: Try to merge some scatter-buffers if they are at
522 * contiguous physical addresses.
523 */
524 merge_contiguous_buffers(cmd);
525 } else {
526 cmd->SCp.buffer = NULL;
527 cmd->SCp.buffers_residual = 0;
528 cmd->SCp.ptr = (char *)cmd->request_buffer;
529 cmd->SCp.this_residual = cmd->request_bufflen;
530 }
525} 531}
526 532
527#include <linux/delay.h> 533#include <linux/delay.h>
528 534
529#if NDEBUG 535#if NDEBUG
530static struct { 536static struct {
531 unsigned char mask; 537 unsigned char mask;
532 const char * name;} 538 const char *name;
533signals[] = {{ SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" }, 539} signals[] = {
534 { SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" }, 540 { SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" },
535 { SR_SEL, "SEL" }, {0, NULL}}, 541 { SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" },
536basrs[] = {{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}}, 542 { SR_SEL, "SEL" }, {0, NULL}
537icrs[] = {{ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"}, 543}, basrs[] = {
538 {ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"}, 544 {BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}
539 {ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"}, 545}, icrs[] = {
540 {0, NULL}}, 546 {ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"},
541mrs[] = {{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"}, 547 {ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"},
542 {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR, 548 {ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"},
543 "MODE PARITY INTR"}, {MR_ENABLE_EOP_INTR,"MODE EOP INTR"}, 549 {0, NULL}
544 {MR_MONITOR_BSY, "MODE MONITOR BSY"}, 550}, mrs[] = {
545 {MR_DMA_MODE, "MODE DMA"}, {MR_ARBITRATE, "MODE ARBITRATION"}, 551 {MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
546 {0, NULL}}; 552 {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR,
553 "MODE PARITY INTR"}, {MR_ENABLE_EOP_INTR,"MODE EOP INTR"},
554 {MR_MONITOR_BSY, "MODE MONITOR BSY"},
555 {MR_DMA_MODE, "MODE DMA"}, {MR_ARBITRATE, "MODE ARBITRATION"},
556 {0, NULL}
557};
547 558
548/* 559/*
549 * Function : void NCR5380_print(struct Scsi_Host *instance) 560 * Function : void NCR5380_print(struct Scsi_Host *instance)
@@ -553,45 +564,47 @@ mrs[] = {{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
553 * Input : instance - which NCR5380 564 * Input : instance - which NCR5380
554 */ 565 */
555 566
556static void NCR5380_print(struct Scsi_Host *instance) { 567static void NCR5380_print(struct Scsi_Host *instance)
557 unsigned char status, data, basr, mr, icr, i; 568{
558 unsigned long flags; 569 unsigned char status, data, basr, mr, icr, i;
559 570 unsigned long flags;
560 local_irq_save(flags); 571
561 data = NCR5380_read(CURRENT_SCSI_DATA_REG); 572 local_irq_save(flags);
562 status = NCR5380_read(STATUS_REG); 573 data = NCR5380_read(CURRENT_SCSI_DATA_REG);
563 mr = NCR5380_read(MODE_REG); 574 status = NCR5380_read(STATUS_REG);
564 icr = NCR5380_read(INITIATOR_COMMAND_REG); 575 mr = NCR5380_read(MODE_REG);
565 basr = NCR5380_read(BUS_AND_STATUS_REG); 576 icr = NCR5380_read(INITIATOR_COMMAND_REG);
566 local_irq_restore(flags); 577 basr = NCR5380_read(BUS_AND_STATUS_REG);
567 printk("STATUS_REG: %02x ", status); 578 local_irq_restore(flags);
568 for (i = 0; signals[i].mask ; ++i) 579 printk("STATUS_REG: %02x ", status);
569 if (status & signals[i].mask) 580 for (i = 0; signals[i].mask; ++i)
570 printk(",%s", signals[i].name); 581 if (status & signals[i].mask)
571 printk("\nBASR: %02x ", basr); 582 printk(",%s", signals[i].name);
572 for (i = 0; basrs[i].mask ; ++i) 583 printk("\nBASR: %02x ", basr);
573 if (basr & basrs[i].mask) 584 for (i = 0; basrs[i].mask; ++i)
574 printk(",%s", basrs[i].name); 585 if (basr & basrs[i].mask)
575 printk("\nICR: %02x ", icr); 586 printk(",%s", basrs[i].name);
576 for (i = 0; icrs[i].mask; ++i) 587 printk("\nICR: %02x ", icr);
577 if (icr & icrs[i].mask) 588 for (i = 0; icrs[i].mask; ++i)
578 printk(",%s", icrs[i].name); 589 if (icr & icrs[i].mask)
579 printk("\nMODE: %02x ", mr); 590 printk(",%s", icrs[i].name);
580 for (i = 0; mrs[i].mask; ++i) 591 printk("\nMODE: %02x ", mr);
581 if (mr & mrs[i].mask) 592 for (i = 0; mrs[i].mask; ++i)
582 printk(",%s", mrs[i].name); 593 if (mr & mrs[i].mask)
583 printk("\n"); 594 printk(",%s", mrs[i].name);
595 printk("\n");
584} 596}
585 597
586static struct { 598static struct {
587 unsigned char value; 599 unsigned char value;
588 const char *name; 600 const char *name;
589} phases[] = { 601} phases[] = {
590 {PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"}, 602 {PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"},
591 {PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"}, 603 {PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
592 {PHASE_UNKNOWN, "UNKNOWN"}}; 604 {PHASE_UNKNOWN, "UNKNOWN"}
605};
593 606
594/* 607/*
595 * Function : void NCR5380_print_phase(struct Scsi_Host *instance) 608 * Function : void NCR5380_print_phase(struct Scsi_Host *instance)
596 * 609 *
597 * Purpose : print the current SCSI phase for debugging purposes 610 * Purpose : print the current SCSI phase for debugging purposes
@@ -601,30 +614,35 @@ static struct {
601 614
602static void NCR5380_print_phase(struct Scsi_Host *instance) 615static void NCR5380_print_phase(struct Scsi_Host *instance)
603{ 616{
604 unsigned char status; 617 unsigned char status;
605 int i; 618 int i;
606 619
607 status = NCR5380_read(STATUS_REG); 620 status = NCR5380_read(STATUS_REG);
608 if (!(status & SR_REQ)) 621 if (!(status & SR_REQ))
609 printk(KERN_DEBUG "scsi%d: REQ not asserted, phase unknown.\n", HOSTNO); 622 printk(KERN_DEBUG "scsi%d: REQ not asserted, phase unknown.\n", HOSTNO);
610 else { 623 else {
611 for (i = 0; (phases[i].value != PHASE_UNKNOWN) && 624 for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
612 (phases[i].value != (status & PHASE_MASK)); ++i); 625 (phases[i].value != (status & PHASE_MASK)); ++i)
613 printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name); 626 ;
614 } 627 printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name);
628 }
615} 629}
616 630
617#else /* !NDEBUG */ 631#else /* !NDEBUG */
618 632
619/* dummies... */ 633/* dummies... */
620__inline__ void NCR5380_print(struct Scsi_Host *instance) { }; 634static inline void NCR5380_print(struct Scsi_Host *instance)
621__inline__ void NCR5380_print_phase(struct Scsi_Host *instance) { }; 635{
636};
637static inline void NCR5380_print_phase(struct Scsi_Host *instance)
638{
639};
622 640
623#endif 641#endif
624 642
625/* 643/*
626 * ++roman: New scheme of calling NCR5380_main() 644 * ++roman: New scheme of calling NCR5380_main()
627 * 645 *
628 * If we're not in an interrupt, we can call our main directly, it cannot be 646 * If we're not in an interrupt, we can call our main directly, it cannot be
629 * already running. Else, we queue it on a task queue, if not 'main_running' 647 * already running. Else, we queue it on a task queue, if not 'main_running'
630 * tells us that a lower level is already executing it. This way, 648 * tells us that a lower level is already executing it. This way,
@@ -638,33 +656,33 @@ __inline__ void NCR5380_print_phase(struct Scsi_Host *instance) { };
638#include <linux/workqueue.h> 656#include <linux/workqueue.h>
639#include <linux/interrupt.h> 657#include <linux/interrupt.h>
640 658
641static volatile int main_running = 0; 659static volatile int main_running;
642static DECLARE_WORK(NCR5380_tqueue, (void (*)(void*))NCR5380_main, NULL); 660static DECLARE_WORK(NCR5380_tqueue, NCR5380_main);
643 661
644static __inline__ void queue_main(void) 662static inline void queue_main(void)
645{ 663{
646 if (!main_running) { 664 if (!main_running) {
647 /* If in interrupt and NCR5380_main() not already running, 665 /* If in interrupt and NCR5380_main() not already running,
648 queue it on the 'immediate' task queue, to be processed 666 queue it on the 'immediate' task queue, to be processed
649 immediately after the current interrupt processing has 667 immediately after the current interrupt processing has
650 finished. */ 668 finished. */
651 schedule_work(&NCR5380_tqueue); 669 schedule_work(&NCR5380_tqueue);
652 } 670 }
653 /* else: nothing to do: the running NCR5380_main() will pick up 671 /* else: nothing to do: the running NCR5380_main() will pick up
654 any newly queued command. */ 672 any newly queued command. */
655} 673}
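The DECLARE_WORK() change reflects the 2.6.20 workqueue API: work items no longer carry a data pointer, and the handler receives the struct work_struct itself, so NCR5380_main is expected to take that prototype instead of being cast. A minimal sketch of the new-style deferral; the names below are placeholders, not the driver's:

#include <linux/workqueue.h>

static void my_main(struct work_struct *work);	/* handler signature in the new API */
static DECLARE_WORK(my_work, my_main);		/* no third 'data' argument any more */

static void my_main(struct work_struct *work)
{
	/* ... process the issue queue ... */
}

static void kick_main(void)
{
	schedule_work(&my_work);	/* run my_main() from process context after the IRQ */
}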
656 674
657 675
658static inline void NCR5380_all_init (void) 676static inline void NCR5380_all_init(void)
659{ 677{
660 static int done = 0; 678 static int done = 0;
661 if (!done) { 679 if (!done) {
662 INI_PRINTK("scsi : NCR5380_all_init()\n"); 680 INI_PRINTK("scsi : NCR5380_all_init()\n");
663 done = 1; 681 done = 1;
664 } 682 }
665} 683}
666 684
667 685
668/* 686/*
669 * Function : void NCR58380_print_options (struct Scsi_Host *instance) 687 * Function : void NCR58380_print_options (struct Scsi_Host *instance)
670 * 688 *
@@ -674,23 +692,23 @@ static inline void NCR5380_all_init (void)
674 * Inputs : instance, pointer to this instance. Unused. 692 * Inputs : instance, pointer to this instance. Unused.
675 */ 693 */
676 694
677static void __init NCR5380_print_options (struct Scsi_Host *instance) 695static void __init NCR5380_print_options(struct Scsi_Host *instance)
678{ 696{
679 printk(" generic options" 697 printk(" generic options"
680#ifdef AUTOSENSE 698#ifdef AUTOSENSE
681 " AUTOSENSE" 699 " AUTOSENSE"
682#endif 700#endif
683#ifdef REAL_DMA 701#ifdef REAL_DMA
684 " REAL DMA" 702 " REAL DMA"
685#endif 703#endif
686#ifdef PARITY 704#ifdef PARITY
687 " PARITY" 705 " PARITY"
688#endif 706#endif
689#ifdef SUPPORT_TAGS 707#ifdef SUPPORT_TAGS
690 " SCSI-2 TAGGED QUEUING" 708 " SCSI-2 TAGGED QUEUING"
691#endif 709#endif
692 ); 710 );
693 printk(" generic release=%d", NCR5380_PUBLIC_RELEASE); 711 printk(" generic release=%d", NCR5380_PUBLIC_RELEASE);
694} 712}
695 713
696/* 714/*
@@ -699,27 +717,27 @@ static void __init NCR5380_print_options (struct Scsi_Host *instance)
699 * Purpose : print commands in the various queues, called from 717 * Purpose : print commands in the various queues, called from
700 * NCR5380_abort and NCR5380_debug to aid debugging. 718 * NCR5380_abort and NCR5380_debug to aid debugging.
701 * 719 *
702 * Inputs : instance, pointer to this instance. 720 * Inputs : instance, pointer to this instance.
703 */ 721 */
704 722
705static void NCR5380_print_status (struct Scsi_Host *instance) 723static void NCR5380_print_status(struct Scsi_Host *instance)
706{ 724{
707 char *pr_bfr; 725 char *pr_bfr;
708 char *start; 726 char *start;
709 int len; 727 int len;
710 728
711 NCR_PRINT(NDEBUG_ANY); 729 NCR_PRINT(NDEBUG_ANY);
712 NCR_PRINT_PHASE(NDEBUG_ANY); 730 NCR_PRINT_PHASE(NDEBUG_ANY);
713 731
714 pr_bfr = (char *) __get_free_page(GFP_ATOMIC); 732 pr_bfr = (char *)__get_free_page(GFP_ATOMIC);
715 if (!pr_bfr) { 733 if (!pr_bfr) {
716 printk("NCR5380_print_status: no memory for print buffer\n"); 734 printk("NCR5380_print_status: no memory for print buffer\n");
717 return; 735 return;
718 } 736 }
719 len = NCR5380_proc_info(pr_bfr, &start, 0, PAGE_SIZE, HOSTNO, 0); 737 len = NCR5380_proc_info(instance, pr_bfr, &start, 0, PAGE_SIZE, 0);
720 pr_bfr[len] = 0; 738 pr_bfr[len] = 0;
721 printk("\n%s\n", pr_bfr); 739 printk("\n%s\n", pr_bfr);
722 free_page((unsigned long) pr_bfr); 740 free_page((unsigned long)pr_bfr);
723} 741}
724 742
725 743
@@ -738,443 +756,478 @@ static void NCR5380_print_status (struct Scsi_Host *instance)
738*/ 756*/
739 757
740#undef SPRINTF 758#undef SPRINTF
741#define SPRINTF(fmt,args...) \ 759#define SPRINTF(fmt,args...) \
742 do { if (pos + strlen(fmt) + 20 /* slop */ < buffer + length) \ 760 do { \
743 pos += sprintf(pos, fmt , ## args); } while(0) 761 if (pos + strlen(fmt) + 20 /* slop */ < buffer + length) \
744static 762 pos += sprintf(pos, fmt , ## args); \
745char *lprint_Scsi_Cmnd (Scsi_Cmnd *cmd, char *pos, char *buffer, int length); 763 } while(0)
746 764static char *lprint_Scsi_Cmnd(Scsi_Cmnd *cmd, char *pos, char *buffer, int length);
747static 765
748int NCR5380_proc_info (struct Scsi_Host *instance, char *buffer, char **start, off_t offset, 766static int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer,
749 int length, int inout) 767 char **start, off_t offset, int length, int inout)
750{ 768{
751 char *pos = buffer; 769 char *pos = buffer;
752 struct NCR5380_hostdata *hostdata; 770 struct NCR5380_hostdata *hostdata;
753 Scsi_Cmnd *ptr; 771 Scsi_Cmnd *ptr;
754 unsigned long flags; 772 unsigned long flags;
755 off_t begin = 0; 773 off_t begin = 0;
756#define check_offset() \ 774#define check_offset() \
757 do { \ 775 do { \
758 if (pos - buffer < offset - begin) { \ 776 if (pos - buffer < offset - begin) { \
759 begin += pos - buffer; \ 777 begin += pos - buffer; \
760 pos = buffer; \ 778 pos = buffer; \
761 } \ 779 } \
762 } while (0) 780 } while (0)
763 781
764 hostdata = (struct NCR5380_hostdata *)instance->hostdata; 782 hostdata = (struct NCR5380_hostdata *)instance->hostdata;
765 783
766 if (inout) { /* Has data been written to the file ? */ 784 if (inout) /* Has data been written to the file ? */
767 return(-ENOSYS); /* Currently this is a no-op */ 785 return -ENOSYS; /* Currently this is a no-op */
768 } 786 SPRINTF("NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
769 SPRINTF("NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE);
770 check_offset();
771 local_irq_save(flags);
772 SPRINTF("NCR5380: coroutine is%s running.\n", main_running ? "" : "n't");
773 check_offset();
774 if (!hostdata->connected)
775 SPRINTF("scsi%d: no currently connected command\n", HOSTNO);
776 else
777 pos = lprint_Scsi_Cmnd ((Scsi_Cmnd *) hostdata->connected,
778 pos, buffer, length);
779 SPRINTF("scsi%d: issue_queue\n", HOSTNO);
780 check_offset();
781 for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr; ptr = NEXT(ptr)) {
782 pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length);
783 check_offset(); 787 check_offset();
784 } 788 local_irq_save(flags);
789 SPRINTF("NCR5380: coroutine is%s running.\n",
790 main_running ? "" : "n't");
791 check_offset();
792 if (!hostdata->connected)
793 SPRINTF("scsi%d: no currently connected command\n", HOSTNO);
794 else
795 pos = lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected,
796 pos, buffer, length);
797 SPRINTF("scsi%d: issue_queue\n", HOSTNO);
798 check_offset();
799 for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) {
800 pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length);
801 check_offset();
802 }
785 803
786 SPRINTF("scsi%d: disconnected_queue\n", HOSTNO); 804 SPRINTF("scsi%d: disconnected_queue\n", HOSTNO);
787 check_offset();
788 for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
789 ptr = NEXT(ptr)) {
790 pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length);
791 check_offset(); 805 check_offset();
792 } 806 for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
807 ptr = NEXT(ptr)) {
808 pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length);
809 check_offset();
810 }
793 811
794 local_irq_restore(flags); 812 local_irq_restore(flags);
795 *start = buffer + (offset - begin); 813 *start = buffer + (offset - begin);
796 if (pos - buffer < offset - begin) 814 if (pos - buffer < offset - begin)
797 return 0; 815 return 0;
798 else if (pos - buffer - (offset - begin) < length) 816 else if (pos - buffer - (offset - begin) < length)
799 return pos - buffer - (offset - begin); 817 return pos - buffer - (offset - begin);
800 return length; 818 return length;
801} 819}
802 820
803static char * 821static char *lprint_Scsi_Cmnd(Scsi_Cmnd *cmd, char *pos, char *buffer, int length)
804lprint_Scsi_Cmnd (Scsi_Cmnd *cmd, char *pos, char *buffer, int length)
805{ 822{
806 int i, s; 823 int i, s;
807 unsigned char *command; 824 unsigned char *command;
808 SPRINTF("scsi%d: destination target %d, lun %d\n", 825 SPRINTF("scsi%d: destination target %d, lun %d\n",
809 H_NO(cmd), cmd->device->id, cmd->device->lun); 826 H_NO(cmd), cmd->device->id, cmd->device->lun);
810 SPRINTF(" command = "); 827 SPRINTF(" command = ");
811 command = cmd->cmnd; 828 command = cmd->cmnd;
812 SPRINTF("%2d (0x%02x)", command[0], command[0]); 829 SPRINTF("%2d (0x%02x)", command[0], command[0]);
813 for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) 830 for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
814 SPRINTF(" %02x", command[i]); 831 SPRINTF(" %02x", command[i]);
815 SPRINTF("\n"); 832 SPRINTF("\n");
816 return pos; 833 return pos;
817} 834}
818 835
819 836
820/* 837/*
821 * Function : void NCR5380_init (struct Scsi_Host *instance) 838 * Function : void NCR5380_init (struct Scsi_Host *instance)
822 * 839 *
823 * Purpose : initializes *instance and corresponding 5380 chip. 840 * Purpose : initializes *instance and corresponding 5380 chip.
824 * 841 *
825 * Inputs : instance - instantiation of the 5380 driver. 842 * Inputs : instance - instantiation of the 5380 driver.
826 * 843 *
827 * Notes : I assume that the host, hostno, and id bits have been 844 * Notes : I assume that the host, hostno, and id bits have been
828 * set correctly. I don't care about the irq and other fields. 845 * set correctly. I don't care about the irq and other fields.
829 * 846 *
830 */ 847 */
831 848
832static int NCR5380_init (struct Scsi_Host *instance, int flags) 849static int NCR5380_init(struct Scsi_Host *instance, int flags)
833{ 850{
834 int i; 851 int i;
835 SETUP_HOSTDATA(instance); 852 SETUP_HOSTDATA(instance);
836 853
837 NCR5380_all_init(); 854 NCR5380_all_init();
838 855
839 hostdata->aborted = 0; 856 hostdata->aborted = 0;
840 hostdata->id_mask = 1 << instance->this_id; 857 hostdata->id_mask = 1 << instance->this_id;
841 hostdata->id_higher_mask = 0; 858 hostdata->id_higher_mask = 0;
842 for (i = hostdata->id_mask; i <= 0x80; i <<= 1) 859 for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
843 if (i > hostdata->id_mask) 860 if (i > hostdata->id_mask)
844 hostdata->id_higher_mask |= i; 861 hostdata->id_higher_mask |= i;
845 for (i = 0; i < 8; ++i) 862 for (i = 0; i < 8; ++i)
846 hostdata->busy[i] = 0; 863 hostdata->busy[i] = 0;
847#ifdef SUPPORT_TAGS 864#ifdef SUPPORT_TAGS
848 init_tags(); 865 init_tags();
849#endif 866#endif
850#if defined (REAL_DMA) 867#if defined (REAL_DMA)
851 hostdata->dma_len = 0; 868 hostdata->dma_len = 0;
852#endif 869#endif
853 hostdata->targets_present = 0; 870 hostdata->targets_present = 0;
854 hostdata->connected = NULL; 871 hostdata->connected = NULL;
855 hostdata->issue_queue = NULL; 872 hostdata->issue_queue = NULL;
856 hostdata->disconnected_queue = NULL; 873 hostdata->disconnected_queue = NULL;
857 hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT; 874 hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT;
858 875
859 if (!the_template) { 876 if (!the_template) {
860 the_template = instance->hostt; 877 the_template = instance->hostt;
861 first_instance = instance; 878 first_instance = instance;
862 } 879 }
863
864 880
865#ifndef AUTOSENSE 881#ifndef AUTOSENSE
866 if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1)) 882 if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1))
867 printk("scsi%d: WARNING : support for multiple outstanding commands enabled\n" 883 printk("scsi%d: WARNING : support for multiple outstanding commands enabled\n"
868 " without AUTOSENSE option, contingent allegiance conditions may\n" 884 " without AUTOSENSE option, contingent allegiance conditions may\n"
869 " be incorrectly cleared.\n", HOSTNO); 885 " be incorrectly cleared.\n", HOSTNO);
870#endif /* def AUTOSENSE */ 886#endif /* def AUTOSENSE */
871 887
872 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 888 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
873 NCR5380_write(MODE_REG, MR_BASE); 889 NCR5380_write(MODE_REG, MR_BASE);
874 NCR5380_write(TARGET_COMMAND_REG, 0); 890 NCR5380_write(TARGET_COMMAND_REG, 0);
875 NCR5380_write(SELECT_ENABLE_REG, 0); 891 NCR5380_write(SELECT_ENABLE_REG, 0);
876 892
877 return 0; 893 return 0;
878} 894}
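The id_mask / id_higher_mask pair initialised above feeds the lost-arbitration test in NCR5380_select() further down, which checks CURRENT_SCSI_DATA_REG against id_higher_mask. A standalone sketch of what the loop computes (illustrative only, not taken from the patch):

	static unsigned char higher_ids(unsigned char this_id)
	{
		unsigned char id_mask = 1 << this_id;
		unsigned char id_higher_mask = 0;
		unsigned int i;

		for (i = id_mask; i <= 0x80; i <<= 1)
			if (i > id_mask)
				id_higher_mask |= i;
		return id_higher_mask;	/* this_id 7 -> 0x00, this_id 2 -> 0xf8 */
	}

Any bit set in the returned mask belongs to a higher-priority SCSI ID, so seeing one of them on the data bus during arbitration means arbitration was lost.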
879 895
880/* 896/*
881 * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd, 897 * our own old-style timeout update
882 * void (*done)(Scsi_Cmnd *)) 898 */
899/*
900 * The strategy is to cause the timer code to call scsi_times_out()
901 * when the soonest timeout is pending.
902 * The arguments are used when we are queueing a new command, because
903 * we do not want to subtract the time used from this time, but when we
904 * set the timer, we want to take this value into account.
905 */
906
907int atari_scsi_update_timeout(Scsi_Cmnd * SCset, int timeout)
908{
909 int rtn;
910
911 /*
912 * We are using the new error handling code to actually register/deregister
913 * timers for timeout.
914 */
915
916 if (!timer_pending(&SCset->eh_timeout))
917 rtn = 0;
918 else
919 rtn = SCset->eh_timeout.expires - jiffies;
920
921 if (timeout == 0) {
922 del_timer(&SCset->eh_timeout);
923 SCset->eh_timeout.data = (unsigned long)NULL;
924 SCset->eh_timeout.expires = 0;
925 } else {
926 if (SCset->eh_timeout.data != (unsigned long)NULL)
927 del_timer(&SCset->eh_timeout);
928 SCset->eh_timeout.data = (unsigned long)SCset;
929 SCset->eh_timeout.expires = jiffies + timeout;
930 add_timer(&SCset->eh_timeout);
931 }
932 return rtn;
933}
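atari_scsi_update_timeout() exists so a command's error-handler timer can be parked while the driver waits for the Falcon ST-DMA lock and then re-armed with whatever time was left. The calling pattern mirrors the !IS_A_TT() branch in NCR5380_queue_command() below; the helper name here is hypothetical, shown only to make the pattern explicit:

	static void falcon_lock_with_timer_parked(Scsi_Cmnd *cmd)
	{
		int oldto = atari_scsi_update_timeout(cmd, 0);	/* stop eh_timeout, keep the remainder */

		falcon_get_lock();				/* may wait for the ST-DMA lock */
		atari_scsi_update_timeout(cmd, oldto);		/* re-arm with what was left */
	}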
934
935/*
936 * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd,
937 * void (*done)(Scsi_Cmnd *))
883 * 938 *
884 * Purpose : enqueues a SCSI command 939 * Purpose : enqueues a SCSI command
885 * 940 *
886 * Inputs : cmd - SCSI command, done - function called on completion, with 941 * Inputs : cmd - SCSI command, done - function called on completion, with
887 * a pointer to the command descriptor. 942 * a pointer to the command descriptor.
888 * 943 *
889 * Returns : 0 944 * Returns : 0
890 * 945 *
891 * Side effects : 946 * Side effects :
892 * cmd is added to the per instance issue_queue, with minor 947 * cmd is added to the per instance issue_queue, with minor
893 * twiddling done to the host specific fields of cmd. If the 948 * twiddling done to the host specific fields of cmd. If the
894 * main coroutine is not running, it is restarted. 949 * main coroutine is not running, it is restarted.
895 * 950 *
896 */ 951 */
897 952
898static 953static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
899int NCR5380_queue_command (Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
900{ 954{
901 SETUP_HOSTDATA(cmd->device->host); 955 SETUP_HOSTDATA(cmd->device->host);
902 Scsi_Cmnd *tmp; 956 Scsi_Cmnd *tmp;
903 int oldto; 957 int oldto;
904 unsigned long flags; 958 unsigned long flags;
905 extern int update_timeout(Scsi_Cmnd * SCset, int timeout); 959 // extern int update_timeout(Scsi_Cmnd * SCset, int timeout);
906 960
907#if (NDEBUG & NDEBUG_NO_WRITE) 961#if (NDEBUG & NDEBUG_NO_WRITE)
908 switch (cmd->cmnd[0]) { 962 switch (cmd->cmnd[0]) {
909 case WRITE_6: 963 case WRITE_6:
910 case WRITE_10: 964 case WRITE_10:
911 printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n", 965 printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n",
912 H_NO(cmd)); 966 H_NO(cmd));
913 cmd->result = (DID_ERROR << 16); 967 cmd->result = (DID_ERROR << 16);
914 done(cmd); 968 done(cmd);
915 return 0; 969 return 0;
916 } 970 }
917#endif /* (NDEBUG & NDEBUG_NO_WRITE) */ 971#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
918 972
919
920#ifdef NCR5380_STATS 973#ifdef NCR5380_STATS
921# if 0 974# if 0
922 if (!hostdata->connected && !hostdata->issue_queue && 975 if (!hostdata->connected && !hostdata->issue_queue &&
923 !hostdata->disconnected_queue) { 976 !hostdata->disconnected_queue) {
924 hostdata->timebase = jiffies; 977 hostdata->timebase = jiffies;
925 } 978 }
926# endif 979# endif
927# ifdef NCR5380_STAT_LIMIT 980# ifdef NCR5380_STAT_LIMIT
928 if (cmd->request_bufflen > NCR5380_STAT_LIMIT) 981 if (cmd->request_bufflen > NCR5380_STAT_LIMIT)
929# endif 982# endif
930 switch (cmd->cmnd[0]) 983 switch (cmd->cmnd[0]) {
931 { 984 case WRITE:
932 case WRITE: 985 case WRITE_6:
933 case WRITE_6: 986 case WRITE_10:
934 case WRITE_10: 987 hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
935 hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase); 988 hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;
936 hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen; 989 hostdata->pendingw++;
937 hostdata->pendingw++; 990 break;
938 break; 991 case READ:
939 case READ: 992 case READ_6:
940 case READ_6: 993 case READ_10:
941 case READ_10: 994 hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
942 hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase); 995 hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;
943 hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen; 996 hostdata->pendingr++;
944 hostdata->pendingr++; 997 break;
945 break; 998 }
946 }
947#endif 999#endif
948 1000
949 /* 1001 /*
950 * We use the host_scribble field as a pointer to the next command 1002 * We use the host_scribble field as a pointer to the next command
951 * in a queue 1003 * in a queue
952 */ 1004 */
953 1005
954 NEXT(cmd) = NULL; 1006 SET_NEXT(cmd, NULL);
955 cmd->scsi_done = done; 1007 cmd->scsi_done = done;
956 1008
957 cmd->result = 0; 1009 cmd->result = 0;
958 1010
959 1011 /*
960 /* 1012 * Insert the cmd into the issue queue. Note that REQUEST SENSE
961 * Insert the cmd into the issue queue. Note that REQUEST SENSE 1013 * commands are added to the head of the queue since any command will
962 * commands are added to the head of the queue since any command will 1014 * clear the contingent allegiance condition that exists and the
963 * clear the contingent allegiance condition that exists and the 1015 * sense data is only guaranteed to be valid while the condition exists.
964 * sense data is only guaranteed to be valid while the condition exists. 1016 */
965 */ 1017
966 1018 local_irq_save(flags);
967 local_irq_save(flags); 1019 /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA.
968 /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA. 1020 * Otherwise a running NCR5380_main may steal the lock.
969 * Otherwise a running NCR5380_main may steal the lock. 1021 * Lock before actually inserting due to fairness reasons explained in
970 * Lock before actually inserting due to fairness reasons explained in 1022 * atari_scsi.c. If we insert first, then it's impossible for this driver
971 * atari_scsi.c. If we insert first, then it's impossible for this driver 1023 * to release the lock.
972 * to release the lock. 1024 * Stop timer for this command while waiting for the lock, or timeouts
973 * Stop timer for this command while waiting for the lock, or timeouts 1025 * may happen (and they really do), and it's no good if the command doesn't
974 * may happen (and they really do), and it's no good if the command doesn't 1026 * appear in any of the queues.
975 * appear in any of the queues. 1027 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
976 * ++roman: Just disabling the NCR interrupt isn't sufficient here, 1028 * because also a timer int can trigger an abort or reset, which would
977 * because also a timer int can trigger an abort or reset, which would 1029 * alter queues and touch the lock.
978 * alter queues and touch the lock. 1030 */
979 */ 1031 if (!IS_A_TT()) {
980 if (!IS_A_TT()) { 1032 oldto = atari_scsi_update_timeout(cmd, 0);
981 oldto = update_timeout(cmd, 0); 1033 falcon_get_lock();
982 falcon_get_lock(); 1034 atari_scsi_update_timeout(cmd, oldto);
983 update_timeout(cmd, oldto); 1035 }
984 } 1036 if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
985 if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) { 1037 LIST(cmd, hostdata->issue_queue);
986 LIST(cmd, hostdata->issue_queue); 1038 SET_NEXT(cmd, hostdata->issue_queue);
987 NEXT(cmd) = hostdata->issue_queue; 1039 hostdata->issue_queue = cmd;
988 hostdata->issue_queue = cmd; 1040 } else {
989 } else { 1041 for (tmp = (Scsi_Cmnd *)hostdata->issue_queue;
990 for (tmp = (Scsi_Cmnd *)hostdata->issue_queue; 1042 NEXT(tmp); tmp = NEXT(tmp))
991 NEXT(tmp); tmp = NEXT(tmp)) 1043 ;
992 ; 1044 LIST(cmd, tmp);
993 LIST(cmd, tmp); 1045 SET_NEXT(tmp, cmd);
994 NEXT(tmp) = cmd; 1046 }
995 } 1047 local_irq_restore(flags);
996 local_irq_restore(flags); 1048
997 1049 QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd),
998 QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd), 1050 (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
999 (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); 1051
1000 1052 /* If queue_command() is called from an interrupt (real one or bottom
1001 /* If queue_command() is called from an interrupt (real one or bottom 1053 * half), we let queue_main() do the job of taking care about main. If it
1002 * half), we let queue_main() do the job of taking care about main. If it 1054 * is already running, this is a no-op, else main will be queued.
1003 * is already running, this is a no-op, else main will be queued. 1055 *
1004 * 1056 * If we're not in an interrupt, we can call NCR5380_main()
1005 * If we're not in an interrupt, we can call NCR5380_main() 1057 * unconditionally, because it cannot be already running.
1006 * unconditionally, because it cannot be already running. 1058 */
1007 */ 1059 if (in_interrupt() || ((flags >> 8) & 7) >= 6)
1008 if (in_interrupt() || ((flags >> 8) & 7) >= 6) 1060 queue_main();
1009 queue_main(); 1061 else
1010 else 1062 NCR5380_main(NULL);
1011 NCR5380_main(NULL); 1063 return 0;
1012 return 0;
1013} 1064}
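The queue handling above threads commands through cmd->host_scribble rather than a separate list node, and this hunk converts writers from the old "NEXT(cmd) = ..." lvalue form to SET_NEXT(). The accessor macros are defined near the top of the file, outside this hunk; their likely shape (an assumption, not copied from the patch) is:

	#define NEXT(cmd)		((Scsi_Cmnd *)(cmd)->host_scribble)
	#define SET_NEXT(cmd, next)	((cmd)->host_scribble = (void *)(next))

Wrapping the store in a macro keeps the void-pointer cast in one place and makes the list writes easy to audit.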
1014 1065
1015/* 1066/*
1016 * Function : NCR5380_main (void) 1067 * Function : NCR5380_main (void)
1017 * 1068 *
1018 * Purpose : NCR5380_main is a coroutine that runs as long as more work can 1069 * Purpose : NCR5380_main is a coroutine that runs as long as more work can
1019 * be done on the NCR5380 host adapters in a system. Both 1070 * be done on the NCR5380 host adapters in a system. Both
1020 * NCR5380_queue_command() and NCR5380_intr() will try to start it 1071 * NCR5380_queue_command() and NCR5380_intr() will try to start it
1021 * in case it is not running. 1072 * in case it is not running.
1022 * 1073 *
1023 * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should 1074 * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should
1024 * reenable them. This prevents reentrancy and kernel stack overflow. 1075 * reenable them. This prevents reentrancy and kernel stack overflow.
1025 */ 1076 */
1026 1077
1027static void NCR5380_main (void *bl) 1078static void NCR5380_main(struct work_struct *work)
1028{ 1079{
1029 Scsi_Cmnd *tmp, *prev; 1080 Scsi_Cmnd *tmp, *prev;
1030 struct Scsi_Host *instance = first_instance; 1081 struct Scsi_Host *instance = first_instance;
1031 struct NCR5380_hostdata *hostdata = HOSTDATA(instance); 1082 struct NCR5380_hostdata *hostdata = HOSTDATA(instance);
1032 int done; 1083 int done;
1033 unsigned long flags; 1084 unsigned long flags;
1034 1085
1035 /* 1086 /*
1036 * We run (with interrupts disabled) until we're sure that none of 1087 * We run (with interrupts disabled) until we're sure that none of
1037 * the host adapters have anything that can be done, at which point 1088 * the host adapters have anything that can be done, at which point
1038 * we set main_running to 0 and exit. 1089 * we set main_running to 0 and exit.
1039 * 1090 *
1040 * Interrupts are enabled before doing various other internal 1091 * Interrupts are enabled before doing various other internal
1041 * instructions, after we've decided that we need to run through 1092 * instructions, after we've decided that we need to run through
1042 * the loop again. 1093 * the loop again.
1043 * 1094 *
1044 * this should prevent any race conditions. 1095 * this should prevent any race conditions.
1045 * 1096 *
1046 * ++roman: Just disabling the NCR interrupt isn't sufficient here, 1097 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
1047 * because also a timer int can trigger an abort or reset, which can 1098 * because also a timer int can trigger an abort or reset, which can
1048 * alter queues and touch the Falcon lock. 1099 * alter queues and touch the Falcon lock.
1049 */ 1100 */
1050 1101
1051 /* Tell int handlers main() is now already executing. Note that 1102 /* Tell int handlers main() is now already executing. Note that
1052 no races are possible here. If an int comes in before 1103 no races are possible here. If an int comes in before
1053 'main_running' is set here, and queues/executes main via the 1104 'main_running' is set here, and queues/executes main via the
1054 task queue, it doesn't do any harm, just this instance of main 1105 task queue, it doesn't do any harm, just this instance of main
1055 won't find any work left to do. */ 1106 won't find any work left to do. */
1056 if (main_running) 1107 if (main_running)
1057 return; 1108 return;
1058 main_running = 1; 1109 main_running = 1;
1059 1110
1060 local_save_flags(flags); 1111 local_save_flags(flags);
1061 do { 1112 do {
1062 local_irq_disable(); /* Freeze request queues */ 1113 local_irq_disable(); /* Freeze request queues */
1063 done = 1; 1114 done = 1;
1064 1115
1065 if (!hostdata->connected) { 1116 if (!hostdata->connected) {
1066 MAIN_PRINTK( "scsi%d: not connected\n", HOSTNO ); 1117 MAIN_PRINTK("scsi%d: not connected\n", HOSTNO);
1067 /* 1118 /*
1068 * Search through the issue_queue for a command destined 1119 * Search through the issue_queue for a command destined
1069 * for a target that's not busy. 1120 * for a target that's not busy.
1070 */ 1121 */
1071#if (NDEBUG & NDEBUG_LISTS) 1122#if (NDEBUG & NDEBUG_LISTS)
1072 for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; 1123 for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL;
1073 tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp)) 1124 tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp))
1074 ; 1125 ;
1075 /*printk("%p ", tmp);*/ 1126 /*printk("%p ", tmp);*/
1076 if ((tmp == prev) && tmp) printk(" LOOP\n");/* else printk("\n");*/ 1127 if ((tmp == prev) && tmp)
1128 printk(" LOOP\n");
1129 /* else printk("\n"); */
1077#endif 1130#endif
1078 for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, 1131 for (tmp = (Scsi_Cmnd *) hostdata->issue_queue,
1079 prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) { 1132 prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) {
1080 1133
1081#if (NDEBUG & NDEBUG_LISTS) 1134#if (NDEBUG & NDEBUG_LISTS)
1082 if (prev != tmp) 1135 if (prev != tmp)
1083 printk("MAIN tmp=%p target=%d busy=%d lun=%d\n", 1136 printk("MAIN tmp=%p target=%d busy=%d lun=%d\n",
1084 tmp, tmp->device->id, hostdata->busy[tmp->device->id], 1137 tmp, tmp->device->id, hostdata->busy[tmp->device->id],
1085 tmp->device->lun); 1138 tmp->device->lun);
1086#endif 1139#endif
1087 /* When we find one, remove it from the issue queue. */ 1140 /* When we find one, remove it from the issue queue. */
1088 /* ++guenther: possible race with Falcon locking */ 1141 /* ++guenther: possible race with Falcon locking */
1089 if ( 1142 if (
1090#ifdef SUPPORT_TAGS 1143#ifdef SUPPORT_TAGS
1091 !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE) 1144 !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE)
1092#else 1145#else
1093 !(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun)) 1146 !(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))
1094#endif 1147#endif
1095 ) { 1148 ) {
1096 /* ++guenther: just to be sure, this must be atomic */ 1149 /* ++guenther: just to be sure, this must be atomic */
1097 local_irq_disable(); 1150 local_irq_disable();
1098 if (prev) { 1151 if (prev) {
1099 REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); 1152 REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
1100 NEXT(prev) = NEXT(tmp); 1153 SET_NEXT(prev, NEXT(tmp));
1101 } else { 1154 } else {
1102 REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp)); 1155 REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp));
1103 hostdata->issue_queue = NEXT(tmp); 1156 hostdata->issue_queue = NEXT(tmp);
1104 } 1157 }
1105 NEXT(tmp) = NULL; 1158 SET_NEXT(tmp, NULL);
1106 falcon_dont_release++; 1159 falcon_dont_release++;
1107 1160
1108 /* reenable interrupts after finding one */ 1161 /* reenable interrupts after finding one */
1109 local_irq_restore(flags); 1162 local_irq_restore(flags);
1110 1163
1111 /* 1164 /*
1112 * Attempt to establish an I_T_L nexus here. 1165 * Attempt to establish an I_T_L nexus here.
1113 * On success, instance->hostdata->connected is set. 1166 * On success, instance->hostdata->connected is set.
1114 * On failure, we must add the command back to the 1167 * On failure, we must add the command back to the
1115 * issue queue so we can keep trying. 1168 * issue queue so we can keep trying.
1116 */ 1169 */
1117 MAIN_PRINTK("scsi%d: main(): command for target %d " 1170 MAIN_PRINTK("scsi%d: main(): command for target %d "
1118 "lun %d removed from issue_queue\n", 1171 "lun %d removed from issue_queue\n",
1119 HOSTNO, tmp->device->id, tmp->device->lun); 1172 HOSTNO, tmp->device->id, tmp->device->lun);
1120 /* 1173 /*
1121 * REQUEST SENSE commands are issued without tagged 1174 * REQUEST SENSE commands are issued without tagged
1122 * queueing, even on SCSI-II devices because the 1175 * queueing, even on SCSI-II devices because the
1123 * contingent allegiance condition exists for the 1176 * contingent allegiance condition exists for the
1124 * entire unit. 1177 * entire unit.
1125 */ 1178 */
1126 /* ++roman: ...and the standard also requires that 1179 /* ++roman: ...and the standard also requires that
1127 * REQUEST SENSE command are untagged. 1180 * REQUEST SENSE command are untagged.
1128 */ 1181 */
1129 1182
1130#ifdef SUPPORT_TAGS 1183#ifdef SUPPORT_TAGS
1131 cmd_get_tag( tmp, tmp->cmnd[0] != REQUEST_SENSE ); 1184 cmd_get_tag(tmp, tmp->cmnd[0] != REQUEST_SENSE);
1132#endif 1185#endif
1133 if (!NCR5380_select(instance, tmp, 1186 if (!NCR5380_select(instance, tmp,
1134 (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE : 1187 (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE :
1135 TAG_NEXT)) { 1188 TAG_NEXT)) {
1136 falcon_dont_release--; 1189 falcon_dont_release--;
1137 /* release if target did not response! */ 1190 /* release if target did not response! */
1138 falcon_release_lock_if_possible( hostdata ); 1191 falcon_release_lock_if_possible(hostdata);
1139 break; 1192 break;
1140 } else { 1193 } else {
1141 local_irq_disable(); 1194 local_irq_disable();
1142 LIST(tmp, hostdata->issue_queue); 1195 LIST(tmp, hostdata->issue_queue);
1143 NEXT(tmp) = hostdata->issue_queue; 1196 SET_NEXT(tmp, hostdata->issue_queue);
1144 hostdata->issue_queue = tmp; 1197 hostdata->issue_queue = tmp;
1145#ifdef SUPPORT_TAGS 1198#ifdef SUPPORT_TAGS
1146 cmd_free_tag( tmp ); 1199 cmd_free_tag(tmp);
1147#endif 1200#endif
1148 falcon_dont_release--; 1201 falcon_dont_release--;
1149 local_irq_restore(flags); 1202 local_irq_restore(flags);
1150 MAIN_PRINTK("scsi%d: main(): select() failed, " 1203 MAIN_PRINTK("scsi%d: main(): select() failed, "
1151 "returned to issue_queue\n", HOSTNO); 1204 "returned to issue_queue\n", HOSTNO);
1152 if (hostdata->connected) 1205 if (hostdata->connected)
1153 break; 1206 break;
1154 } 1207 }
1155 } /* if target/lun/target queue is not busy */ 1208 } /* if target/lun/target queue is not busy */
1156 } /* for issue_queue */ 1209 } /* for issue_queue */
1157 } /* if (!hostdata->connected) */ 1210 } /* if (!hostdata->connected) */
1158 1211
1159 if (hostdata->connected 1212 if (hostdata->connected
1160#ifdef REAL_DMA 1213#ifdef REAL_DMA
1161 && !hostdata->dma_len 1214 && !hostdata->dma_len
1162#endif 1215#endif
1163 ) { 1216 ) {
1164 local_irq_restore(flags); 1217 local_irq_restore(flags);
1165 MAIN_PRINTK("scsi%d: main: performing information transfer\n", 1218 MAIN_PRINTK("scsi%d: main: performing information transfer\n",
1166 HOSTNO); 1219 HOSTNO);
1167 NCR5380_information_transfer(instance); 1220 NCR5380_information_transfer(instance);
1168 MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO); 1221 MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO);
1169 done = 0; 1222 done = 0;
1170 } 1223 }
1171 } while (!done); 1224 } while (!done);
1172 1225
1173 /* Better allow ints _after_ 'main_running' has been cleared, else 1226 /* Better allow ints _after_ 'main_running' has been cleared, else
1174 an interrupt could believe we'll pick up the work it left for 1227 an interrupt could believe we'll pick up the work it left for
1175 us, but we won't see it anymore here... */ 1228 us, but we won't see it anymore here... */
1176 main_running = 0; 1229 main_running = 0;
1177 local_irq_restore(flags); 1230 local_irq_restore(flags);
1178} 1231}
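The signature change from NCR5380_main(void *bl) to NCR5380_main(struct work_struct *work) tracks the 2.6.20 workqueue API, in which work handlers receive the work_struct itself. The work item and queue_main() live earlier in the file, outside this hunk; a rough sketch of their shape under the new API (treat the exact names and declarations as assumptions):

	#include <linux/workqueue.h>

	static DECLARE_WORK(NCR5380_tqueue, NCR5380_main);	/* 2.6.20 form: handler takes struct work_struct * */

	static inline void queue_main(void)
	{
		if (!main_running)
			schedule_work(&NCR5380_tqueue);	/* scheduling an already-queued item is a no-op */
	}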
1179 1232
1180 1233
@@ -1183,1441 +1236,1439 @@ static void NCR5380_main (void *bl)
1183 * Function : void NCR5380_dma_complete (struct Scsi_Host *instance) 1236 * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
1184 * 1237 *
1185 * Purpose : Called by interrupt handler when DMA finishes or a phase 1238 * Purpose : Called by interrupt handler when DMA finishes or a phase
1186 * mismatch occurs (which would finish the DMA transfer). 1239 * mismatch occurs (which would finish the DMA transfer).
1187 * 1240 *
1188 * Inputs : instance - this instance of the NCR5380. 1241 * Inputs : instance - this instance of the NCR5380.
1189 * 1242 *
1190 */ 1243 */
1191 1244
1192static void NCR5380_dma_complete( struct Scsi_Host *instance ) 1245static void NCR5380_dma_complete(struct Scsi_Host *instance)
1193{ 1246{
1194 SETUP_HOSTDATA(instance); 1247 SETUP_HOSTDATA(instance);
1195 int transfered, saved_data = 0, overrun = 0, cnt, toPIO; 1248 int transfered, saved_data = 0, overrun = 0, cnt, toPIO;
1196 unsigned char **data, p; 1249 unsigned char **data, p;
1197 volatile int *count; 1250 volatile int *count;
1198 1251
1199 if (!hostdata->connected) { 1252 if (!hostdata->connected) {
1200 printk(KERN_WARNING "scsi%d: received end of DMA interrupt with " 1253 printk(KERN_WARNING "scsi%d: received end of DMA interrupt with "
1201 "no connected cmd\n", HOSTNO); 1254 "no connected cmd\n", HOSTNO);
1202 return; 1255 return;
1203 }
1204
1205 if (atari_read_overruns) {
1206 p = hostdata->connected->SCp.phase;
1207 if (p & SR_IO) {
1208 udelay(10);
1209 if ((((NCR5380_read(BUS_AND_STATUS_REG)) &
1210 (BASR_PHASE_MATCH|BASR_ACK)) ==
1211 (BASR_PHASE_MATCH|BASR_ACK))) {
1212 saved_data = NCR5380_read(INPUT_DATA_REG);
1213 overrun = 1;
1214 DMA_PRINTK("scsi%d: read overrun handled\n", HOSTNO);
1215 }
1216 } 1256 }
1217 } 1257
1218 1258 if (atari_read_overruns) {
1219 DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", 1259 p = hostdata->connected->SCp.phase;
1220 HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), 1260 if (p & SR_IO) {
1221 NCR5380_read(STATUS_REG)); 1261 udelay(10);
1222 1262 if ((NCR5380_read(BUS_AND_STATUS_REG) &
1223 (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1263 (BASR_PHASE_MATCH|BASR_ACK)) ==
1224 NCR5380_write(MODE_REG, MR_BASE); 1264 (BASR_PHASE_MATCH|BASR_ACK)) {
1225 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1265 saved_data = NCR5380_read(INPUT_DATA_REG);
1226 1266 overrun = 1;
1227 transfered = hostdata->dma_len - NCR5380_dma_residual(instance); 1267 DMA_PRINTK("scsi%d: read overrun handled\n", HOSTNO);
1228 hostdata->dma_len = 0; 1268 }
1229 1269 }
1230 data = (unsigned char **) &(hostdata->connected->SCp.ptr); 1270 }
1231 count = &(hostdata->connected->SCp.this_residual); 1271
1232 *data += transfered; 1272 DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
1233 *count -= transfered; 1273 HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
1234 1274 NCR5380_read(STATUS_REG));
1235 if (atari_read_overruns) { 1275
1236 if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { 1276 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1237 cnt = toPIO = atari_read_overruns; 1277 NCR5380_write(MODE_REG, MR_BASE);
1238 if (overrun) { 1278 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1239 DMA_PRINTK("Got an input overrun, using saved byte\n"); 1279
1240 *(*data)++ = saved_data; 1280 transfered = hostdata->dma_len - NCR5380_dma_residual(instance);
1241 (*count)--; 1281 hostdata->dma_len = 0;
1242 cnt--; 1282
1243 toPIO--; 1283 data = (unsigned char **)&hostdata->connected->SCp.ptr;
1244 } 1284 count = &hostdata->connected->SCp.this_residual;
1245 DMA_PRINTK("Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data); 1285 *data += transfered;
1246 NCR5380_transfer_pio(instance, &p, &cnt, data); 1286 *count -= transfered;
1247 *count -= toPIO - cnt; 1287
1288 if (atari_read_overruns) {
1289 if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
1290 cnt = toPIO = atari_read_overruns;
1291 if (overrun) {
1292 DMA_PRINTK("Got an input overrun, using saved byte\n");
1293 *(*data)++ = saved_data;
1294 (*count)--;
1295 cnt--;
1296 toPIO--;
1297 }
1298 DMA_PRINTK("Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);
1299 NCR5380_transfer_pio(instance, &p, &cnt, data);
1300 *count -= toPIO - cnt;
1301 }
1248 } 1302 }
1249 }
1250} 1303}
1251#endif /* REAL_DMA */ 1304#endif /* REAL_DMA */
1252 1305
1253 1306
1254/* 1307/*
1255 * Function : void NCR5380_intr (int irq) 1308 * Function : void NCR5380_intr (int irq)
1256 * 1309 *
1257 * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses 1310 * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
1258 * from the disconnected queue, and restarting NCR5380_main() 1311 * from the disconnected queue, and restarting NCR5380_main()
1259 * as required. 1312 * as required.
1260 * 1313 *
1261 * Inputs : int irq, irq that caused this interrupt. 1314 * Inputs : int irq, irq that caused this interrupt.
1262 * 1315 *
1263 */ 1316 */
1264 1317
1265static irqreturn_t NCR5380_intr (int irq, void *dev_id) 1318static irqreturn_t NCR5380_intr(int irq, void *dev_id)
1266{ 1319{
1267 struct Scsi_Host *instance = first_instance; 1320 struct Scsi_Host *instance = first_instance;
1268 int done = 1, handled = 0; 1321 int done = 1, handled = 0;
1269 unsigned char basr; 1322 unsigned char basr;
1270 1323
1271 INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO); 1324 INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO);
1272 1325
1273 /* Look for pending interrupts */ 1326 /* Look for pending interrupts */
1274 basr = NCR5380_read(BUS_AND_STATUS_REG); 1327 basr = NCR5380_read(BUS_AND_STATUS_REG);
1275 INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr); 1328 INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr);
1276 /* dispatch to appropriate routine if found and done=0 */ 1329 /* dispatch to appropriate routine if found and done=0 */
1277 if (basr & BASR_IRQ) { 1330 if (basr & BASR_IRQ) {
1278 NCR_PRINT(NDEBUG_INTR); 1331 NCR_PRINT(NDEBUG_INTR);
1279 if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { 1332 if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
1280 done = 0; 1333 done = 0;
1281 ENABLE_IRQ(); 1334 ENABLE_IRQ();
1282 INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO); 1335 INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO);
1283 NCR5380_reselect(instance); 1336 NCR5380_reselect(instance);
1284 (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1337 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1285 } 1338 } else if (basr & BASR_PARITY_ERROR) {
1286 else if (basr & BASR_PARITY_ERROR) { 1339 INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO);
1287 INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO); 1340 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1288 (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1341 } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
1289 } 1342 INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO);
1290 else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { 1343 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1291 INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO); 1344 } else {
1292 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1345 /*
1293 } 1346 * The rest of the interrupt conditions can occur only during a
1294 else { 1347 * DMA transfer
1295 /* 1348 */
1296 * The rest of the interrupt conditions can occur only during a
1297 * DMA transfer
1298 */
1299 1349
1300#if defined(REAL_DMA) 1350#if defined(REAL_DMA)
1301 /* 1351 /*
1302 * We should only get PHASE MISMATCH and EOP interrupts if we have 1352 * We should only get PHASE MISMATCH and EOP interrupts if we have
1303 * DMA enabled, so do a sanity check based on the current setting 1353 * DMA enabled, so do a sanity check based on the current setting
1304 * of the MODE register. 1354 * of the MODE register.
1305 */ 1355 */
1306 1356
1307 if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) && 1357 if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) &&
1308 ((basr & BASR_END_DMA_TRANSFER) || 1358 ((basr & BASR_END_DMA_TRANSFER) ||
1309 !(basr & BASR_PHASE_MATCH))) { 1359 !(basr & BASR_PHASE_MATCH))) {
1310 1360
1311 INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); 1361 INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
1312 NCR5380_dma_complete( instance ); 1362 NCR5380_dma_complete( instance );
1313 done = 0; 1363 done = 0;
1314 ENABLE_IRQ(); 1364 ENABLE_IRQ();
1315 } else 1365 } else
1316#endif /* REAL_DMA */ 1366#endif /* REAL_DMA */
1317 { 1367 {
1318/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */ 1368/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
1319 if (basr & BASR_PHASE_MATCH) 1369 if (basr & BASR_PHASE_MATCH)
1320 printk(KERN_NOTICE "scsi%d: unknown interrupt, " 1370 printk(KERN_NOTICE "scsi%d: unknown interrupt, "
1321 "BASR 0x%x, MR 0x%x, SR 0x%x\n", 1371 "BASR 0x%x, MR 0x%x, SR 0x%x\n",
1322 HOSTNO, basr, NCR5380_read(MODE_REG), 1372 HOSTNO, basr, NCR5380_read(MODE_REG),
1323 NCR5380_read(STATUS_REG)); 1373 NCR5380_read(STATUS_REG));
1324 (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1374 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1325 } 1375 }
1326 } /* if !(SELECTION || PARITY) */ 1376 } /* if !(SELECTION || PARITY) */
1327 handled = 1; 1377 handled = 1;
1328 } /* BASR & IRQ */ 1378 } /* BASR & IRQ */ else {
1329 else { 1379 printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, "
1330 printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, " 1380 "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr,
1331 "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr, 1381 NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
1332 NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); 1382 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
1333 (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); 1383 }
1334 } 1384
1335 1385 if (!done) {
1336 if (!done) { 1386 INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO);
1337 INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO); 1387 /* Put a call to NCR5380_main() on the queue... */
1338 /* Put a call to NCR5380_main() on the queue... */ 1388 queue_main();
1339 queue_main(); 1389 }
1340 } 1390 return IRQ_RETVAL(handled);
1341 return IRQ_RETVAL(handled);
1342} 1391}
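NCR5380_intr() already uses the two-argument irq handler form (pt_regs dropped) and returns via IRQ_RETVAL(), so it plugs straight into request_irq(). Purely for illustration of that convention; the driver's real registration is done in atari_scsi.c with its own handler names:

	if (request_irq(instance->irq, NCR5380_intr, 0, "NCR5380", instance)) {
		printk(KERN_ERR "scsi%d: cannot allocate irq %d\n",
		       instance->host_no, instance->irq);
		return -ENODEV;
	}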
1343 1392
1344#ifdef NCR5380_STATS 1393#ifdef NCR5380_STATS
1345static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd* cmd) 1394static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd *cmd)
1346{ 1395{
1347# ifdef NCR5380_STAT_LIMIT 1396# ifdef NCR5380_STAT_LIMIT
1348 if (cmd->request_bufflen > NCR5380_STAT_LIMIT) 1397 if (cmd->request_bufflen > NCR5380_STAT_LIMIT)
1349# endif 1398# endif
1350 switch (cmd->cmnd[0]) 1399 switch (cmd->cmnd[0]) {
1351 { 1400 case WRITE:
1352 case WRITE: 1401 case WRITE_6:
1353 case WRITE_6: 1402 case WRITE_10:
1354 case WRITE_10: 1403 hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase);
1355 hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase); 1404 /*hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;*/
1356 /*hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;*/ 1405 hostdata->pendingw--;
1357 hostdata->pendingw--; 1406 break;
1358 break; 1407 case READ:
1359 case READ: 1408 case READ_6:
1360 case READ_6: 1409 case READ_10:
1361 case READ_10: 1410 hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase);
1362 hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase); 1411 /*hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;*/
1363 /*hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;*/ 1412 hostdata->pendingr--;
1364 hostdata->pendingr--; 1413 break;
1365 break; 1414 }
1366 }
1367} 1415}
1368#endif 1416#endif
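The NCR5380_STATS bookkeeping relies on a subtract-at-issue / add-at-completion idiom: NCR5380_queue_command() does time_write[id] -= (jiffies - timebase) when the command is queued, and collect_stats() does time_write[id] += (jiffies - timebase) when it completes, so the net contribution per command is (t_done - timebase) - (t_issue - timebase) = t_done - t_issue. For example, a write issued at jiffies 1000 and completed at jiffies 1024 adds 24 jiffies of write time, independent of timebase.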
1369 1417
1370/* 1418/*
1371 * Function : int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd, 1419 * Function : int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd,
1372 * int tag); 1420 * int tag);
1373 * 1421 *
1374 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, 1422 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
1375 * including ARBITRATION, SELECTION, and initial message out for 1423 * including ARBITRATION, SELECTION, and initial message out for
1376 * IDENTIFY and queue messages. 1424 * IDENTIFY and queue messages.
1377 * 1425 *
1378 * Inputs : instance - instantiation of the 5380 driver on which this 1426 * Inputs : instance - instantiation of the 5380 driver on which this
1379 * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for 1427 * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for
1380 * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for 1428 * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for
1381 * the command that is presently connected. 1429 * the command that is presently connected.
1382 * 1430 *
1383 * Returns : -1 if selection could not execute for some reason, 1431 * Returns : -1 if selection could not execute for some reason,
1384 * 0 if selection succeeded or failed because the target 1432 * 0 if selection succeeded or failed because the target
1385 * did not respond. 1433 * did not respond.
1386 * 1434 *
1387 * Side effects : 1435 * Side effects :
1388 * If bus busy, arbitration failed, etc, NCR5380_select() will exit 1436 * If bus busy, arbitration failed, etc, NCR5380_select() will exit
1389 * with registers as they should have been on entry - ie 1437 * with registers as they should have been on entry - ie
1390 * SELECT_ENABLE will be set appropriately, the NCR5380 1438 * SELECT_ENABLE will be set appropriately, the NCR5380
1391 * will cease to drive any SCSI bus signals. 1439 * will cease to drive any SCSI bus signals.
1392 * 1440 *
1393 * If successful : I_T_L or I_T_L_Q nexus will be established, 1441 * If successful : I_T_L or I_T_L_Q nexus will be established,
1394 * instance->connected will be set to cmd. 1442 * instance->connected will be set to cmd.
1395 * SELECT interrupt will be disabled. 1443 * SELECT interrupt will be disabled.
1396 * 1444 *
1397 * If failed (no target) : cmd->scsi_done() will be called, and the 1445 * If failed (no target) : cmd->scsi_done() will be called, and the
1398 * cmd->result host byte set to DID_BAD_TARGET. 1446 * cmd->result host byte set to DID_BAD_TARGET.
1399 */ 1447 */
1400 1448
1401static int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) 1449static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
1402{ 1450{
1403 SETUP_HOSTDATA(instance); 1451 SETUP_HOSTDATA(instance);
1404 unsigned char tmp[3], phase; 1452 unsigned char tmp[3], phase;
1405 unsigned char *data; 1453 unsigned char *data;
1406 int len; 1454 int len;
1407 unsigned long timeout; 1455 unsigned long timeout;
1408 unsigned long flags; 1456 unsigned long flags;
1409 1457
1410 hostdata->restart_select = 0; 1458 hostdata->restart_select = 0;
1411 NCR_PRINT(NDEBUG_ARBITRATION); 1459 NCR_PRINT(NDEBUG_ARBITRATION);
1412 ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO, 1460 ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO,
1413 instance->this_id); 1461 instance->this_id);
1414 1462
1415 /* 1463 /*
1416 * Set the phase bits to 0, otherwise the NCR5380 won't drive the 1464 * Set the phase bits to 0, otherwise the NCR5380 won't drive the
1417 * data bus during SELECTION. 1465 * data bus during SELECTION.
1418 */ 1466 */
1419 1467
1420 local_irq_save(flags); 1468 local_irq_save(flags);
1421 if (hostdata->connected) { 1469 if (hostdata->connected) {
1470 local_irq_restore(flags);
1471 return -1;
1472 }
1473 NCR5380_write(TARGET_COMMAND_REG, 0);
1474
1475 /*
1476 * Start arbitration.
1477 */
1478
1479 NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
1480 NCR5380_write(MODE_REG, MR_ARBITRATE);
1481
1422 local_irq_restore(flags); 1482 local_irq_restore(flags);
1423 return -1; 1483
1424 } 1484 /* Wait for arbitration logic to complete */
1425 NCR5380_write(TARGET_COMMAND_REG, 0); 1485#if defined(NCR_TIMEOUT)
1426 1486 {
1427 1487 unsigned long timeout = jiffies + 2*NCR_TIMEOUT;
1428 /* 1488
1429 * Start arbitration. 1489 while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) &&
1430 */ 1490 time_before(jiffies, timeout) && !hostdata->connected)
1431 1491 ;
1432 NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); 1492 if (time_after_eq(jiffies, timeout)) {
1433 NCR5380_write(MODE_REG, MR_ARBITRATE); 1493 printk("scsi : arbitration timeout at %d\n", __LINE__);
1434 1494 NCR5380_write(MODE_REG, MR_BASE);
1435 local_irq_restore(flags); 1495 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1436 1496 return -1;
1437 /* Wait for arbitration logic to complete */ 1497 }
1438#if NCR_TIMEOUT 1498 }
1439 {
1440 unsigned long timeout = jiffies + 2*NCR_TIMEOUT;
1441
1442 while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS)
1443 && time_before(jiffies, timeout) && !hostdata->connected)
1444 ;
1445 if (time_after_eq(jiffies, timeout))
1446 {
1447 printk("scsi : arbitration timeout at %d\n", __LINE__);
1448 NCR5380_write(MODE_REG, MR_BASE);
1449 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1450 return -1;
1451 }
1452 }
1453#else /* NCR_TIMEOUT */ 1499#else /* NCR_TIMEOUT */
1454 while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) 1500 while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) &&
1455 && !hostdata->connected); 1501 !hostdata->connected)
1502 ;
1456#endif 1503#endif
1457 1504
1458 ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO); 1505 ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO);
1459
1460 if (hostdata->connected) {
1461 NCR5380_write(MODE_REG, MR_BASE);
1462 return -1;
1463 }
1464 /*
1465 * The arbitration delay is 2.2us, but this is a minimum and there is
1466 * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate
1467 * the integral nature of udelay().
1468 *
1469 */
1470
1471 udelay(3);
1472
1473 /* Check for lost arbitration */
1474 if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
1475 (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) ||
1476 (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
1477 hostdata->connected) {
1478 NCR5380_write(MODE_REG, MR_BASE);
1479 ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
1480 HOSTNO);
1481 return -1;
1482 }
1483
1484 /* after/during arbitration, BSY should be asserted.
1485 IBM DPES-31080 Version S31Q works now */
1486 /* Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) */
1487 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL |
1488 ICR_ASSERT_BSY ) ;
1489
1490 if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
1491 hostdata->connected) {
1492 NCR5380_write(MODE_REG, MR_BASE);
1493 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1494 ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
1495 HOSTNO);
1496 return -1;
1497 }
1498 1506
1499 /* 1507 if (hostdata->connected) {
1500 * Again, bus clear + bus settle time is 1.2us, however, this is 1508 NCR5380_write(MODE_REG, MR_BASE);
1501 * a minimum so we'll udelay ceil(1.2) 1509 return -1;
1502 */ 1510 }
1511 /*
1512 * The arbitration delay is 2.2us, but this is a minimum and there is
1513 * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate
1514 * the integral nature of udelay().
1515 *
1516 */
1517
1518 udelay(3);
1519
1520 /* Check for lost arbitration */
1521 if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
1522 (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) ||
1523 (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
1524 hostdata->connected) {
1525 NCR5380_write(MODE_REG, MR_BASE);
1526 ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
1527 HOSTNO);
1528 return -1;
1529 }
1530
1531 /* after/during arbitration, BSY should be asserted.
1532 IBM DPES-31080 Version S31Q works now */
1533 /* Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) */
1534 NCR5380_write(INITIATOR_COMMAND_REG,
1535 ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY);
1536
1537 if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
1538 hostdata->connected) {
1539 NCR5380_write(MODE_REG, MR_BASE);
1540 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1541 ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
1542 HOSTNO);
1543 return -1;
1544 }
1545
1546 /*
1547 * Again, bus clear + bus settle time is 1.2us, however, this is
1548 * a minimum so we'll udelay ceil(1.2)
1549 */
1503 1550
1504#ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY 1551#ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY
1505 /* ++roman: But some targets (see above :-) seem to need a bit more... */ 1552 /* ++roman: But some targets (see above :-) seem to need a bit more... */
1506 udelay(15); 1553 udelay(15);
1507#else 1554#else
1508 udelay(2); 1555 udelay(2);
1509#endif 1556#endif
1510 1557
1511 if (hostdata->connected) { 1558 if (hostdata->connected) {
1559 NCR5380_write(MODE_REG, MR_BASE);
1560 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1561 return -1;
1562 }
1563
1564 ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO);
1565
1566 /*
1567 * Now that we have won arbitration, start Selection process, asserting
1568 * the host and target ID's on the SCSI bus.
1569 */
1570
1571 NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id)));
1572
1573 /*
1574 * Raise ATN while SEL is true before BSY goes false from arbitration,
1575 * since this is the only way to guarantee that we'll get a MESSAGE OUT
1576 * phase immediately after selection.
1577 */
1578
1579 NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY |
1580 ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL ));
1512 NCR5380_write(MODE_REG, MR_BASE); 1581 NCR5380_write(MODE_REG, MR_BASE);
1513 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1514 return -1;
1515 }
1516 1582
1517 ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO); 1583 /*
1584 * Reselect interrupts must be turned off prior to the dropping of BSY,
1585 * otherwise we will trigger an interrupt.
1586 */
1587
1588 if (hostdata->connected) {
1589 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1590 return -1;
1591 }
1518 1592
1519 /* 1593 NCR5380_write(SELECT_ENABLE_REG, 0);
1520 * Now that we have won arbitration, start Selection process, asserting 1594
1521 * the host and target ID's on the SCSI bus. 1595 /*
1522 */ 1596 * The initiator shall then wait at least two deskew delays and release
1597 * the BSY signal.
1598 */
1599 udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */
1600
1601 /* Reset BSY */
1602 NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA |
1603 ICR_ASSERT_ATN | ICR_ASSERT_SEL));
1604
1605 /*
1606 * Something weird happens when we cease to drive BSY - looks
1607 * like the board/chip is letting us do another read before the
1608 * appropriate propagation delay has expired, and we're confusing
1609 * a BSY signal from ourselves as the target's response to SELECTION.
1610 *
1611 * A small delay (the 'C++' frontend breaks the pipeline with an
1612 * unnecessary jump, making it work on my 386-33/Trantor T128, the
1613 * tighter 'C' code breaks and requires this) solves the problem -
1614 * the 1 us delay is arbitrary, and only used because this delay will
1615 * be the same on other platforms and since it works here, it should
1616 * work there.
1617 *
1618 * wingel suggests that this could be due to failing to wait
1619 * one deskew delay.
1620 */
1523 1621
1524 NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id))); 1622 udelay(1);
1525 1623
1526 /* 1624 SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
1527 * Raise ATN while SEL is true before BSY goes false from arbitration,
1528 * since this is the only way to guarantee that we'll get a MESSAGE OUT
1529 * phase immediately after selection.
1530 */
1531 1625
1532 NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY | 1626 /*
1533 ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL )); 1627 * The SCSI specification calls for a 250 ms timeout for the actual
1534 NCR5380_write(MODE_REG, MR_BASE); 1628 * selection.
1629 */
1535 1630
1536 /* 1631 timeout = jiffies + 25;
1537 * Reselect interrupts must be turned off prior to the dropping of BSY,
1538 * otherwise we will trigger an interrupt.
1539 */
1540 1632
1541 if (hostdata->connected) { 1633 /*
1542 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1634 * XXX very interesting - we're seeing a bounce where the BSY we
1543 return -1; 1635 * asserted is being reflected / still asserted (propagation delay?)
1544 } 1636 * and it's detecting as true. Sigh.
1545 1637 */
1546 NCR5380_write(SELECT_ENABLE_REG, 0);
1547
1548 /*
1549 * The initiator shall then wait at least two deskew delays and release
1550 * the BSY signal.
1551 */
1552 udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */
1553
1554 /* Reset BSY */
1555 NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA |
1556 ICR_ASSERT_ATN | ICR_ASSERT_SEL));
1557
1558 /*
1559 * Something weird happens when we cease to drive BSY - looks
1560 * like the board/chip is letting us do another read before the
1561 * appropriate propagation delay has expired, and we're confusing
1562 * a BSY signal from ourselves as the target's response to SELECTION.
1563 *
1564 * A small delay (the 'C++' frontend breaks the pipeline with an
1565 * unnecessary jump, making it work on my 386-33/Trantor T128, the
1566 * tighter 'C' code breaks and requires this) solves the problem -
1567 * the 1 us delay is arbitrary, and only used because this delay will
1568 * be the same on other platforms and since it works here, it should
1569 * work there.
1570 *
1571 * wingel suggests that this could be due to failing to wait
1572 * one deskew delay.
1573 */
1574
1575 udelay(1);
1576
1577 SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
1578
1579 /*
1580 * The SCSI specification calls for a 250 ms timeout for the actual
1581 * selection.
1582 */
1583
1584 timeout = jiffies + 25;
1585
1586 /*
1587 * XXX very interesting - we're seeing a bounce where the BSY we
1588 * asserted is being reflected / still asserted (propagation delay?)
1589 * and it's detecting as true. Sigh.
1590 */
1591 1638
1592#if 0 1639#if 0
1593 /* ++roman: If a target conformed to the SCSI standard, it wouldn't assert 1640 /* ++roman: If a target conformed to the SCSI standard, it wouldn't assert
 1594 * IO while SEL is true. But again, there are some disks out there in the 1641 * IO while SEL is true. But again, there are some disks out there in the
1595 * world that do that nevertheless. (Somebody claimed that this announces 1642 * world that do that nevertheless. (Somebody claimed that this announces
1596 * reselection capability of the target.) So we better skip that test and 1643 * reselection capability of the target.) So we better skip that test and
1597 * only wait for BSY... (Famous german words: Der Klügere gibt nach :-) 1644 * only wait for BSY... (Famous german words: Der Klügere gibt nach :-)
1598 */ 1645 */
1599 1646
1600 while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & 1647 while (time_before(jiffies, timeout) &&
1601 (SR_BSY | SR_IO))); 1648 !(NCR5380_read(STATUS_REG) & (SR_BSY | SR_IO)))
1602 1649 ;
1603 if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == 1650
1604 (SR_SEL | SR_IO)) { 1651 if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
1605 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1652 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1606 NCR5380_reselect(instance); 1653 NCR5380_reselect(instance);
1607 printk (KERN_ERR "scsi%d: reselection after won arbitration?\n", 1654 printk(KERN_ERR "scsi%d: reselection after won arbitration?\n",
1608 HOSTNO); 1655 HOSTNO);
1609 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 1656 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1610 return -1; 1657 return -1;
1611 } 1658 }
1612#else 1659#else
1613 while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY)); 1660 while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY))
1661 ;
1614#endif 1662#endif
1615 1663
1616 /* 1664 /*
1617 * No less than two deskew delays after the initiator detects the 1665 * No less than two deskew delays after the initiator detects the
1618 * BSY signal is true, it shall release the SEL signal and may 1666 * BSY signal is true, it shall release the SEL signal and may
1619 * change the DATA BUS. -wingel 1667 * change the DATA BUS. -wingel
1620 */ 1668 */
1621 1669
1622 udelay(1); 1670 udelay(1);
1623 1671
1624 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); 1672 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
1625 1673
1626 if (!(NCR5380_read(STATUS_REG) & SR_BSY)) { 1674 if (!(NCR5380_read(STATUS_REG) & SR_BSY)) {
1627 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1675 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1628 if (hostdata->targets_present & (1 << cmd->device->id)) { 1676 if (hostdata->targets_present & (1 << cmd->device->id)) {
1629 printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); 1677 printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
1630 if (hostdata->restart_select) 1678 if (hostdata->restart_select)
1631 printk(KERN_NOTICE "\trestart select\n"); 1679 printk(KERN_NOTICE "\trestart select\n");
1632 NCR_PRINT(NDEBUG_ANY); 1680 NCR_PRINT(NDEBUG_ANY);
1633 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 1681 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1634 return -1; 1682 return -1;
1635 } 1683 }
1636 cmd->result = DID_BAD_TARGET << 16; 1684 cmd->result = DID_BAD_TARGET << 16;
1637#ifdef NCR5380_STATS 1685#ifdef NCR5380_STATS
1638 collect_stats(hostdata, cmd); 1686 collect_stats(hostdata, cmd);
1639#endif 1687#endif
1640#ifdef SUPPORT_TAGS 1688#ifdef SUPPORT_TAGS
1641 cmd_free_tag( cmd ); 1689 cmd_free_tag(cmd);
1642#endif 1690#endif
1643 cmd->scsi_done(cmd); 1691 cmd->scsi_done(cmd);
1644 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 1692 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1645 SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO); 1693 SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO);
1646 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 1694 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
1647 return 0; 1695 return 0;
1648 } 1696 }
1649 1697
1650 hostdata->targets_present |= (1 << cmd->device->id); 1698 hostdata->targets_present |= (1 << cmd->device->id);
1651 1699
1652 /* 1700 /*
1653 * Since we followed the SCSI spec, and raised ATN while SEL 1701 * Since we followed the SCSI spec, and raised ATN while SEL
1654 * was true but before BSY was false during selection, the information 1702 * was true but before BSY was false during selection, the information
1655 * transfer phase should be a MESSAGE OUT phase so that we can send the 1703 * transfer phase should be a MESSAGE OUT phase so that we can send the
1656 * IDENTIFY message. 1704 * IDENTIFY message.
1657 * 1705 *
1658 * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG 1706 * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
1659 * message (2 bytes) with a tag ID that we increment with every command 1707 * message (2 bytes) with a tag ID that we increment with every command
1660 * until it wraps back to 0. 1708 * until it wraps back to 0.
1661 * 1709 *
1662 * XXX - it turns out that there are some broken SCSI-II devices, 1710 * XXX - it turns out that there are some broken SCSI-II devices,
1663 * which claim to support tagged queuing but fail when more than 1711 * which claim to support tagged queuing but fail when more than
1664 * some number of commands are issued at once. 1712 * some number of commands are issued at once.
1665 */ 1713 */
1666 1714
1667 /* Wait for start of REQ/ACK handshake */ 1715 /* Wait for start of REQ/ACK handshake */
1668 while (!(NCR5380_read(STATUS_REG) & SR_REQ)); 1716 while (!(NCR5380_read(STATUS_REG) & SR_REQ))
1669 1717 ;
1670 SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n", 1718
1671 HOSTNO, cmd->device->id); 1719 SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
1672 tmp[0] = IDENTIFY(1, cmd->device->lun); 1720 HOSTNO, cmd->device->id);
1721 tmp[0] = IDENTIFY(1, cmd->device->lun);
1673 1722
1674#ifdef SUPPORT_TAGS 1723#ifdef SUPPORT_TAGS
1675 if (cmd->tag != TAG_NONE) { 1724 if (cmd->tag != TAG_NONE) {
1676 tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG; 1725 tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG;
1677 tmp[2] = cmd->tag; 1726 tmp[2] = cmd->tag;
1678 len = 3; 1727 len = 3;
1679 } else 1728 } else
1680 len = 1; 1729 len = 1;
1681#else 1730#else
1682 len = 1; 1731 len = 1;
1683 cmd->tag=0; 1732 cmd->tag = 0;
1684#endif /* SUPPORT_TAGS */ 1733#endif /* SUPPORT_TAGS */
1685 1734
1686 /* Send message(s) */ 1735 /* Send message(s) */
1687 data = tmp; 1736 data = tmp;
1688 phase = PHASE_MSGOUT; 1737 phase = PHASE_MSGOUT;
1689 NCR5380_transfer_pio(instance, &phase, &len, &data); 1738 NCR5380_transfer_pio(instance, &phase, &len, &data);
1690 SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO); 1739 SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO);
1691 /* XXX need to handle errors here */ 1740 /* XXX need to handle errors here */
1692 hostdata->connected = cmd; 1741 hostdata->connected = cmd;
1693#ifndef SUPPORT_TAGS 1742#ifndef SUPPORT_TAGS
1694 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); 1743 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
1695#endif 1744#endif
1696
1697 initialize_SCp(cmd);
1698 1745
1746 initialize_SCp(cmd);
1699 1747
1700 return 0; 1748 return 0;
1701} 1749}
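One detail in the selection path above: the 250 ms selection timeout is written as timeout = jiffies + 25, which only equals 250 ms when HZ is 100 (typically the case on m68k at the time). A HZ-independent spelling, shown only as a sketch and not something this patch changes, would be:

	unsigned long timeout = jiffies + msecs_to_jiffies(250);

	while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY))
		;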
1702 1750
1703/* 1751/*
1704 * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, 1752 * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
1705 * unsigned char *phase, int *count, unsigned char **data) 1753 * unsigned char *phase, int *count, unsigned char **data)
1706 * 1754 *
1707 * Purpose : transfers data in given phase using polled I/O 1755 * Purpose : transfers data in given phase using polled I/O
1708 * 1756 *
1709 * Inputs : instance - instance of driver, *phase - pointer to 1757 * Inputs : instance - instance of driver, *phase - pointer to
1710 * what phase is expected, *count - pointer to number of 1758 * what phase is expected, *count - pointer to number of
1711 * bytes to transfer, **data - pointer to data pointer. 1759 * bytes to transfer, **data - pointer to data pointer.
1712 * 1760 *
1713 * Returns : -1 when different phase is entered without transferring 1761 * Returns : -1 when different phase is entered without transferring
 1714 * maximum number of bytes, 0 if all bytes are transferred or exit 1762 * maximum number of bytes, 0 if all bytes are transferred or exit
1715 * is in same phase. 1763 * is in same phase.
1716 * 1764 *
1717 * Also, *phase, *count, *data are modified in place. 1765 * Also, *phase, *count, *data are modified in place.
1718 * 1766 *
1719 * XXX Note : handling for bus free may be useful. 1767 * XXX Note : handling for bus free may be useful.
1720 */ 1768 */
1721 1769
1722/* 1770/*
1723 * Note : this code is not as quick as it could be, however it 1771 * Note : this code is not as quick as it could be, however it
1724 * IS 100% reliable, and for the actual data transfer where speed 1772 * IS 100% reliable, and for the actual data transfer where speed
1725 * counts, we will always do a pseudo DMA or DMA transfer. 1773 * counts, we will always do a pseudo DMA or DMA transfer.
1726 */ 1774 */
1727 1775
1728static int NCR5380_transfer_pio( struct Scsi_Host *instance, 1776static int NCR5380_transfer_pio(struct Scsi_Host *instance,
1729 unsigned char *phase, int *count, 1777 unsigned char *phase, int *count,
1730 unsigned char **data) 1778 unsigned char **data)
1731{ 1779{
1732 register unsigned char p = *phase, tmp; 1780 register unsigned char p = *phase, tmp;
1733 register int c = *count; 1781 register int c = *count;
1734 register unsigned char *d = *data; 1782 register unsigned char *d = *data;
1735 1783
1736 /* 1784 /*
1737 * The NCR5380 chip will only drive the SCSI bus when the 1785 * The NCR5380 chip will only drive the SCSI bus when the
1738 * phase specified in the appropriate bits of the TARGET COMMAND 1786 * phase specified in the appropriate bits of the TARGET COMMAND
1739 * REGISTER match the STATUS REGISTER 1787 * REGISTER match the STATUS REGISTER
1740 */
1741
1742 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
1743
1744 do {
1745 /*
1746 * Wait for assertion of REQ, after which the phase bits will be
1747 * valid
1748 */ 1788 */
1749 while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));
1750 1789
1751 HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO); 1790 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
1752 1791
1753 /* Check for phase mismatch */ 1792 do {
1754 if ((tmp & PHASE_MASK) != p) { 1793 /*
1755 PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO); 1794 * Wait for assertion of REQ, after which the phase bits will be
1756 NCR_PRINT_PHASE(NDEBUG_PIO); 1795 * valid
1757 break; 1796 */
1758 } 1797 while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
1798 ;
1759 1799
1760 /* Do actual transfer from SCSI bus to / from memory */ 1800 HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO);
1761 if (!(p & SR_IO))
1762 NCR5380_write(OUTPUT_DATA_REG, *d);
1763 else
1764 *d = NCR5380_read(CURRENT_SCSI_DATA_REG);
1765 1801
1766 ++d; 1802 /* Check for phase mismatch */
1803 if ((tmp & PHASE_MASK) != p) {
1804 PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO);
1805 NCR_PRINT_PHASE(NDEBUG_PIO);
1806 break;
1807 }
1767 1808
1768 /* 1809 /* Do actual transfer from SCSI bus to / from memory */
1769 * The SCSI standard suggests that in MSGOUT phase, the initiator 1810 if (!(p & SR_IO))
1770 * should drop ATN on the last byte of the message phase 1811 NCR5380_write(OUTPUT_DATA_REG, *d);
1771 * after REQ has been asserted for the handshake but before 1812 else
1772 * the initiator raises ACK. 1813 *d = NCR5380_read(CURRENT_SCSI_DATA_REG);
1773 */
1774 1814
1775 if (!(p & SR_IO)) { 1815 ++d;
1776 if (!((p & SR_MSG) && c > 1)) {
1777 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
1778 ICR_ASSERT_DATA);
1779 NCR_PRINT(NDEBUG_PIO);
1780 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
1781 ICR_ASSERT_DATA | ICR_ASSERT_ACK);
1782 } else {
1783 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
1784 ICR_ASSERT_DATA | ICR_ASSERT_ATN);
1785 NCR_PRINT(NDEBUG_PIO);
1786 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
1787 ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
1788 }
1789 } else {
1790 NCR_PRINT(NDEBUG_PIO);
1791 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
1792 }
1793 1816
1794 while (NCR5380_read(STATUS_REG) & SR_REQ); 1817 /*
1818 * The SCSI standard suggests that in MSGOUT phase, the initiator
1819 * should drop ATN on the last byte of the message phase
1820 * after REQ has been asserted for the handshake but before
1821 * the initiator raises ACK.
1822 */
1795 1823
1796 HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO); 1824 if (!(p & SR_IO)) {
1825 if (!((p & SR_MSG) && c > 1)) {
1826 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
1827 NCR_PRINT(NDEBUG_PIO);
1828 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
1829 ICR_ASSERT_DATA | ICR_ASSERT_ACK);
1830 } else {
1831 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
1832 ICR_ASSERT_DATA | ICR_ASSERT_ATN);
1833 NCR_PRINT(NDEBUG_PIO);
1834 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
1835 ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
1836 }
1837 } else {
1838 NCR_PRINT(NDEBUG_PIO);
1839 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
1840 }
1797 1841
1798/* 1842 while (NCR5380_read(STATUS_REG) & SR_REQ)
1799 * We have several special cases to consider during REQ/ACK handshaking : 1843 ;
1800 * 1. We were in MSGOUT phase, and we are on the last byte of the 1844
1801 * message. ATN must be dropped as ACK is dropped. 1845 HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO);
1802 * 1846
1803 * 2. We are in a MSGIN phase, and we are on the last byte of the 1847 /*
1804 * message. We must exit with ACK asserted, so that the calling 1848 * We have several special cases to consider during REQ/ACK handshaking :
1805 * code may raise ATN before dropping ACK to reject the message. 1849 * 1. We were in MSGOUT phase, and we are on the last byte of the
1806 * 1850 * message. ATN must be dropped as ACK is dropped.
1807 * 3. ACK and ATN are clear and the target may proceed as normal. 1851 *
1808 */ 1852 * 2. We are in a MSGIN phase, and we are on the last byte of the
1809 if (!(p == PHASE_MSGIN && c == 1)) { 1853 * message. We must exit with ACK asserted, so that the calling
1810 if (p == PHASE_MSGOUT && c > 1) 1854 * code may raise ATN before dropping ACK to reject the message.
1811 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); 1855 *
1812 else 1856 * 3. ACK and ATN are clear and the target may proceed as normal.
1813 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 1857 */
1814 } 1858 if (!(p == PHASE_MSGIN && c == 1)) {
1815 } while (--c); 1859 if (p == PHASE_MSGOUT && c > 1)
1816 1860 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
1817 PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c); 1861 else
1818 1862 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
1819 *count = c; 1863 }
1820 *data = d; 1864 } while (--c);
1821 tmp = NCR5380_read(STATUS_REG); 1865
1822 /* The phase read from the bus is valid if either REQ is (already) 1866 PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c);
1823 * asserted or if ACK hasn't been released yet. The latter is the case if 1867
1824 * we're in MSGIN and all wanted bytes have been received. */ 1868 *count = c;
1825 if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0)) 1869 *data = d;
1826 *phase = tmp & PHASE_MASK; 1870 tmp = NCR5380_read(STATUS_REG);
1827 else 1871 /* The phase read from the bus is valid if either REQ is (already)
1828 *phase = PHASE_UNKNOWN; 1872 * asserted or if ACK hasn't been released yet. The latter is the case if
1829 1873 * we're in MSGIN and all wanted bytes have been received.
1830 if (!c || (*phase == p)) 1874 */
1831 return 0; 1875 if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0))
1832 else 1876 *phase = tmp & PHASE_MASK;
1833 return -1; 1877 else
1878 *phase = PHASE_UNKNOWN;
1879
1880 if (!c || (*phase == p))
1881 return 0;
1882 else
1883 return -1;
1834} 1884}
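The contract described in the comment above is worth seeing in isolation: *phase, *count and *data are all updated in place, and the caller distinguishes "everything moved (or same phase on exit)" from "the target changed phase early" by the return value plus the residual count. The sketch below is a self-contained stand-in, not the driver function; the phase values are arbitrary illustrations.

/* Stand-in illustrating the in/out-pointer transfer contract. */
#include <stdio.h>

static int fake_transfer(unsigned char *phase, int *count, unsigned char **data)
{
	int done = (*count < 3) ? *count : 3;	/* pretend 3 bytes moved */

	*data += done;
	*count -= done;
	if (*count) {
		*phase = 0x7;			/* some other phase (illustrative) */
		return -1;			/* phase changed before all bytes moved */
	}
	return 0;				/* all bytes transferred */
}

int main(void)
{
	unsigned char buf[8] = "ABCDEFGH";
	unsigned char *data = buf;
	unsigned char phase = 0x0;
	int count = 8;

	if (fake_transfer(&phase, &count, &data))
		printf("phase mismatch, residual %d bytes at offset %ld\n",
		       count, (long)(data - buf));
	return 0;
}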
1835 1885
1836/* 1886/*
1837 * Function : do_abort (Scsi_Host *host) 1887 * Function : do_abort (Scsi_Host *host)
1838 * 1888 *
1839 * Purpose : abort the currently established nexus. Should only be 1889 * Purpose : abort the currently established nexus. Should only be
1840 * called from a routine which can drop into a 1890 * called from a routine which can drop into a
1841 * 1891 *
1842 * Returns : 0 on success, -1 on failure. 1892 * Returns : 0 on success, -1 on failure.
1843 */ 1893 */
1844 1894
1845static int do_abort (struct Scsi_Host *host) 1895static int do_abort(struct Scsi_Host *host)
1846{ 1896{
1847 unsigned char tmp, *msgptr, phase; 1897 unsigned char tmp, *msgptr, phase;
1848 int len; 1898 int len;
1849 1899
1850 /* Request message out phase */ 1900 /* Request message out phase */
1851 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
1852
1853 /*
1854 * Wait for the target to indicate a valid phase by asserting
1855 * REQ. Once this happens, we'll have either a MSGOUT phase
1856 * and can immediately send the ABORT message, or we'll have some
1857 * other phase and will have to source/sink data.
1858 *
1859 * We really don't care what value was on the bus or what value
1860 * the target sees, so we just handshake.
1861 */
1862
1863 while (!(tmp = NCR5380_read(STATUS_REG)) & SR_REQ);
1864
1865 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
1866
1867 if ((tmp & PHASE_MASK) != PHASE_MSGOUT) {
1868 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
1869 ICR_ASSERT_ACK);
1870 while (NCR5380_read(STATUS_REG) & SR_REQ);
1871 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); 1901 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
1872 } 1902
1873 1903 /*
1874 tmp = ABORT; 1904 * Wait for the target to indicate a valid phase by asserting
1875 msgptr = &tmp; 1905 * REQ. Once this happens, we'll have either a MSGOUT phase
1876 len = 1; 1906 * and can immediately send the ABORT message, or we'll have some
1877 phase = PHASE_MSGOUT; 1907 * other phase and will have to source/sink data.
1878 NCR5380_transfer_pio (host, &phase, &len, &msgptr); 1908 *
1879 1909 * We really don't care what value was on the bus or what value
1880 /* 1910 * the target sees, so we just handshake.
1881 * If we got here, and the command completed successfully, 1911 */
1882 * we're about to go into bus free state. 1912
1883 */ 1913 while (!(tmp = NCR5380_read(STATUS_REG)) & SR_REQ)
1884 1914 ;
1885 return len ? -1 : 0; 1915
1916 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
1917
1918 if ((tmp & PHASE_MASK) != PHASE_MSGOUT) {
1919 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
1920 ICR_ASSERT_ACK);
1921 while (NCR5380_read(STATUS_REG) & SR_REQ)
1922 ;
1923 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
1924 }
1925
1926 tmp = ABORT;
1927 msgptr = &tmp;
1928 len = 1;
1929 phase = PHASE_MSGOUT;
1930 NCR5380_transfer_pio(host, &phase, &len, &msgptr);
1931
1932 /*
1933 * If we got here, and the command completed successfully,
1934 * we're about to go into bus free state.
1935 */
1936
1937 return len ? -1 : 0;
1886} 1938}
1887 1939
1888#if defined(REAL_DMA) 1940#if defined(REAL_DMA)
1889/* 1941/*
1890 * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, 1942 * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
1891 * unsigned char *phase, int *count, unsigned char **data) 1943 * unsigned char *phase, int *count, unsigned char **data)
1892 * 1944 *
1893 * Purpose : transfers data in given phase using either real 1945 * Purpose : transfers data in given phase using either real
1894 * or pseudo DMA. 1946 * or pseudo DMA.
1895 * 1947 *
1896 * Inputs : instance - instance of driver, *phase - pointer to 1948 * Inputs : instance - instance of driver, *phase - pointer to
1897 * what phase is expected, *count - pointer to number of 1949 * what phase is expected, *count - pointer to number of
1898 * bytes to transfer, **data - pointer to data pointer. 1950 * bytes to transfer, **data - pointer to data pointer.
1899 * 1951 *
1900 * Returns : -1 when different phase is entered without transferring 1952 * Returns : -1 when different phase is entered without transferring
1901 * maximum number of bytes, 0 if all bytes are transferred or exit 1953 * maximum number of bytes, 0 if all bytes are transferred or exit
1902 * is in same phase. 1954 * is in same phase.
1903 * 1955 *
1904 * Also, *phase, *count, *data are modified in place. 1956 * Also, *phase, *count, *data are modified in place.
1905 * 1957 *
1906 */ 1958 */
1907 1959
1908 1960
1909static int NCR5380_transfer_dma( struct Scsi_Host *instance, 1961static int NCR5380_transfer_dma(struct Scsi_Host *instance,
1910 unsigned char *phase, int *count, 1962 unsigned char *phase, int *count,
1911 unsigned char **data) 1963 unsigned char **data)
1912{ 1964{
1913 SETUP_HOSTDATA(instance); 1965 SETUP_HOSTDATA(instance);
1914 register int c = *count; 1966 register int c = *count;
1915 register unsigned char p = *phase; 1967 register unsigned char p = *phase;
1916 register unsigned char *d = *data; 1968 register unsigned char *d = *data;
1917 unsigned char tmp; 1969 unsigned char tmp;
1918 unsigned long flags; 1970 unsigned long flags;
1919 1971
1920 if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) { 1972 if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
1921 *phase = tmp; 1973 *phase = tmp;
1922 return -1; 1974 return -1;
1923 } 1975 }
1924 1976
1925 if (atari_read_overruns && (p & SR_IO)) { 1977 if (atari_read_overruns && (p & SR_IO))
1926 c -= atari_read_overruns; 1978 c -= atari_read_overruns;
1927 }
1928 1979
1929 DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n", 1980 DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n",
1930 HOSTNO, (p & SR_IO) ? "reading" : "writing", 1981 HOSTNO, (p & SR_IO) ? "reading" : "writing",
1931 c, (p & SR_IO) ? "to" : "from", d); 1982 c, (p & SR_IO) ? "to" : "from", d);
1932 1983
1933 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); 1984 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
1934 1985
1935#ifdef REAL_DMA 1986#ifdef REAL_DMA
1936 NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY); 1987 NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
1937#endif /* def REAL_DMA */ 1988#endif /* def REAL_DMA */
1938 1989
1939 if (IS_A_TT()) { 1990 if (IS_A_TT()) {
1940 /* On the Medusa, it is a must to initialize the DMA before 1991 /* On the Medusa, it is a must to initialize the DMA before
1941 * starting the NCR. This is also the cleaner way for the TT. 1992 * starting the NCR. This is also the cleaner way for the TT.
1942 */ 1993 */
1943 local_irq_save(flags); 1994 local_irq_save(flags);
1944 hostdata->dma_len = (p & SR_IO) ? 1995 hostdata->dma_len = (p & SR_IO) ?
1945 NCR5380_dma_read_setup(instance, d, c) : 1996 NCR5380_dma_read_setup(instance, d, c) :
1946 NCR5380_dma_write_setup(instance, d, c); 1997 NCR5380_dma_write_setup(instance, d, c);
1947 local_irq_restore(flags); 1998 local_irq_restore(flags);
1948 } 1999 }
1949 2000
1950 if (p & SR_IO) 2001 if (p & SR_IO)
1951 NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); 2002 NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
1952 else { 2003 else {
1953 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); 2004 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
1954 NCR5380_write(START_DMA_SEND_REG, 0); 2005 NCR5380_write(START_DMA_SEND_REG, 0);
1955 } 2006 }
1956 2007
1957 if (!IS_A_TT()) { 2008 if (!IS_A_TT()) {
1958 /* On the Falcon, the DMA setup must be done after the last */ 2009 /* On the Falcon, the DMA setup must be done after the last */
1959 /* NCR access, else the DMA setup gets trashed! 2010 /* NCR access, else the DMA setup gets trashed!
1960 */ 2011 */
1961 local_irq_save(flags); 2012 local_irq_save(flags);
1962 hostdata->dma_len = (p & SR_IO) ? 2013 hostdata->dma_len = (p & SR_IO) ?
1963 NCR5380_dma_read_setup(instance, d, c) : 2014 NCR5380_dma_read_setup(instance, d, c) :
1964 NCR5380_dma_write_setup(instance, d, c); 2015 NCR5380_dma_write_setup(instance, d, c);
1965 local_irq_restore(flags); 2016 local_irq_restore(flags);
1966 } 2017 }
1967 return 0; 2018 return 0;
1968} 2019}
1969#endif /* defined(REAL_DMA) */ 2020#endif /* defined(REAL_DMA) */
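One detail in the DMA path above is easy to miss: on reads, the count programmed into the hardware is shortened by atari_read_overruns so the trailing bytes can be picked up afterwards by slower means. A trivial sketch of that residual arithmetic, with invented numbers:

/* Illustrative only: values are made up, not taken from the driver. */
#include <stdio.h>

int main(void)
{
	int requested = 512;		/* bytes the caller asked for */
	int read_overruns = 4;		/* bytes held back on reads (assumed) */
	int is_read = 1;

	int dma_count = requested;
	if (is_read && read_overruns)
		dma_count -= read_overruns;

	printf("program DMA for %d bytes, fetch remaining %d separately\n",
	       dma_count, requested - dma_count);
	return 0;
}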
1970 2021
1971/* 2022/*
1972 * Function : NCR5380_information_transfer (struct Scsi_Host *instance) 2023 * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
1973 * 2024 *
1974 * Purpose : run through the various SCSI phases and do as the target 2025 * Purpose : run through the various SCSI phases and do as the target
1975 * directs us to. Operates on the currently connected command, 2026 * directs us to. Operates on the currently connected command,
1976 * instance->connected. 2027 * instance->connected.
1977 * 2028 *
1978 * Inputs : instance, instance for which we are doing commands 2029 * Inputs : instance, instance for which we are doing commands
1979 * 2030 *
1980 * Side effects : SCSI things happen, the disconnected queue will be 2031 * Side effects : SCSI things happen, the disconnected queue will be
1981 * modified if a command disconnects, *instance->connected will 2032 * modified if a command disconnects, *instance->connected will
1982 * change. 2033 * change.
1983 * 2034 *
1984 * XXX Note : we need to watch for bus free or a reset condition here 2035 * XXX Note : we need to watch for bus free or a reset condition here
1985 * to recover from an unexpected bus free condition. 2036 * to recover from an unexpected bus free condition.
1986 */ 2037 */
1987 2038
1988static void NCR5380_information_transfer (struct Scsi_Host *instance) 2039static void NCR5380_information_transfer(struct Scsi_Host *instance)
1989{ 2040{
1990 SETUP_HOSTDATA(instance); 2041 SETUP_HOSTDATA(instance);
1991 unsigned long flags; 2042 unsigned long flags;
1992 unsigned char msgout = NOP; 2043 unsigned char msgout = NOP;
1993 int sink = 0; 2044 int sink = 0;
1994 int len; 2045 int len;
1995#if defined(REAL_DMA) 2046#if defined(REAL_DMA)
1996 int transfersize; 2047 int transfersize;
1997#endif 2048#endif
1998 unsigned char *data; 2049 unsigned char *data;
1999 unsigned char phase, tmp, extended_msg[10], old_phase=0xff; 2050 unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
2000 Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected; 2051 Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected;
2052
2053 while (1) {
2054 tmp = NCR5380_read(STATUS_REG);
2055 /* We only have a valid SCSI phase when REQ is asserted */
2056 if (tmp & SR_REQ) {
2057 phase = (tmp & PHASE_MASK);
2058 if (phase != old_phase) {
2059 old_phase = phase;
2060 NCR_PRINT_PHASE(NDEBUG_INFORMATION);
2061 }
2001 2062
2002 while (1) { 2063 if (sink && (phase != PHASE_MSGOUT)) {
2003 tmp = NCR5380_read(STATUS_REG); 2064 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
2004 /* We only have a valid SCSI phase when REQ is asserted */ 2065
2005 if (tmp & SR_REQ) { 2066 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
2006 phase = (tmp & PHASE_MASK); 2067 ICR_ASSERT_ACK);
2007 if (phase != old_phase) { 2068 while (NCR5380_read(STATUS_REG) & SR_REQ)
2008 old_phase = phase; 2069 ;
2009 NCR_PRINT_PHASE(NDEBUG_INFORMATION); 2070 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
2010 } 2071 ICR_ASSERT_ATN);
2011 2072 sink = 0;
2012 if (sink && (phase != PHASE_MSGOUT)) { 2073 continue;
2013 NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); 2074 }
2014 2075
2015 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | 2076 switch (phase) {
2016 ICR_ASSERT_ACK); 2077 case PHASE_DATAOUT:
2017 while (NCR5380_read(STATUS_REG) & SR_REQ);
2018 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
2019 ICR_ASSERT_ATN);
2020 sink = 0;
2021 continue;
2022 }
2023
2024 switch (phase) {
2025 case PHASE_DATAOUT:
2026#if (NDEBUG & NDEBUG_NO_DATAOUT) 2078#if (NDEBUG & NDEBUG_NO_DATAOUT)
2027 printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT " 2079 printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT "
2028 "aborted\n", HOSTNO); 2080 "aborted\n", HOSTNO);
2029 sink = 1; 2081 sink = 1;
2030 do_abort(instance); 2082 do_abort(instance);
2031 cmd->result = DID_ERROR << 16; 2083 cmd->result = DID_ERROR << 16;
2032 cmd->done(cmd); 2084 cmd->done(cmd);
2033 return; 2085 return;
2034#endif 2086#endif
2035 case PHASE_DATAIN: 2087 case PHASE_DATAIN:
2036 /* 2088 /*
2037 * If there is no room left in the current buffer in the 2089 * If there is no room left in the current buffer in the
2038 * scatter-gather list, move onto the next one. 2090 * scatter-gather list, move onto the next one.
2039 */ 2091 */
2040 2092
2041 if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { 2093 if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
2042 ++cmd->SCp.buffer; 2094 ++cmd->SCp.buffer;
2043 --cmd->SCp.buffers_residual; 2095 --cmd->SCp.buffers_residual;
2044 cmd->SCp.this_residual = cmd->SCp.buffer->length; 2096 cmd->SCp.this_residual = cmd->SCp.buffer->length;
2045 cmd->SCp.ptr = page_address(cmd->SCp.buffer->page)+ 2097 cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
2046 cmd->SCp.buffer->offset; 2098 cmd->SCp.buffer->offset;
2047 /* ++roman: Try to merge some scatter-buffers if 2099 /* ++roman: Try to merge some scatter-buffers if
2048 * they are at contiguous physical addresses. 2100 * they are at contiguous physical addresses.
2049 */ 2101 */
2050 merge_contiguous_buffers( cmd ); 2102 merge_contiguous_buffers(cmd);
2051 INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", 2103 INF_PRINTK("scsi%d: %d bytes and %d buffers left\n",
2052 HOSTNO, cmd->SCp.this_residual, 2104 HOSTNO, cmd->SCp.this_residual,
2053 cmd->SCp.buffers_residual); 2105 cmd->SCp.buffers_residual);
2054 } 2106 }
2055 2107
2056 /* 2108 /*
2057 * The preferred transfer method is going to be 2109 * The preferred transfer method is going to be
2058 * PSEUDO-DMA for systems that are strictly PIO, 2110 * PSEUDO-DMA for systems that are strictly PIO,
2059 * since we can let the hardware do the handshaking. 2111 * since we can let the hardware do the handshaking.
2060 * 2112 *
2061 * For this to work, we need to know the transfersize 2113 * For this to work, we need to know the transfersize
2062 * ahead of time, since the pseudo-DMA code will sit 2114 * ahead of time, since the pseudo-DMA code will sit
2063 * in an unconditional loop. 2115 * in an unconditional loop.
2064 */ 2116 */
2065 2117
2066/* ++roman: I suggest, this should be 2118 /* ++roman: I suggest, this should be
2067 * #if def(REAL_DMA) 2119 * #if def(REAL_DMA)
2068 * instead of leaving REAL_DMA out. 2120 * instead of leaving REAL_DMA out.
2069 */ 2121 */
2070 2122
2071#if defined(REAL_DMA) 2123#if defined(REAL_DMA)
2072 if (!cmd->device->borken && 2124 if (!cmd->device->borken &&
2073 (transfersize = NCR5380_dma_xfer_len(instance,cmd,phase)) > 31) { 2125 (transfersize = NCR5380_dma_xfer_len(instance,cmd,phase)) > 31) {
2074 len = transfersize; 2126 len = transfersize;
2075 cmd->SCp.phase = phase; 2127 cmd->SCp.phase = phase;
2076 if (NCR5380_transfer_dma(instance, &phase, 2128 if (NCR5380_transfer_dma(instance, &phase,
2077 &len, (unsigned char **) &cmd->SCp.ptr)) { 2129 &len, (unsigned char **)&cmd->SCp.ptr)) {
2078 /* 2130 /*
2079 * If the watchdog timer fires, all future 2131 * If the watchdog timer fires, all future
2080 * accesses to this device will use the 2132 * accesses to this device will use the
2081 * polled-IO. */ 2133 * polled-IO. */
2082 printk(KERN_NOTICE "scsi%d: switching target %d " 2134 printk(KERN_NOTICE "scsi%d: switching target %d "
2083 "lun %d to slow handshake\n", HOSTNO, 2135 "lun %d to slow handshake\n", HOSTNO,
2084 cmd->device->id, cmd->device->lun); 2136 cmd->device->id, cmd->device->lun);
2085 cmd->device->borken = 1; 2137 cmd->device->borken = 1;
2086 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | 2138 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
2087 ICR_ASSERT_ATN); 2139 ICR_ASSERT_ATN);
2088 sink = 1; 2140 sink = 1;
2089 do_abort(instance); 2141 do_abort(instance);
2090 cmd->result = DID_ERROR << 16; 2142 cmd->result = DID_ERROR << 16;
2091 cmd->done(cmd); 2143 cmd->done(cmd);
2092 /* XXX - need to source or sink data here, as appropriate */ 2144 /* XXX - need to source or sink data here, as appropriate */
2093 } else { 2145 } else {
2094#ifdef REAL_DMA 2146#ifdef REAL_DMA
2095 /* ++roman: When using real DMA, 2147 /* ++roman: When using real DMA,
2096 * information_transfer() should return after 2148 * information_transfer() should return after
2097 * starting DMA since it has nothing more to 2149 * starting DMA since it has nothing more to
2098 * do. 2150 * do.
2099 */ 2151 */
2100 return; 2152 return;
2101#else 2153#else
2102 cmd->SCp.this_residual -= transfersize - len; 2154 cmd->SCp.this_residual -= transfersize - len;
2103#endif 2155#endif
2104 } 2156 }
2105 } else 2157 } else
2106#endif /* defined(REAL_DMA) */ 2158#endif /* defined(REAL_DMA) */
2107 NCR5380_transfer_pio(instance, &phase, 2159 NCR5380_transfer_pio(instance, &phase,
2108 (int *) &cmd->SCp.this_residual, (unsigned char **) 2160 (int *)&cmd->SCp.this_residual,
2109 &cmd->SCp.ptr); 2161 (unsigned char **)&cmd->SCp.ptr);
2110 break; 2162 break;
2111 case PHASE_MSGIN: 2163 case PHASE_MSGIN:
2112 len = 1; 2164 len = 1;
2113 data = &tmp; 2165 data = &tmp;
2114 NCR5380_write(SELECT_ENABLE_REG, 0); /* disable reselects */ 2166 NCR5380_write(SELECT_ENABLE_REG, 0); /* disable reselects */
2115 NCR5380_transfer_pio(instance, &phase, &len, &data); 2167 NCR5380_transfer_pio(instance, &phase, &len, &data);
2116 cmd->SCp.Message = tmp; 2168 cmd->SCp.Message = tmp;
2117 2169
2118 switch (tmp) { 2170 switch (tmp) {
2119 /* 2171 /*
2120 * Linking lets us reduce the time required to get the 2172 * Linking lets us reduce the time required to get the
2121 * next command out to the device, hopefully this will 2173 * next command out to the device, hopefully this will
2122 * mean we don't waste another revolution due to the delays 2174 * mean we don't waste another revolution due to the delays
2123 * required by ARBITRATION and another SELECTION. 2175 * required by ARBITRATION and another SELECTION.
2124 * 2176 *
2125 * In the current implementation proposal, low level drivers 2177 * In the current implementation proposal, low level drivers
2126 * merely have to start the next command, pointed to by 2178 * merely have to start the next command, pointed to by
2127 * next_link, done() is called as with unlinked commands. 2179 * next_link, done() is called as with unlinked commands.
2128 */ 2180 */
2129#ifdef LINKED 2181#ifdef LINKED
2130 case LINKED_CMD_COMPLETE: 2182 case LINKED_CMD_COMPLETE:
2131 case LINKED_FLG_CMD_COMPLETE: 2183 case LINKED_FLG_CMD_COMPLETE:
2132 /* Accept message by clearing ACK */ 2184 /* Accept message by clearing ACK */
2133 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2185 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2134 2186
2135 LNK_PRINTK("scsi%d: target %d lun %d linked command " 2187 LNK_PRINTK("scsi%d: target %d lun %d linked command "
2136 "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); 2188 "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);
2137 2189
2138 /* Enable reselect interrupts */ 2190 /* Enable reselect interrupts */
2139 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2191 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2140 /* 2192 /*
2141 * Sanity check : A linked command should only terminate 2193 * Sanity check : A linked command should only terminate
2142 * with one of these messages if there are more linked 2194 * with one of these messages if there are more linked
2143 * commands available. 2195 * commands available.
2144 */ 2196 */
2145 2197
2146 if (!cmd->next_link) { 2198 if (!cmd->next_link) {
2147 printk(KERN_NOTICE "scsi%d: target %d lun %d " 2199 printk(KERN_NOTICE "scsi%d: target %d lun %d "
2148 "linked command complete, no next_link\n", 2200 "linked command complete, no next_link\n",
2149 HOSTNO, cmd->device->id, cmd->device->lun); 2201 HOSTNO, cmd->device->id, cmd->device->lun);
2150 sink = 1; 2202 sink = 1;
2151 do_abort (instance); 2203 do_abort(instance);
2152 return; 2204 return;
2153 } 2205 }
2154 2206
2155 initialize_SCp(cmd->next_link); 2207 initialize_SCp(cmd->next_link);
2156 /* The next command is still part of this process; copy it 2208 /* The next command is still part of this process; copy it
2157 * and don't free it! */ 2209 * and don't free it! */
2158 cmd->next_link->tag = cmd->tag; 2210 cmd->next_link->tag = cmd->tag;
2159 cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); 2211 cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
2160 LNK_PRINTK("scsi%d: target %d lun %d linked request " 2212 LNK_PRINTK("scsi%d: target %d lun %d linked request "
2161 "done, calling scsi_done().\n", 2213 "done, calling scsi_done().\n",
2162 HOSTNO, cmd->device->id, cmd->device->lun); 2214 HOSTNO, cmd->device->id, cmd->device->lun);
2163#ifdef NCR5380_STATS 2215#ifdef NCR5380_STATS
2164 collect_stats(hostdata, cmd); 2216 collect_stats(hostdata, cmd);
2165#endif 2217#endif
2166 cmd->scsi_done(cmd); 2218 cmd->scsi_done(cmd);
2167 cmd = hostdata->connected; 2219 cmd = hostdata->connected;
2168 break; 2220 break;
2169#endif /* def LINKED */ 2221#endif /* def LINKED */
2170 case ABORT: 2222 case ABORT:
2171 case COMMAND_COMPLETE: 2223 case COMMAND_COMPLETE:
2172 /* Accept message by clearing ACK */ 2224 /* Accept message by clearing ACK */
2173 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2225 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2174 /* ++guenther: possible race with Falcon locking */ 2226 /* ++guenther: possible race with Falcon locking */
2175 falcon_dont_release++; 2227 falcon_dont_release++;
2176 hostdata->connected = NULL; 2228 hostdata->connected = NULL;
2177 QU_PRINTK("scsi%d: command for target %d, lun %d " 2229 QU_PRINTK("scsi%d: command for target %d, lun %d "
2178 "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); 2230 "completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
2179#ifdef SUPPORT_TAGS 2231#ifdef SUPPORT_TAGS
2180 cmd_free_tag( cmd ); 2232 cmd_free_tag(cmd);
2181 if (status_byte(cmd->SCp.Status) == QUEUE_FULL) { 2233 if (status_byte(cmd->SCp.Status) == QUEUE_FULL) {
2182 /* Turn a QUEUE FULL status into BUSY, I think the 2234 /* Turn a QUEUE FULL status into BUSY, I think the
2183 * mid level cannot handle QUEUE FULL :-( (The 2235 * mid level cannot handle QUEUE FULL :-( (The
2184 * command is retried after BUSY). Also update our 2236 * command is retried after BUSY). Also update our
2185 * queue size to the number of currently issued 2237 * queue size to the number of currently issued
2186 * commands now. 2238 * commands now.
2187 */ 2239 */
2188 /* ++Andreas: the mid level code knows about 2240 /* ++Andreas: the mid level code knows about
2189 QUEUE_FULL now. */ 2241 QUEUE_FULL now. */
2190 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; 2242 TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
2191 TAG_PRINTK("scsi%d: target %d lun %d returned " 2243 TAG_PRINTK("scsi%d: target %d lun %d returned "
2192 "QUEUE_FULL after %d commands\n", 2244 "QUEUE_FULL after %d commands\n",
2193 HOSTNO, cmd->device->id, cmd->device->lun, 2245 HOSTNO, cmd->device->id, cmd->device->lun,
2194 ta->nr_allocated); 2246 ta->nr_allocated);
2195 if (ta->queue_size > ta->nr_allocated) 2247 if (ta->queue_size > ta->nr_allocated)
2196 ta->nr_allocated = ta->queue_size; 2248 ta->nr_allocated = ta->queue_size;
2197 } 2249 }
2198#else 2250#else
2199 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); 2251 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
2200#endif 2252#endif
2201 /* Enable reselect interrupts */ 2253 /* Enable reselect interrupts */
2202 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2254 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2203 2255
2204 /* 2256 /*
2205 * I'm not sure what the correct thing to do here is : 2257 * I'm not sure what the correct thing to do here is :
2206 * 2258 *
2207 * If the command that just executed is NOT a request 2259 * If the command that just executed is NOT a request
2208 * sense, the obvious thing to do is to set the result 2260 * sense, the obvious thing to do is to set the result
2209 * code to the values of the stored parameters. 2261 * code to the values of the stored parameters.
2210 * 2262 *
2211 * If it was a REQUEST SENSE command, we need some way to 2263 * If it was a REQUEST SENSE command, we need some way to
2212 * differentiate between the failure code of the original 2264 * differentiate between the failure code of the original
2213 * and the failure code of the REQUEST sense - the obvious 2265 * and the failure code of the REQUEST sense - the obvious
2214 * case is success, where we fall through and leave the 2266 * case is success, where we fall through and leave the
2215 * result code unchanged. 2267 * result code unchanged.
2216 * 2268 *
2217 * The non-obvious place is where the REQUEST SENSE failed 2269 * The non-obvious place is where the REQUEST SENSE failed
2218 */ 2270 */
2219 2271
2220 if (cmd->cmnd[0] != REQUEST_SENSE) 2272 if (cmd->cmnd[0] != REQUEST_SENSE)
2221 cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); 2273 cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
2222 else if (status_byte(cmd->SCp.Status) != GOOD) 2274 else if (status_byte(cmd->SCp.Status) != GOOD)
2223 cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); 2275 cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
2224
2225#ifdef AUTOSENSE
2226 if ((cmd->cmnd[0] != REQUEST_SENSE) &&
2227 (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
2228 ASEN_PRINTK("scsi%d: performing request sense\n",
2229 HOSTNO);
2230 cmd->cmnd[0] = REQUEST_SENSE;
2231 cmd->cmnd[1] &= 0xe0;
2232 cmd->cmnd[2] = 0;
2233 cmd->cmnd[3] = 0;
2234 cmd->cmnd[4] = sizeof(cmd->sense_buffer);
2235 cmd->cmnd[5] = 0;
2236 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
2237
2238 cmd->use_sg = 0;
2239 /* this is initialized from initialize_SCp
2240 cmd->SCp.buffer = NULL;
2241 cmd->SCp.buffers_residual = 0;
2242 */
2243 cmd->request_buffer = (char *) cmd->sense_buffer;
2244 cmd->request_bufflen = sizeof(cmd->sense_buffer);
2245 2276
2246 local_irq_save(flags); 2277#ifdef AUTOSENSE
2247 LIST(cmd,hostdata->issue_queue); 2278 if ((cmd->cmnd[0] != REQUEST_SENSE) &&
2248 NEXT(cmd) = hostdata->issue_queue; 2279 (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
2249 hostdata->issue_queue = (Scsi_Cmnd *) cmd; 2280 ASEN_PRINTK("scsi%d: performing request sense\n", HOSTNO);
2250 local_irq_restore(flags); 2281 cmd->cmnd[0] = REQUEST_SENSE;
2251 QU_PRINTK("scsi%d: REQUEST SENSE added to head of " 2282 cmd->cmnd[1] &= 0xe0;
2252 "issue queue\n", H_NO(cmd)); 2283 cmd->cmnd[2] = 0;
2253 } else 2284 cmd->cmnd[3] = 0;
2285 cmd->cmnd[4] = sizeof(cmd->sense_buffer);
2286 cmd->cmnd[5] = 0;
2287 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
2288
2289 cmd->use_sg = 0;
2290 /* this is initialized from initialize_SCp
2291 cmd->SCp.buffer = NULL;
2292 cmd->SCp.buffers_residual = 0;
2293 */
2294 cmd->request_buffer = (char *) cmd->sense_buffer;
2295 cmd->request_bufflen = sizeof(cmd->sense_buffer);
2296
2297 local_irq_save(flags);
2298 LIST(cmd,hostdata->issue_queue);
2299 SET_NEXT(cmd, hostdata->issue_queue);
2300 hostdata->issue_queue = (Scsi_Cmnd *) cmd;
2301 local_irq_restore(flags);
2302 QU_PRINTK("scsi%d: REQUEST SENSE added to head of "
2303 "issue queue\n", H_NO(cmd));
2304 } else
2254#endif /* def AUTOSENSE */ 2305#endif /* def AUTOSENSE */
2255 { 2306 {
2256#ifdef NCR5380_STATS 2307#ifdef NCR5380_STATS
2257 collect_stats(hostdata, cmd); 2308 collect_stats(hostdata, cmd);
2258#endif 2309#endif
2259 cmd->scsi_done(cmd); 2310 cmd->scsi_done(cmd);
2260 } 2311 }
2261 2312
2262 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2313 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2263 /* 2314 /*
2264 * Restore phase bits to 0 so an interrupted selection, 2315 * Restore phase bits to 0 so an interrupted selection,
2265 * arbitration can resume. 2316 * arbitration can resume.
2266 */ 2317 */
2267 NCR5380_write(TARGET_COMMAND_REG, 0); 2318 NCR5380_write(TARGET_COMMAND_REG, 0);
2268 2319
2269 while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) 2320 while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
2270 barrier(); 2321 barrier();
2271 2322
2272 falcon_dont_release--; 2323 falcon_dont_release--;
2273 /* ++roman: For Falcon SCSI, release the lock on the 2324 /* ++roman: For Falcon SCSI, release the lock on the
2274 * ST-DMA here if no other commands are waiting on the 2325 * ST-DMA here if no other commands are waiting on the
2275 * disconnected queue. 2326 * disconnected queue.
2276 */ 2327 */
2277 falcon_release_lock_if_possible( hostdata ); 2328 falcon_release_lock_if_possible(hostdata);
2278 return; 2329 return;
2279 case MESSAGE_REJECT: 2330 case MESSAGE_REJECT:
2280 /* Accept message by clearing ACK */ 2331 /* Accept message by clearing ACK */
2281 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2332 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2282 /* Enable reselect interrupts */ 2333 /* Enable reselect interrupts */
2283 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2334 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2284 switch (hostdata->last_message) { 2335 switch (hostdata->last_message) {
2285 case HEAD_OF_QUEUE_TAG: 2336 case HEAD_OF_QUEUE_TAG:
2286 case ORDERED_QUEUE_TAG: 2337 case ORDERED_QUEUE_TAG:
2287 case SIMPLE_QUEUE_TAG: 2338 case SIMPLE_QUEUE_TAG:
2288 /* The target obviously doesn't support tagged 2339 /* The target obviously doesn't support tagged
2289 * queuing, even though it announced this ability in 2340 * queuing, even though it announced this ability in
2290 * its INQUIRY data ?!? (maybe only this LUN?) Ok, 2341 * its INQUIRY data ?!? (maybe only this LUN?) Ok,
2291 * clear 'tagged_supported' and lock the LUN, since 2342 * clear 'tagged_supported' and lock the LUN, since
2292 * the command is treated as untagged further on. 2343 * the command is treated as untagged further on.
2293 */ 2344 */
2294 cmd->device->tagged_supported = 0; 2345 cmd->device->tagged_supported = 0;
2295 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); 2346 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
2296 cmd->tag = TAG_NONE; 2347 cmd->tag = TAG_NONE;
2297 TAG_PRINTK("scsi%d: target %d lun %d rejected " 2348 TAG_PRINTK("scsi%d: target %d lun %d rejected "
2298 "QUEUE_TAG message; tagged queuing " 2349 "QUEUE_TAG message; tagged queuing "
2299 "disabled\n", 2350 "disabled\n",
2300 HOSTNO, cmd->device->id, cmd->device->lun); 2351 HOSTNO, cmd->device->id, cmd->device->lun);
2301 break; 2352 break;
2302 } 2353 }
2303 break; 2354 break;
2304 case DISCONNECT: 2355 case DISCONNECT:
2305 /* Accept message by clearing ACK */ 2356 /* Accept message by clearing ACK */
2306 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2357 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2307 local_irq_save(flags); 2358 local_irq_save(flags);
2308 cmd->device->disconnect = 1; 2359 cmd->device->disconnect = 1;
2309 LIST(cmd,hostdata->disconnected_queue); 2360 LIST(cmd,hostdata->disconnected_queue);
2310 NEXT(cmd) = hostdata->disconnected_queue; 2361 SET_NEXT(cmd, hostdata->disconnected_queue);
2311 hostdata->connected = NULL; 2362 hostdata->connected = NULL;
2312 hostdata->disconnected_queue = cmd; 2363 hostdata->disconnected_queue = cmd;
2313 local_irq_restore(flags); 2364 local_irq_restore(flags);
2314 QU_PRINTK("scsi%d: command for target %d lun %d was " 2365 QU_PRINTK("scsi%d: command for target %d lun %d was "
2315 "moved from connected to the " 2366 "moved from connected to the "
2316 "disconnected_queue\n", HOSTNO, 2367 "disconnected_queue\n", HOSTNO,
2317 cmd->device->id, cmd->device->lun); 2368 cmd->device->id, cmd->device->lun);
2318 /* 2369 /*
2319 * Restore phase bits to 0 so an interrupted selection, 2370 * Restore phase bits to 0 so an interrupted selection,
2320 * arbitration can resume. 2371 * arbitration can resume.
2321 */ 2372 */
2322 NCR5380_write(TARGET_COMMAND_REG, 0); 2373 NCR5380_write(TARGET_COMMAND_REG, 0);
2323 2374
2324 /* Enable reselect interrupts */ 2375 /* Enable reselect interrupts */
2325 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2376 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2326 /* Wait for bus free to avoid nasty timeouts */ 2377 /* Wait for bus free to avoid nasty timeouts */
2327 while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) 2378 while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
2328 barrier(); 2379 barrier();
2329 return; 2380 return;
2330 /* 2381 /*
2331 * The SCSI data pointer is *IMPLICITLY* saved on a disconnect 2382 * The SCSI data pointer is *IMPLICITLY* saved on a disconnect
2332 * operation, in violation of the SCSI spec so we can safely 2383 * operation, in violation of the SCSI spec so we can safely
2333 * ignore SAVE/RESTORE pointers calls. 2384 * ignore SAVE/RESTORE pointers calls.
2334 * 2385 *
2335 * Unfortunately, some disks violate the SCSI spec and 2386 * Unfortunately, some disks violate the SCSI spec and
2336 * don't issue the required SAVE_POINTERS message before 2387 * don't issue the required SAVE_POINTERS message before
2337 * disconnecting, and we have to break spec to remain 2388 * disconnecting, and we have to break spec to remain
2338 * compatible. 2389 * compatible.
2339 */ 2390 */
2340 case SAVE_POINTERS: 2391 case SAVE_POINTERS:
2341 case RESTORE_POINTERS: 2392 case RESTORE_POINTERS:
2342 /* Accept message by clearing ACK */ 2393 /* Accept message by clearing ACK */
2343 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2394 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2344 /* Enable reselect interrupts */ 2395 /* Enable reselect interrupts */
2345 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2396 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2346 break; 2397 break;
2347 case EXTENDED_MESSAGE: 2398 case EXTENDED_MESSAGE:
2348/* 2399 /*
2349 * Extended messages are sent in the following format : 2400 * Extended messages are sent in the following format :
2350 * Byte 2401 * Byte
2351 * 0 EXTENDED_MESSAGE == 1 2402 * 0 EXTENDED_MESSAGE == 1
2352 * 1 length (includes one byte for code, doesn't 2403 * 1 length (includes one byte for code, doesn't
2353 * include first two bytes) 2404 * include first two bytes)
2354 * 2 code 2405 * 2 code
2355 * 3..length+1 arguments 2406 * 3..length+1 arguments
2356 * 2407 *
2357 * Start the extended message buffer with the EXTENDED_MESSAGE 2408 * Start the extended message buffer with the EXTENDED_MESSAGE
2358 * byte, since spi_print_msg() wants the whole thing. 2409 * byte, since spi_print_msg() wants the whole thing.
2359 */ 2410 */
2360 extended_msg[0] = EXTENDED_MESSAGE; 2411 extended_msg[0] = EXTENDED_MESSAGE;
2361 /* Accept first byte by clearing ACK */ 2412 /* Accept first byte by clearing ACK */
2362 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2413 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2363 2414
2364 EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO); 2415 EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO);
2365 2416
2366 len = 2; 2417 len = 2;
2367 data = extended_msg + 1; 2418 data = extended_msg + 1;
2368 phase = PHASE_MSGIN; 2419 phase = PHASE_MSGIN;
2369 NCR5380_transfer_pio(instance, &phase, &len, &data); 2420 NCR5380_transfer_pio(instance, &phase, &len, &data);
2370 EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO, 2421 EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO,
2371 (int)extended_msg[1], (int)extended_msg[2]); 2422 (int)extended_msg[1], (int)extended_msg[2]);
2372 2423
2373 if (!len && extended_msg[1] <= 2424 if (!len && extended_msg[1] <=
2374 (sizeof (extended_msg) - 1)) { 2425 (sizeof(extended_msg) - 1)) {
2375 /* Accept third byte by clearing ACK */ 2426 /* Accept third byte by clearing ACK */
2376 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2427 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2377 len = extended_msg[1] - 1; 2428 len = extended_msg[1] - 1;
2378 data = extended_msg + 3; 2429 data = extended_msg + 3;
2379 phase = PHASE_MSGIN; 2430 phase = PHASE_MSGIN;
2380 2431
2381 NCR5380_transfer_pio(instance, &phase, &len, &data); 2432 NCR5380_transfer_pio(instance, &phase, &len, &data);
2382 EXT_PRINTK("scsi%d: message received, residual %d\n", 2433 EXT_PRINTK("scsi%d: message received, residual %d\n",
2383 HOSTNO, len); 2434 HOSTNO, len);
2384 2435
2385 switch (extended_msg[2]) { 2436 switch (extended_msg[2]) {
2386 case EXTENDED_SDTR: 2437 case EXTENDED_SDTR:
2387 case EXTENDED_WDTR: 2438 case EXTENDED_WDTR:
2388 case EXTENDED_MODIFY_DATA_POINTER: 2439 case EXTENDED_MODIFY_DATA_POINTER:
2389 case EXTENDED_EXTENDED_IDENTIFY: 2440 case EXTENDED_EXTENDED_IDENTIFY:
2390 tmp = 0; 2441 tmp = 0;
2391 } 2442 }
2392 } else if (len) { 2443 } else if (len) {
2393 printk(KERN_NOTICE "scsi%d: error receiving " 2444 printk(KERN_NOTICE "scsi%d: error receiving "
2394 "extended message\n", HOSTNO); 2445 "extended message\n", HOSTNO);
2395 tmp = 0; 2446 tmp = 0;
2396 } else { 2447 } else {
2397 printk(KERN_NOTICE "scsi%d: extended message " 2448 printk(KERN_NOTICE "scsi%d: extended message "
2398 "code %02x length %d is too long\n", 2449 "code %02x length %d is too long\n",
2399 HOSTNO, extended_msg[2], extended_msg[1]); 2450 HOSTNO, extended_msg[2], extended_msg[1]);
2400 tmp = 0; 2451 tmp = 0;
2401 } 2452 }
2402 /* Fall through to reject message */ 2453 /* Fall through to reject message */
2403 2454
2404 /* 2455 /*
2405 * If we get something weird that we aren't expecting, 2456 * If we get something weird that we aren't expecting,
2406 * reject it. 2457 * reject it.
2407 */ 2458 */
2408 default: 2459 default:
2409 if (!tmp) { 2460 if (!tmp) {
2410 printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO); 2461 printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO);
2411 spi_print_msg(extended_msg); 2462 spi_print_msg(extended_msg);
2412 printk("\n"); 2463 printk("\n");
2413 } else if (tmp != EXTENDED_MESSAGE) 2464 } else if (tmp != EXTENDED_MESSAGE)
2414 printk(KERN_DEBUG "scsi%d: rejecting unknown " 2465 printk(KERN_DEBUG "scsi%d: rejecting unknown "
2415 "message %02x from target %d, lun %d\n", 2466 "message %02x from target %d, lun %d\n",
2416 HOSTNO, tmp, cmd->device->id, cmd->device->lun); 2467 HOSTNO, tmp, cmd->device->id, cmd->device->lun);
2417 else 2468 else
2418 printk(KERN_DEBUG "scsi%d: rejecting unknown " 2469 printk(KERN_DEBUG "scsi%d: rejecting unknown "
2419 "extended message " 2470 "extended message "
2420 "code %02x, length %d from target %d, lun %d\n", 2471 "code %02x, length %d from target %d, lun %d\n",
2421 HOSTNO, extended_msg[1], extended_msg[0], 2472 HOSTNO, extended_msg[1], extended_msg[0],
2422 cmd->device->id, cmd->device->lun); 2473 cmd->device->id, cmd->device->lun);
2423 2474
2424 2475
2425 msgout = MESSAGE_REJECT; 2476 msgout = MESSAGE_REJECT;
2426 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | 2477 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
2427 ICR_ASSERT_ATN); 2478 break;
2428 break; 2479 } /* switch (tmp) */
2429 } /* switch (tmp) */ 2480 break;
2430 break; 2481 case PHASE_MSGOUT:
2431 case PHASE_MSGOUT: 2482 len = 1;
2432 len = 1; 2483 data = &msgout;
2433 data = &msgout; 2484 hostdata->last_message = msgout;
2434 hostdata->last_message = msgout; 2485 NCR5380_transfer_pio(instance, &phase, &len, &data);
2435 NCR5380_transfer_pio(instance, &phase, &len, &data); 2486 if (msgout == ABORT) {
2436 if (msgout == ABORT) {
2437#ifdef SUPPORT_TAGS 2487#ifdef SUPPORT_TAGS
2438 cmd_free_tag( cmd ); 2488 cmd_free_tag(cmd);
2439#else 2489#else
2440 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); 2490 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
2441#endif 2491#endif
2442 hostdata->connected = NULL; 2492 hostdata->connected = NULL;
2443 cmd->result = DID_ERROR << 16; 2493 cmd->result = DID_ERROR << 16;
2444#ifdef NCR5380_STATS 2494#ifdef NCR5380_STATS
2445 collect_stats(hostdata, cmd); 2495 collect_stats(hostdata, cmd);
2446#endif 2496#endif
2447 cmd->scsi_done(cmd); 2497 cmd->scsi_done(cmd);
2448 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); 2498 NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
2449 falcon_release_lock_if_possible( hostdata ); 2499 falcon_release_lock_if_possible(hostdata);
2450 return; 2500 return;
2451 } 2501 }
2452 msgout = NOP; 2502 msgout = NOP;
2453 break; 2503 break;
2454 case PHASE_CMDOUT: 2504 case PHASE_CMDOUT:
2455 len = cmd->cmd_len; 2505 len = cmd->cmd_len;
2456 data = cmd->cmnd; 2506 data = cmd->cmnd;
2457 /* 2507 /*
2458 * XXX for performance reasons, on machines with a 2508 * XXX for performance reasons, on machines with a
2459 * PSEUDO-DMA architecture we should probably 2509 * PSEUDO-DMA architecture we should probably
2460 * use the dma transfer function. 2510 * use the dma transfer function.
2461 */ 2511 */
2462 NCR5380_transfer_pio(instance, &phase, &len, 2512 NCR5380_transfer_pio(instance, &phase, &len, &data);
2463 &data); 2513 break;
2464 break; 2514 case PHASE_STATIN:
2465 case PHASE_STATIN: 2515 len = 1;
2466 len = 1; 2516 data = &tmp;
2467 data = &tmp; 2517 NCR5380_transfer_pio(instance, &phase, &len, &data);
2468 NCR5380_transfer_pio(instance, &phase, &len, &data); 2518 cmd->SCp.Status = tmp;
2469 cmd->SCp.Status = tmp; 2519 break;
2470 break; 2520 default:
2471 default: 2521 printk("scsi%d: unknown phase\n", HOSTNO);
2472 printk("scsi%d: unknown phase\n", HOSTNO); 2522 NCR_PRINT(NDEBUG_ANY);
2473 NCR_PRINT(NDEBUG_ANY); 2523 } /* switch(phase) */
2474 } /* switch(phase) */ 2524 } /* if (tmp & SR_REQ) */
2475 } /* if (tmp & SR_REQ) */ 2525 } /* while (1) */
2476 } /* while (1) */
2477} 2526}
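The information-transfer loop above follows a simple pattern: read the status register, treat the phase bits as valid only while REQ is asserted, then dispatch on the phase. A stripped-down, standalone skeleton of that dispatch is shown below with register access stubbed out; the bit values assume the usual NCR5380 status layout (REQ = 0x20, phase = MSG/CD/IO bits) and are an assumption, not a quote from this driver's headers.

/* Skeleton sketch; hardware access is faked and bit layout is assumed. */
#include <stdio.h>

#define SR_REQ     0x20
#define PHASE_MASK 0x1c			/* MSG, C/D, I/O bits (assumed) */

static unsigned char read_status(void)
{
	static unsigned char fake[] = { 0x00, 0x28, 0x2c, 0x3c };
	static unsigned int i;
	return fake[i++ % 4];
}

int main(void)
{
	for (int iter = 0; iter < 4; iter++) {
		unsigned char tmp = read_status();

		if (!(tmp & SR_REQ))
			continue;	/* phase bits not valid without REQ */

		switch (tmp & PHASE_MASK) {
		case 0x08:
			puts("command out");
			break;
		case 0x0c:
			puts("status in");
			break;
		case 0x1c:
			puts("message in");
			break;
		default:
			puts("other phase");
		}
	}
	return 0;
}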
2478 2527
2479/* 2528/*
2480 * Function : void NCR5380_reselect (struct Scsi_Host *instance) 2529 * Function : void NCR5380_reselect (struct Scsi_Host *instance)
2481 * 2530 *
2482 * Purpose : does reselection, initializing the instance->connected 2531 * Purpose : does reselection, initializing the instance->connected
2483 * field to point to the Scsi_Cmnd for which the I_T_L or I_T_L_Q 2532 * field to point to the Scsi_Cmnd for which the I_T_L or I_T_L_Q
2484 * nexus has been reestablished, 2533 * nexus has been reestablished,
2485 * 2534 *
2486 * Inputs : instance - this instance of the NCR5380. 2535 * Inputs : instance - this instance of the NCR5380.
2487 * 2536 *
2488 */ 2537 */
2489 2538
2490 2539
2491static void NCR5380_reselect (struct Scsi_Host *instance) 2540static void NCR5380_reselect(struct Scsi_Host *instance)
2492{ 2541{
2493 SETUP_HOSTDATA(instance); 2542 SETUP_HOSTDATA(instance);
2494 unsigned char target_mask; 2543 unsigned char target_mask;
2495 unsigned char lun, phase; 2544 unsigned char lun, phase;
2496 int len; 2545 int len;
2497#ifdef SUPPORT_TAGS 2546#ifdef SUPPORT_TAGS
2498 unsigned char tag; 2547 unsigned char tag;
2499#endif 2548#endif
2500 unsigned char msg[3]; 2549 unsigned char msg[3];
2501 unsigned char *data; 2550 unsigned char *data;
2502 Scsi_Cmnd *tmp = NULL, *prev; 2551 Scsi_Cmnd *tmp = NULL, *prev;
2503/* unsigned long flags; */ 2552/* unsigned long flags; */
2504 2553
2505 /* 2554 /*
2506 * Disable arbitration, etc. since the host adapter obviously 2555 * Disable arbitration, etc. since the host adapter obviously
2507 * lost, and tell an interrupted NCR5380_select() to restart. 2556 * lost, and tell an interrupted NCR5380_select() to restart.
2508 */ 2557 */
2509 2558
2510 NCR5380_write(MODE_REG, MR_BASE); 2559 NCR5380_write(MODE_REG, MR_BASE);
2511 hostdata->restart_select = 1; 2560 hostdata->restart_select = 1;
2512 2561
2513 target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); 2562 target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
2514 2563
2515 RSL_PRINTK("scsi%d: reselect\n", HOSTNO); 2564 RSL_PRINTK("scsi%d: reselect\n", HOSTNO);
2516 2565
2517 /* 2566 /*
2518 * At this point, we have detected that our SCSI ID is on the bus, 2567 * At this point, we have detected that our SCSI ID is on the bus,
2519 * SEL is true and BSY was false for at least one bus settle delay 2568 * SEL is true and BSY was false for at least one bus settle delay
2520 * (400 ns). 2569 * (400 ns).
2521 * 2570 *
2522 * We must assert BSY ourselves, until the target drops the SEL 2571 * We must assert BSY ourselves, until the target drops the SEL
2523 * signal. 2572 * signal.
2524 */ 2573 */
2525 2574
2526 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); 2575 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
2527 2576
2528 while (NCR5380_read(STATUS_REG) & SR_SEL); 2577 while (NCR5380_read(STATUS_REG) & SR_SEL)
2529 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2578 ;
2530 2579 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2531 /* 2580
2532 * Wait for target to go into MSGIN. 2581 /*
2533 */ 2582 * Wait for target to go into MSGIN.
2534 2583 */
2535 while (!(NCR5380_read(STATUS_REG) & SR_REQ)); 2584
2536 2585 while (!(NCR5380_read(STATUS_REG) & SR_REQ))
2537 len = 1; 2586 ;
2538 data = msg; 2587
2539 phase = PHASE_MSGIN; 2588 len = 1;
2540 NCR5380_transfer_pio(instance, &phase, &len, &data); 2589 data = msg;
2541 2590 phase = PHASE_MSGIN;
2542 if (!(msg[0] & 0x80)) { 2591 NCR5380_transfer_pio(instance, &phase, &len, &data);
2543 printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO); 2592
2544 spi_print_msg(msg); 2593 if (!(msg[0] & 0x80)) {
2545 do_abort(instance); 2594 printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO);
2546 return; 2595 spi_print_msg(msg);
2547 } 2596 do_abort(instance);
2548 lun = (msg[0] & 0x07); 2597 return;
2598 }
2599 lun = (msg[0] & 0x07);
2549 2600
2550#ifdef SUPPORT_TAGS 2601#ifdef SUPPORT_TAGS
2551 /* If the phase is still MSGIN, the target wants to send some more 2602 /* If the phase is still MSGIN, the target wants to send some more
2552 * messages. In case it supports tagged queuing, this is probably a 2603 * messages. In case it supports tagged queuing, this is probably a
2553 * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. 2604 * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus.
2554 */ 2605 */
2555 tag = TAG_NONE; 2606 tag = TAG_NONE;
2556 if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { 2607 if (phase == PHASE_MSGIN && setup_use_tagged_queuing) {
2557 /* Accept previous IDENTIFY message by clearing ACK */ 2608 /* Accept previous IDENTIFY message by clearing ACK */
2558 NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); 2609 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2559 len = 2; 2610 len = 2;
2560 data = msg+1; 2611 data = msg + 1;
2561 if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && 2612 if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
2562 msg[1] == SIMPLE_QUEUE_TAG) 2613 msg[1] == SIMPLE_QUEUE_TAG)
2563 tag = msg[2]; 2614 tag = msg[2];
2564 TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at " 2615 TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at "
2565 "reselection\n", HOSTNO, target_mask, lun, tag); 2616 "reselection\n", HOSTNO, target_mask, lun, tag);
2566 } 2617 }
2567#endif 2618#endif
2568 2619
2569 /* 2620 /*
2570 * Find the command corresponding to the I_T_L or I_T_L_Q nexus we 2621 * Find the command corresponding to the I_T_L or I_T_L_Q nexus we
2571 * just reestablished, and remove it from the disconnected queue. 2622 * just reestablished, and remove it from the disconnected queue.
2572 */ 2623 */
2573 2624
2574 for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL; 2625 for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL;
2575 tmp; prev = tmp, tmp = NEXT(tmp) ) { 2626 tmp; prev = tmp, tmp = NEXT(tmp)) {
2576 if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun) 2627 if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun)
2577#ifdef SUPPORT_TAGS 2628#ifdef SUPPORT_TAGS
2578 && (tag == tmp->tag) 2629 && (tag == tmp->tag)
2579#endif 2630#endif
2580 ) { 2631 ) {
2581 /* ++guenther: prevent race with falcon_release_lock */ 2632 /* ++guenther: prevent race with falcon_release_lock */
2582 falcon_dont_release++; 2633 falcon_dont_release++;
2583 if (prev) { 2634 if (prev) {
2584 REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); 2635 REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
2585 NEXT(prev) = NEXT(tmp); 2636 SET_NEXT(prev, NEXT(tmp));
2586 } else { 2637 } else {
2587 REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp)); 2638 REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp));
2588 hostdata->disconnected_queue = NEXT(tmp); 2639 hostdata->disconnected_queue = NEXT(tmp);
2589 } 2640 }
2590 NEXT(tmp) = NULL; 2641 SET_NEXT(tmp, NULL);
2591 break; 2642 break;
2643 }
2592 } 2644 }
2593 } 2645
2594 2646 if (!tmp) {
2595 if (!tmp) { 2647 printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d "
2596 printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d "
2597#ifdef SUPPORT_TAGS 2648#ifdef SUPPORT_TAGS
2598 "tag %d " 2649 "tag %d "
2599#endif 2650#endif
2600 "not in disconnected_queue.\n", 2651 "not in disconnected_queue.\n",
2601 HOSTNO, target_mask, lun 2652 HOSTNO, target_mask, lun
2602#ifdef SUPPORT_TAGS 2653#ifdef SUPPORT_TAGS
2603 , tag 2654 , tag
2604#endif 2655#endif
2605 ); 2656 );
2606 /* 2657 /*
2607 * Since we have an established nexus that we can't do anything 2658 * Since we have an established nexus that we can't do anything
2608 * with, we must abort it. 2659 * with, we must abort it.
2609 */ 2660 */
2610 do_abort(instance); 2661 do_abort(instance);
2611 return; 2662 return;
2612 } 2663 }
2613 2664
2614 /* Accept message by clearing ACK */ 2665 /* Accept message by clearing ACK */
2615 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); 2666 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2616 2667
2617 hostdata->connected = tmp; 2668 hostdata->connected = tmp;
2618 RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", 2669 RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
2619 HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); 2670 HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
2620 falcon_dont_release--; 2671 falcon_dont_release--;
2621} 2672}
2622 2673
2623 2674
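The reselection path above walks hostdata->disconnected_queue looking for the command whose target mask, LUN (and, when SUPPORT_TAGS is enabled, tag) match the re-established nexus, then unlinks it with the new SET_NEXT() accessor. The following is a minimal, standalone sketch of that unlink pattern; the struct and helper names are hypothetical stand-ins for the driver's Scsi_Cmnd and NEXT()/SET_NEXT() machinery, not the real types.

/* Standalone sketch (hypothetical types); the driver's real queue uses
 * Scsi_Cmnd and the NEXT()/SET_NEXT() accessors shown in the hunk above. */
#include <stddef.h>

struct cmd {
	struct cmd *next;
	int target_mask, lun, tag;
};

/* Unlink and return the first command matching the reselected nexus,
 * or NULL if none is queued (the do_abort() fallback above). */
static struct cmd *unlink_nexus(struct cmd **queue, int target_mask, int lun, int tag)
{
	struct cmd *tmp, *prev;

	for (prev = NULL, tmp = *queue; tmp; prev = tmp, tmp = tmp->next) {
		if (tmp->target_mask == target_mask && tmp->lun == lun &&
		    tmp->tag == tag) {
			if (prev)
				prev->next = tmp->next;	/* splice out of the middle */
			else
				*queue = tmp->next;	/* command was the list head */
			tmp->next = NULL;
			return tmp;
		}
	}
	return NULL;
}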
@@ -2626,362 +2677,361 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
2626 * 2677 *
2627 * Purpose : abort a command 2678 * Purpose : abort a command
2628 * 2679 *
2629 * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the 2680 * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the
2630 * host byte of the result field to, if zero DID_ABORTED is 2681 * host byte of the result field to, if zero DID_ABORTED is
2631 * used. 2682 * used.
2632 * 2683 *
2633 * Returns : 0 - success, -1 on failure. 2684 * Returns : 0 - success, -1 on failure.
2634 * 2685 *
2635 * XXX - there is no way to abort the command that is currently 2686 * XXX - there is no way to abort the command that is currently
2636 * connected, you have to wait for it to complete. If this is 2687 * connected, you have to wait for it to complete. If this is
2637 * a problem, we could implement longjmp() / setjmp(), setjmp() 2688 * a problem, we could implement longjmp() / setjmp(), setjmp()
2638 * called where the loop started in NCR5380_main(). 2689 * called where the loop started in NCR5380_main().
2639 */ 2690 */
2640 2691
2641static 2692static
2642int NCR5380_abort (Scsi_Cmnd *cmd) 2693int NCR5380_abort(Scsi_Cmnd *cmd)
2643{ 2694{
2644 struct Scsi_Host *instance = cmd->device->host; 2695 struct Scsi_Host *instance = cmd->device->host;
2645 SETUP_HOSTDATA(instance); 2696 SETUP_HOSTDATA(instance);
2646 Scsi_Cmnd *tmp, **prev; 2697 Scsi_Cmnd *tmp, **prev;
2647 unsigned long flags; 2698 unsigned long flags;
2699
2700 printk(KERN_NOTICE "scsi%d: aborting command\n", HOSTNO);
2701 scsi_print_command(cmd);
2648 2702
2649 printk(KERN_NOTICE "scsi%d: aborting command\n", HOSTNO); 2703 NCR5380_print_status(instance);
2650 scsi_print_command(cmd);
2651 2704
2652 NCR5380_print_status (instance); 2705 local_irq_save(flags);
2653 2706
2654 local_irq_save(flags); 2707 if (!IS_A_TT() && !falcon_got_lock)
2655 2708 printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n",
2656 if (!IS_A_TT() && !falcon_got_lock) 2709 HOSTNO);
2657 printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n",
2658 HOSTNO);
2659 2710
2660 ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, 2711 ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
2661 NCR5380_read(BUS_AND_STATUS_REG), 2712 NCR5380_read(BUS_AND_STATUS_REG),
2662 NCR5380_read(STATUS_REG)); 2713 NCR5380_read(STATUS_REG));
2663 2714
2664#if 1 2715#if 1
2665/* 2716 /*
2666 * Case 1 : If the command is the currently executing command, 2717 * Case 1 : If the command is the currently executing command,
2667 * we'll set the aborted flag and return control so that 2718 * we'll set the aborted flag and return control so that
2668 * information transfer routine can exit cleanly. 2719 * information transfer routine can exit cleanly.
2669 */ 2720 */
2670 2721
2671 if (hostdata->connected == cmd) { 2722 if (hostdata->connected == cmd) {
2672 2723
2673 ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO); 2724 ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO);
2674/* 2725 /*
2675 * We should perform BSY checking, and make sure we haven't slipped 2726 * We should perform BSY checking, and make sure we haven't slipped
2676 * into BUS FREE. 2727 * into BUS FREE.
2677 */ 2728 */
2678 2729
2679/* NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */ 2730 /* NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */
2680/* 2731 /*
2681 * Since we can't change phases until we've completed the current 2732 * Since we can't change phases until we've completed the current
2682 * handshake, we have to source or sink a byte of data if the current 2733 * handshake, we have to source or sink a byte of data if the current
2683 * phase is not MSGOUT. 2734 * phase is not MSGOUT.
2684 */ 2735 */
2685 2736
2686/* 2737 /*
2687 * Return control to the executing NCR drive so we can clear the 2738 * Return control to the executing NCR drive so we can clear the
2688 * aborted flag and get back into our main loop. 2739 * aborted flag and get back into our main loop.
2689 */ 2740 */
2690 2741
2691 if (do_abort(instance) == 0) { 2742 if (do_abort(instance) == 0) {
2692 hostdata->aborted = 1; 2743 hostdata->aborted = 1;
2693 hostdata->connected = NULL; 2744 hostdata->connected = NULL;
2694 cmd->result = DID_ABORT << 16; 2745 cmd->result = DID_ABORT << 16;
2695#ifdef SUPPORT_TAGS 2746#ifdef SUPPORT_TAGS
2696 cmd_free_tag( cmd ); 2747 cmd_free_tag(cmd);
2697#else 2748#else
2698 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); 2749 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
2699#endif 2750#endif
2700 local_irq_restore(flags); 2751 local_irq_restore(flags);
2701 cmd->scsi_done(cmd); 2752 cmd->scsi_done(cmd);
2702 falcon_release_lock_if_possible( hostdata ); 2753 falcon_release_lock_if_possible(hostdata);
2703 return SCSI_ABORT_SUCCESS; 2754 return SCSI_ABORT_SUCCESS;
2704 } else { 2755 } else {
2705/* local_irq_restore(flags); */ 2756/* local_irq_restore(flags); */
2706 printk("scsi%d: abort of connected command failed!\n", HOSTNO); 2757 printk("scsi%d: abort of connected command failed!\n", HOSTNO);
2707 return SCSI_ABORT_ERROR; 2758 return SCSI_ABORT_ERROR;
2708 } 2759 }
2709 } 2760 }
2710#endif 2761#endif
2711 2762
2712/* 2763 /*
2713 * Case 2 : If the command hasn't been issued yet, we simply remove it 2764 * Case 2 : If the command hasn't been issued yet, we simply remove it
2714 * from the issue queue. 2765 * from the issue queue.
2715 */ 2766 */
2716 for (prev = (Scsi_Cmnd **) &(hostdata->issue_queue), 2767 for (prev = (Scsi_Cmnd **)&(hostdata->issue_queue),
2717 tmp = (Scsi_Cmnd *) hostdata->issue_queue; 2768 tmp = (Scsi_Cmnd *)hostdata->issue_queue;
2718 tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp) ) 2769 tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) {
2719 if (cmd == tmp) { 2770 if (cmd == tmp) {
2720 REMOVE(5, *prev, tmp, NEXT(tmp)); 2771 REMOVE(5, *prev, tmp, NEXT(tmp));
2721 (*prev) = NEXT(tmp); 2772 (*prev) = NEXT(tmp);
2722 NEXT(tmp) = NULL; 2773 SET_NEXT(tmp, NULL);
2723 tmp->result = DID_ABORT << 16; 2774 tmp->result = DID_ABORT << 16;
2724 local_irq_restore(flags); 2775 local_irq_restore(flags);
2725 ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", 2776 ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n",
2726 HOSTNO); 2777 HOSTNO);
2727 /* Tagged queuing note: no tag to free here, hasn't been assigned 2778 /* Tagged queuing note: no tag to free here, hasn't been assigned
2728 * yet... */ 2779 * yet... */
2729 tmp->scsi_done(tmp); 2780 tmp->scsi_done(tmp);
2730 falcon_release_lock_if_possible( hostdata ); 2781 falcon_release_lock_if_possible(hostdata);
2731 return SCSI_ABORT_SUCCESS; 2782 return SCSI_ABORT_SUCCESS;
2783 }
2732 } 2784 }
2733 2785
2734/* 2786 /*
2735 * Case 3 : If any commands are connected, we're going to fail the abort 2787 * Case 3 : If any commands are connected, we're going to fail the abort
2736 * and let the high level SCSI driver retry at a later time or 2788 * and let the high level SCSI driver retry at a later time or
2737 * issue a reset. 2789 * issue a reset.
2738 * 2790 *
2739 * Timeouts, and therefore aborted commands, will be highly unlikely 2791 * Timeouts, and therefore aborted commands, will be highly unlikely
2740 * and handling them cleanly in this situation would make the common 2792 * and handling them cleanly in this situation would make the common
2741 * case of noresets less efficient, and would pollute our code. So, 2793 * case of noresets less efficient, and would pollute our code. So,
2742 * we fail. 2794 * we fail.
2743 */ 2795 */
2744 2796
2745 if (hostdata->connected) { 2797 if (hostdata->connected) {
2746 local_irq_restore(flags); 2798 local_irq_restore(flags);
2747 ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO); 2799 ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO);
2748 return SCSI_ABORT_SNOOZE; 2800 return SCSI_ABORT_SNOOZE;
2749 } 2801 }
2750 2802
2751/* 2803 /*
2752 * Case 4: If the command is currently disconnected from the bus, and 2804 * Case 4: If the command is currently disconnected from the bus, and
2753 * there are no connected commands, we reconnect the I_T_L or 2805 * there are no connected commands, we reconnect the I_T_L or
2754 * I_T_L_Q nexus associated with it, go into message out, and send 2806 * I_T_L_Q nexus associated with it, go into message out, and send
2755 * an abort message. 2807 * an abort message.
2756 * 2808 *
2757 * This case is especially ugly. In order to reestablish the nexus, we 2809 * This case is especially ugly. In order to reestablish the nexus, we
2758 * need to call NCR5380_select(). The easiest way to implement this 2810 * need to call NCR5380_select(). The easiest way to implement this
2759 * function was to abort if the bus was busy, and let the interrupt 2811 * function was to abort if the bus was busy, and let the interrupt
2760 * handler triggered on the SEL for reselect take care of lost arbitrations 2812 * handler triggered on the SEL for reselect take care of lost arbitrations
2761 * where necessary, meaning interrupts need to be enabled. 2813 * where necessary, meaning interrupts need to be enabled.
2762 * 2814 *
2763 * When interrupts are enabled, the queues may change - so we 2815 * When interrupts are enabled, the queues may change - so we
2764 * can't remove it from the disconnected queue before selecting it 2816 * can't remove it from the disconnected queue before selecting it
2765 * because that could cause a failure in hashing the nexus if that 2817 * because that could cause a failure in hashing the nexus if that
2766 * device reselected. 2818 * device reselected.
2767 * 2819 *
2768 * Since the queues may change, we can't use the pointers from when we 2820 * Since the queues may change, we can't use the pointers from when we
2769 * first locate it. 2821 * first locate it.
2770 * 2822 *
2771 * So, we must first locate the command, and if NCR5380_select() 2823 * So, we must first locate the command, and if NCR5380_select()
2772 * succeeds, then issue the abort, relocate the command and remove 2824 * succeeds, then issue the abort, relocate the command and remove
2773 * it from the disconnected queue. 2825 * it from the disconnected queue.
2774 */ 2826 */
2827
2828 for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp;
2829 tmp = NEXT(tmp)) {
2830 if (cmd == tmp) {
2831 local_irq_restore(flags);
2832 ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO);
2775 2833
2776 for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; 2834 if (NCR5380_select(instance, cmd, (int)cmd->tag))
2777 tmp = NEXT(tmp)) 2835 return SCSI_ABORT_BUSY;
2778 if (cmd == tmp) { 2836
2779 local_irq_restore(flags); 2837 ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO);
2780 ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO); 2838
2781 2839 do_abort(instance);
2782 if (NCR5380_select (instance, cmd, (int) cmd->tag)) 2840
2783 return SCSI_ABORT_BUSY; 2841 local_irq_save(flags);
2784 2842 for (prev = (Scsi_Cmnd **)&(hostdata->disconnected_queue),
2785 ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO); 2843 tmp = (Scsi_Cmnd *)hostdata->disconnected_queue;
2786 2844 tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) {
2787 do_abort (instance); 2845 if (cmd == tmp) {
2788 2846 REMOVE(5, *prev, tmp, NEXT(tmp));
2789 local_irq_save(flags); 2847 *prev = NEXT(tmp);
2790 for (prev = (Scsi_Cmnd **) &(hostdata->disconnected_queue), 2848 SET_NEXT(tmp, NULL);
2791 tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; 2849 tmp->result = DID_ABORT << 16;
2792 tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp) ) 2850 /* We must unlock the tag/LUN immediately here, since the
2793 if (cmd == tmp) { 2851 * target goes to BUS FREE and doesn't send us another
2794 REMOVE(5, *prev, tmp, NEXT(tmp)); 2852 * message (COMMAND_COMPLETE or the like)
2795 *prev = NEXT(tmp); 2853 */
2796 NEXT(tmp) = NULL;
2797 tmp->result = DID_ABORT << 16;
2798 /* We must unlock the tag/LUN immediately here, since the
2799 * target goes to BUS FREE and doesn't send us another
2800 * message (COMMAND_COMPLETE or the like)
2801 */
2802#ifdef SUPPORT_TAGS 2854#ifdef SUPPORT_TAGS
2803 cmd_free_tag( tmp ); 2855 cmd_free_tag(tmp);
2804#else 2856#else
2805 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); 2857 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
2806#endif 2858#endif
2807 local_irq_restore(flags); 2859 local_irq_restore(flags);
2808 tmp->scsi_done(tmp); 2860 tmp->scsi_done(tmp);
2809 falcon_release_lock_if_possible( hostdata ); 2861 falcon_release_lock_if_possible(hostdata);
2810 return SCSI_ABORT_SUCCESS; 2862 return SCSI_ABORT_SUCCESS;
2863 }
2864 }
2811 } 2865 }
2812 } 2866 }
2813 2867
2814/* 2868 /*
2815 * Case 5 : If we reached this point, the command was not found in any of 2869 * Case 5 : If we reached this point, the command was not found in any of
2816 * the queues. 2870 * the queues.
2817 * 2871 *
2818 * We probably reached this point because of an unlikely race condition 2872 * We probably reached this point because of an unlikely race condition
2819 * between the command completing successfully and the abort code, 2873 * between the command completing successfully and the abort code,
2820 * so we won't panic, but we will notify the user in case something really 2874 * so we won't panic, but we will notify the user in case something really
2821 * broke. 2875 * broke.
2822 */ 2876 */
2823 2877
2824 local_irq_restore(flags); 2878 local_irq_restore(flags);
2825 printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully\n" 2879 printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully\n"
2826 KERN_INFO " before abortion\n", HOSTNO); 2880 KERN_INFO " before abortion\n", HOSTNO);
2827 2881
2828/* Maybe it is sufficient just to release the ST-DMA lock... (if 2882 /* Maybe it is sufficient just to release the ST-DMA lock... (if
2829 * possible at all) At least, we should check if the lock could be 2883 * possible at all) At least, we should check if the lock could be
2830 * released after the abort, in case it is kept due to some bug. 2884 * released after the abort, in case it is kept due to some bug.
2831 */ 2885 */
2832 falcon_release_lock_if_possible( hostdata ); 2886 falcon_release_lock_if_possible(hostdata);
2833 2887
2834 return SCSI_ABORT_NOT_RUNNING; 2888 return SCSI_ABORT_NOT_RUNNING;
2835} 2889}
2836 2890
2837 2891
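NCR5380_abort() above tries five cases in order: the command currently connected to the bus, the not-yet-issued queue, any other connected command (fail and snooze), the disconnected queue (reselect the nexus, send an ABORT message, then unlink), and finally "not found anywhere". The outline below restates that decision order as a compilable sketch; every name and the simplified result codes are hypothetical condensations of the driver's SCSI_ABORT_* handling, and the real function additionally manages interrupt flags and the Falcon ST-DMA lock.

/* Hypothetical condensed outline of the abort decision order above. */
enum abort_result {
	ABORT_SUCCESS, ABORT_BUSY, ABORT_ERROR, ABORT_SNOOZE, ABORT_NOT_RUNNING
};

struct host_state {
	void *connected;		/* command currently owning the bus */
	void *issue_queue;		/* commands not yet sent to a target */
	void *disconnected_queue;	/* commands waiting for reselection */
};

static enum abort_result abort_outline(struct host_state *h, void *cmd,
				       int (*bus_abort)(void),
				       void *(*unlink_cmd)(void **, void *))
{
	if (h->connected == cmd)		/* case 1: abort it on the bus */
		return bus_abort() == 0 ? ABORT_SUCCESS : ABORT_ERROR;
	if (unlink_cmd(&h->issue_queue, cmd))	/* case 2: never issued */
		return ABORT_SUCCESS;
	if (h->connected)			/* case 3: bus busy with another cmd */
		return ABORT_SNOOZE;
	/* case 4: reselect the nexus, send ABORT, then unlink; a failed
	 * reselection would map to ABORT_BUSY in the real code. */
	if (unlink_cmd(&h->disconnected_queue, cmd))
		return ABORT_SUCCESS;
	return ABORT_NOT_RUNNING;		/* case 5: probably already completed */
}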
2838/* 2892/*
2839 * Function : int NCR5380_reset (Scsi_Cmnd *cmd) 2893 * Function : int NCR5380_reset (Scsi_Cmnd *cmd)
2840 * 2894 *
2841 * Purpose : reset the SCSI bus. 2895 * Purpose : reset the SCSI bus.
2842 * 2896 *
2843 * Returns : SCSI_RESET_WAKEUP 2897 * Returns : SCSI_RESET_WAKEUP
2844 * 2898 *
2845 */ 2899 */
2846 2900
2847static int NCR5380_bus_reset( Scsi_Cmnd *cmd) 2901static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
2848{ 2902{
2849 SETUP_HOSTDATA(cmd->device->host); 2903 SETUP_HOSTDATA(cmd->device->host);
2850 int i; 2904 int i;
2851 unsigned long flags; 2905 unsigned long flags;
2852#if 1 2906#if 1
2853 Scsi_Cmnd *connected, *disconnected_queue; 2907 Scsi_Cmnd *connected, *disconnected_queue;
2854#endif 2908#endif
2855 2909
2856 if (!IS_A_TT() && !falcon_got_lock) 2910 if (!IS_A_TT() && !falcon_got_lock)
2857 printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_reset\n", 2911 printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_reset\n",
2858 H_NO(cmd) ); 2912 H_NO(cmd));
2859 2913
2860 NCR5380_print_status (cmd->device->host); 2914 NCR5380_print_status(cmd->device->host);
2861 2915
2862 /* get in phase */ 2916 /* get in phase */
2863 NCR5380_write( TARGET_COMMAND_REG, 2917 NCR5380_write(TARGET_COMMAND_REG,
2864 PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); 2918 PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG)));
2865 /* assert RST */ 2919 /* assert RST */
2866 NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); 2920 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST);
2867 udelay (40); 2921 udelay(40);
2868 /* reset NCR registers */ 2922 /* reset NCR registers */
2869 NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); 2923 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
2870 NCR5380_write( MODE_REG, MR_BASE ); 2924 NCR5380_write(MODE_REG, MR_BASE);
2871 NCR5380_write( TARGET_COMMAND_REG, 0 ); 2925 NCR5380_write(TARGET_COMMAND_REG, 0);
2872 NCR5380_write( SELECT_ENABLE_REG, 0 ); 2926 NCR5380_write(SELECT_ENABLE_REG, 0);
2873 /* ++roman: reset interrupt condition! otherwise no interrupts get 2927 /* ++roman: reset interrupt condition! otherwise no interrupts get
2874 * through anymore ... */ 2928 * through anymore ... */
2875 (void)NCR5380_read( RESET_PARITY_INTERRUPT_REG ); 2929 (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
2876 2930
2877#if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */ 2931#if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */
2878 /* XXX see below XXX */ 2932 /* XXX see below XXX */
2879 2933
2880 /* MSch: old-style reset: actually abort all command processing here */ 2934 /* MSch: old-style reset: actually abort all command processing here */
2881 2935
2882 /* After the reset, there are no more connected or disconnected commands 2936 /* After the reset, there are no more connected or disconnected commands
2883 * and no busy units; to avoid problems with re-inserting the commands 2937 * and no busy units; to avoid problems with re-inserting the commands
2884 * into the issue_queue (via scsi_done()), the aborted commands are 2938 * into the issue_queue (via scsi_done()), the aborted commands are
2885 * remembered in local variables first. 2939 * remembered in local variables first.
2886 */ 2940 */
2887 local_irq_save(flags); 2941 local_irq_save(flags);
2888 connected = (Scsi_Cmnd *)hostdata->connected; 2942 connected = (Scsi_Cmnd *)hostdata->connected;
2889 hostdata->connected = NULL; 2943 hostdata->connected = NULL;
2890 disconnected_queue = (Scsi_Cmnd *)hostdata->disconnected_queue; 2944 disconnected_queue = (Scsi_Cmnd *)hostdata->disconnected_queue;
2891 hostdata->disconnected_queue = NULL; 2945 hostdata->disconnected_queue = NULL;
2892#ifdef SUPPORT_TAGS 2946#ifdef SUPPORT_TAGS
2893 free_all_tags(); 2947 free_all_tags();
2894#endif 2948#endif
2895 for( i = 0; i < 8; ++i ) 2949 for (i = 0; i < 8; ++i)
2896 hostdata->busy[i] = 0; 2950 hostdata->busy[i] = 0;
2897#ifdef REAL_DMA 2951#ifdef REAL_DMA
2898 hostdata->dma_len = 0; 2952 hostdata->dma_len = 0;
2899#endif 2953#endif
2900 local_irq_restore(flags); 2954 local_irq_restore(flags);
2901 2955
2902 /* In order to tell the mid-level code which commands were aborted, 2956 /* In order to tell the mid-level code which commands were aborted,
2903 * set the command status to DID_RESET and call scsi_done() !!! 2957 * set the command status to DID_RESET and call scsi_done() !!!
2904 * This ultimately aborts processing of these commands in the mid-level. 2958 * This ultimately aborts processing of these commands in the mid-level.
2905 */ 2959 */
2906 2960
2907 if ((cmd = connected)) { 2961 if ((cmd = connected)) {
2908 ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); 2962 ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
2909 cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); 2963 cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
2910 cmd->scsi_done( cmd ); 2964 cmd->scsi_done(cmd);
2911 } 2965 }
2912
2913 for (i = 0; (cmd = disconnected_queue); ++i) {
2914 disconnected_queue = NEXT(cmd);
2915 NEXT(cmd) = NULL;
2916 cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
2917 cmd->scsi_done( cmd );
2918 }
2919 if (i > 0)
2920 ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i);
2921
2922/* The Falcon lock should be released after a reset...
2923 */
2924/* ++guenther: moved to atari_scsi_reset(), to prevent a race between
2925 * unlocking and enabling dma interrupt.
2926 */
2927/* falcon_release_lock_if_possible( hostdata );*/
2928 2966
2929 /* since all commands have been explicitly terminated, we need to tell 2967 for (i = 0; (cmd = disconnected_queue); ++i) {
2930 * the midlevel code that the reset was SUCCESSFUL, and there is no 2968 disconnected_queue = NEXT(cmd);
2931 * need to 'wake up' the commands by a request_sense 2969 SET_NEXT(cmd, NULL);
2932 */ 2970 cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
2933 return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET; 2971 cmd->scsi_done(cmd);
2972 }
2973 if (i > 0)
2974 ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i);
2975
2976 /* The Falcon lock should be released after a reset...
2977 */
2978 /* ++guenther: moved to atari_scsi_reset(), to prevent a race between
2979 * unlocking and enabling dma interrupt.
2980 */
2981/* falcon_release_lock_if_possible( hostdata );*/
2982
2983 /* since all commands have been explicitly terminated, we need to tell
2984 * the midlevel code that the reset was SUCCESSFUL, and there is no
2985 * need to 'wake up' the commands by a request_sense
2986 */
2987 return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET;
2934#else /* 1 */ 2988#else /* 1 */
2935 2989
2936 /* MSch: new-style reset handling: let the mid-level do what it can */ 2990 /* MSch: new-style reset handling: let the mid-level do what it can */
2937 2991
2938 /* ++guenther: MID-LEVEL IS STILL BROKEN. 2992 /* ++guenther: MID-LEVEL IS STILL BROKEN.
2939 * Mid-level is supposed to requeue all commands that were active on the 2993 * Mid-level is supposed to requeue all commands that were active on the
2940 * various low-level queues. In fact it does this, but that's not enough 2994 * various low-level queues. In fact it does this, but that's not enough
2941 * because all these commands are subject to timeout. And if a timeout 2995 * because all these commands are subject to timeout. And if a timeout
2942 * happens for any removed command, *_abort() is called but all queues 2996 * happens for any removed command, *_abort() is called but all queues
2943 * are now empty. Abort then gives up the falcon lock, which is fatal, 2997 * are now empty. Abort then gives up the falcon lock, which is fatal,
2944 * since the mid-level will queue more commands and must have the lock 2998 * since the mid-level will queue more commands and must have the lock
2945 * (it's all happening inside timer interrupt handler!!). 2999 * (it's all happening inside timer interrupt handler!!).
2946 * Even worse, abort will return NOT_RUNNING for all those commands not 3000 * Even worse, abort will return NOT_RUNNING for all those commands not
2947 * on any queue, so they won't be retried ... 3001 * on any queue, so they won't be retried ...
2948 * 3002 *
2949 * Conclusion: either scsi.c disables timeout for all reset commands 3003 * Conclusion: either scsi.c disables timeout for all reset commands
2950 * immediately, or we lose! As of linux-2.0.20 it doesn't. 3004 * immediately, or we lose! As of linux-2.0.20 it doesn't.
2951 */ 3005 */
2952 3006
2953 /* After the reset, there are no more connected or disconnected commands 3007 /* After the reset, there are no more connected or disconnected commands
2954 * and no busy units; so clear the low-level status here to avoid 3008 * and no busy units; so clear the low-level status here to avoid
2955 * conflicts when the mid-level code tries to wake up the affected 3009 * conflicts when the mid-level code tries to wake up the affected
2956 * commands! 3010 * commands!
2957 */ 3011 */
2958 3012
2959 if (hostdata->issue_queue) 3013 if (hostdata->issue_queue)
2960 ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); 3014 ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
2961 if (hostdata->connected) 3015 if (hostdata->connected)
2962 ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); 3016 ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
2963 if (hostdata->disconnected_queue) 3017 if (hostdata->disconnected_queue)
2964 ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); 3018 ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
2965 3019
2966 local_irq_save(flags); 3020 local_irq_save(flags);
2967 hostdata->issue_queue = NULL; 3021 hostdata->issue_queue = NULL;
2968 hostdata->connected = NULL; 3022 hostdata->connected = NULL;
2969 hostdata->disconnected_queue = NULL; 3023 hostdata->disconnected_queue = NULL;
2970#ifdef SUPPORT_TAGS 3024#ifdef SUPPORT_TAGS
2971 free_all_tags(); 3025 free_all_tags();
2972#endif 3026#endif
2973 for( i = 0; i < 8; ++i ) 3027 for (i = 0; i < 8; ++i)
2974 hostdata->busy[i] = 0; 3028 hostdata->busy[i] = 0;
2975#ifdef REAL_DMA 3029#ifdef REAL_DMA
2976 hostdata->dma_len = 0; 3030 hostdata->dma_len = 0;
2977#endif 3031#endif
2978 local_irq_restore(flags); 3032 local_irq_restore(flags);
2979 3033
2980 /* we did not do a complete reset of all commands, so a wakeup is required */ 3034 /* we did not do a complete reset of all commands, so a wakeup is required */
2981 return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET; 3035 return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET;
2982#endif /* 1 */ 3036#endif /* 1 */
2983} 3037}
2984
2985/* Local Variables: */
2986/* tab-width: 8 */
2987/* End: */
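The reset paths above (NCR5380_bus_reset and, in the next file, atari_scsi_reset_boot) follow the usual 5380 sequence: mirror the current bus phase, assert RST for well over the 25 us minimum, then drop RST and clear the chip's mode, target-command, select-enable and latched-interrupt state. Below is a standalone sketch of that sequence; ncr_read()/ncr_write(), delay_us() and the bit values are placeholder assumptions standing in for the driver's NCR5380_read()/NCR5380_write() accessors and register definitions.

/* Sketch only: accessors, register indices and bit values are assumptions,
 * not the driver's real NCR5380 definitions. */
#include <stdint.h>

extern uint8_t ncr_read(int reg);
extern void ncr_write(int reg, uint8_t val);
extern void delay_us(unsigned int us);

enum { STATUS_REG, TARGET_COMMAND_REG, INITIATOR_COMMAND_REG,
       MODE_REG, SELECT_ENABLE_REG, RESET_PARITY_INTERRUPT_REG };

#define ICR_BASE       0x00	/* assumed quiescent ICR value */
#define ICR_ASSERT_RST 0x80	/* assumed RST-assert bit */

static void ncr5380_bus_reset_sequence(void)
{
	/* Mirror the current phase into the target command register
	 * (assumed equivalent of PHASE_SR_TO_TCR() in the code above). */
	ncr_write(TARGET_COMMAND_REG, (ncr_read(STATUS_REG) >> 2) & 0x07);

	/* Assert RST; the minimum hold time is 25 us, the driver uses 40-50 us. */
	ncr_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST);
	delay_us(50);

	/* Drop RST and return the chip to a known idle state. */
	ncr_write(INITIATOR_COMMAND_REG, ICR_BASE);
	ncr_write(MODE_REG, 0);
	ncr_write(TARGET_COMMAND_REG, 0);
	ncr_write(SELECT_ENABLE_REG, 0);

	/* Reading this register clears any latched parity/interrupt condition. */
	(void)ncr_read(RESET_PARITY_INTERRUPT_REG);
}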
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 642de7b2b7a2..6f8403b82ba1 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -69,9 +69,9 @@
69 69
70#define NDEBUG (0) 70#define NDEBUG (0)
71 71
72#define NDEBUG_ABORT 0x800000 72#define NDEBUG_ABORT 0x00100000
73#define NDEBUG_TAGS 0x1000000 73#define NDEBUG_TAGS 0x00200000
74#define NDEBUG_MERGING 0x2000000 74#define NDEBUG_MERGING 0x00400000
75 75
76#define AUTOSENSE 76#define AUTOSENSE
77/* For the Atari version, use only polled IO or REAL_DMA */ 77/* For the Atari version, use only polled IO or REAL_DMA */
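The hunk above only renumbers the driver-private debug bits so they no longer collide with the core NCR5380 NDEBUG flags. As an illustration of the pattern these bits feed, here is how a compile-time bitmask typically gates the per-category debug printks (ABRT_PRINTK, TAG_PRINTK, DMA_PRINTK) used throughout this file; the macro plumbing shown is an assumption for demonstration, since the real definitions live in the shared NCR5380 code, and printf stands in for printk so the sketch builds in user space.

/* Illustrative only: a compile-time bitmask gating per-category debug output. */
#include <stdio.h>

#define NDEBUG_ABORT   0x00100000
#define NDEBUG_TAGS    0x00200000
#define NDEBUG_MERGING 0x00400000

#define NDEBUG (NDEBUG_ABORT)	/* enable only abort tracing in this build */

#define DBG(flag, ...)				\
	do {					\
		if (NDEBUG & (flag))		\
			printf(__VA_ARGS__);	\
	} while (0)

#define ABRT_PRINTK(...) DBG(NDEBUG_ABORT, __VA_ARGS__)
#define TAG_PRINTK(...)  DBG(NDEBUG_TAGS, __VA_ARGS__)

int main(void)
{
	ABRT_PRINTK("abort tracing is compiled in\n");	/* printed */
	TAG_PRINTK("tag tracing is compiled out\n");	/* compiled away */
	return 0;
}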
@@ -186,38 +186,37 @@ static inline void DISABLE_IRQ(void)
186/***************************** Prototypes *****************************/ 186/***************************** Prototypes *****************************/
187 187
188#ifdef REAL_DMA 188#ifdef REAL_DMA
189static int scsi_dma_is_ignored_buserr( unsigned char dma_stat ); 189static int scsi_dma_is_ignored_buserr(unsigned char dma_stat);
190static void atari_scsi_fetch_restbytes( void ); 190static void atari_scsi_fetch_restbytes(void);
191static long atari_scsi_dma_residual( struct Scsi_Host *instance ); 191static long atari_scsi_dma_residual(struct Scsi_Host *instance);
192static int falcon_classify_cmd( Scsi_Cmnd *cmd ); 192static int falcon_classify_cmd(Scsi_Cmnd *cmd);
193static unsigned long atari_dma_xfer_len( unsigned long wanted_len, 193static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
194 Scsi_Cmnd *cmd, int write_flag ); 194 Scsi_Cmnd *cmd, int write_flag);
195#endif 195#endif
196static irqreturn_t scsi_tt_intr( int irq, void *dummy); 196static irqreturn_t scsi_tt_intr(int irq, void *dummy);
197static irqreturn_t scsi_falcon_intr( int irq, void *dummy); 197static irqreturn_t scsi_falcon_intr(int irq, void *dummy);
198static void falcon_release_lock_if_possible( struct NCR5380_hostdata * 198static void falcon_release_lock_if_possible(struct NCR5380_hostdata *hostdata);
199 hostdata ); 199static void falcon_get_lock(void);
200static void falcon_get_lock( void );
201#ifdef CONFIG_ATARI_SCSI_RESET_BOOT 200#ifdef CONFIG_ATARI_SCSI_RESET_BOOT
202static void atari_scsi_reset_boot( void ); 201static void atari_scsi_reset_boot(void);
203#endif 202#endif
204static unsigned char atari_scsi_tt_reg_read( unsigned char reg ); 203static unsigned char atari_scsi_tt_reg_read(unsigned char reg);
205static void atari_scsi_tt_reg_write( unsigned char reg, unsigned char value); 204static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value);
206static unsigned char atari_scsi_falcon_reg_read( unsigned char reg ); 205static unsigned char atari_scsi_falcon_reg_read(unsigned char reg);
207static void atari_scsi_falcon_reg_write( unsigned char reg, unsigned char value ); 206static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value);
208 207
209/************************* End of Prototypes **************************/ 208/************************* End of Prototypes **************************/
210 209
211 210
212static struct Scsi_Host *atari_scsi_host = NULL; 211static struct Scsi_Host *atari_scsi_host;
213static unsigned char (*atari_scsi_reg_read)( unsigned char reg ); 212static unsigned char (*atari_scsi_reg_read)(unsigned char reg);
214static void (*atari_scsi_reg_write)( unsigned char reg, unsigned char value ); 213static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value);
215 214
216#ifdef REAL_DMA 215#ifdef REAL_DMA
217static unsigned long atari_dma_residual, atari_dma_startaddr; 216static unsigned long atari_dma_residual, atari_dma_startaddr;
218static short atari_dma_active; 217static short atari_dma_active;
219/* pointer to the dribble buffer */ 218/* pointer to the dribble buffer */
220static char *atari_dma_buffer = NULL; 219static char *atari_dma_buffer;
221/* precalculated physical address of the dribble buffer */ 220/* precalculated physical address of the dribble buffer */
222static unsigned long atari_dma_phys_buffer; 221static unsigned long atari_dma_phys_buffer;
223/* != 0 tells the Falcon int handler to copy data from the dribble buffer */ 222/* != 0 tells the Falcon int handler to copy data from the dribble buffer */
@@ -233,7 +232,7 @@ static char *atari_dma_orig_addr;
233static unsigned long atari_dma_stram_mask; 232static unsigned long atari_dma_stram_mask;
234#define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0) 233#define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0)
235/* number of bytes to cut from a transfer to handle NCR overruns */ 234/* number of bytes to cut from a transfer to handle NCR overruns */
236static int atari_read_overruns = 0; 235static int atari_read_overruns;
237#endif 236#endif
238 237
239static int setup_can_queue = -1; 238static int setup_can_queue = -1;
@@ -256,10 +255,10 @@ module_param(setup_hostid, int, 0);
256 255
257#if defined(REAL_DMA) 256#if defined(REAL_DMA)
258 257
259static int scsi_dma_is_ignored_buserr( unsigned char dma_stat ) 258static int scsi_dma_is_ignored_buserr(unsigned char dma_stat)
260{ 259{
261 int i; 260 int i;
262 unsigned long addr = SCSI_DMA_READ_P( dma_addr ), end_addr; 261 unsigned long addr = SCSI_DMA_READ_P(dma_addr), end_addr;
263 262
264 if (dma_stat & 0x01) { 263 if (dma_stat & 0x01) {
265 264
@@ -267,15 +266,14 @@ static int scsi_dma_is_ignored_buserr( unsigned char dma_stat )
267 * physical memory chunk (DMA prefetch!), but that doesn't hurt. 266 * physical memory chunk (DMA prefetch!), but that doesn't hurt.
268 * Check for this case: 267 * Check for this case:
269 */ 268 */
270 269
271 for( i = 0; i < m68k_num_memory; ++i ) { 270 for (i = 0; i < m68k_num_memory; ++i) {
272 end_addr = m68k_memory[i].addr + 271 end_addr = m68k_memory[i].addr + m68k_memory[i].size;
273 m68k_memory[i].size;
274 if (end_addr <= addr && addr <= end_addr + 4) 272 if (end_addr <= addr && addr <= end_addr + 4)
275 return( 1 ); 273 return 1;
276 } 274 }
277 } 275 }
278 return( 0 ); 276 return 0;
279} 277}
280 278
281 279
@@ -284,28 +282,27 @@ static int scsi_dma_is_ignored_buserr( unsigned char dma_stat )
284 * end-of-DMA, both SCSI ints are triggered simultaneously, so the NCR int has 282 * end-of-DMA, both SCSI ints are triggered simultaneously, so the NCR int has
285 * to clear the DMA int pending bit before it allows other level 6 interrupts. 283 * to clear the DMA int pending bit before it allows other level 6 interrupts.
286 */ 284 */
287static void scsi_dma_buserr (int irq, void *dummy) 285static void scsi_dma_buserr(int irq, void *dummy)
288{ 286{
289 unsigned char dma_stat = tt_scsi_dma.dma_ctrl; 287 unsigned char dma_stat = tt_scsi_dma.dma_ctrl;
290 288
291 /* Don't do anything if a NCR interrupt is pending. Probably it's just 289 /* Don't do anything if a NCR interrupt is pending. Probably it's just
292 * masked... */ 290 * masked... */
293 if (atari_irq_pending( IRQ_TT_MFP_SCSI )) 291 if (atari_irq_pending(IRQ_TT_MFP_SCSI))
294 return; 292 return;
295 293
296 printk("Bad SCSI DMA interrupt! dma_addr=0x%08lx dma_stat=%02x dma_cnt=%08lx\n", 294 printk("Bad SCSI DMA interrupt! dma_addr=0x%08lx dma_stat=%02x dma_cnt=%08lx\n",
297 SCSI_DMA_READ_P(dma_addr), dma_stat, SCSI_DMA_READ_P(dma_cnt)); 295 SCSI_DMA_READ_P(dma_addr), dma_stat, SCSI_DMA_READ_P(dma_cnt));
298 if (dma_stat & 0x80) { 296 if (dma_stat & 0x80) {
299 if (!scsi_dma_is_ignored_buserr( dma_stat )) 297 if (!scsi_dma_is_ignored_buserr(dma_stat))
300 printk( "SCSI DMA bus error -- bad DMA programming!\n" ); 298 printk("SCSI DMA bus error -- bad DMA programming!\n");
301 } 299 } else {
302 else {
303 /* Under normal circumstances we never should get to this point, 300 /* Under normal circumstances we never should get to this point,
304 * since both interrupts are triggered simultaneously and the 5380 301 * since both interrupts are triggered simultaneously and the 5380
305 * int has higher priority. When this irq is handled, that DMA 302 * int has higher priority. When this irq is handled, that DMA
306 * interrupt is cleared. So a warning message is printed here. 303 * interrupt is cleared. So a warning message is printed here.
307 */ 304 */
308 printk( "SCSI DMA intr ?? -- this shouldn't happen!\n" ); 305 printk("SCSI DMA intr ?? -- this shouldn't happen!\n");
309 } 306 }
310} 307}
311#endif 308#endif
@@ -313,7 +310,7 @@ static void scsi_dma_buserr (int irq, void *dummy)
313#endif 310#endif
314 311
315 312
316static irqreturn_t scsi_tt_intr (int irq, void *dummy) 313static irqreturn_t scsi_tt_intr(int irq, void *dummy)
317{ 314{
318#ifdef REAL_DMA 315#ifdef REAL_DMA
319 int dma_stat; 316 int dma_stat;
@@ -327,7 +324,7 @@ static irqreturn_t scsi_tt_intr (int irq, void *dummy)
327 * is that a bus error occurred... 324 * is that a bus error occurred...
328 */ 325 */
329 if (dma_stat & 0x80) { 326 if (dma_stat & 0x80) {
330 if (!scsi_dma_is_ignored_buserr( dma_stat )) { 327 if (!scsi_dma_is_ignored_buserr(dma_stat)) {
331 printk(KERN_ERR "SCSI DMA caused bus error near 0x%08lx\n", 328 printk(KERN_ERR "SCSI DMA caused bus error near 0x%08lx\n",
332 SCSI_DMA_READ_P(dma_addr)); 329 SCSI_DMA_READ_P(dma_addr));
333 printk(KERN_CRIT "SCSI DMA bus error -- bad DMA programming!"); 330 printk(KERN_CRIT "SCSI DMA bus error -- bad DMA programming!");
@@ -344,8 +341,7 @@ static irqreturn_t scsi_tt_intr (int irq, void *dummy)
344 * data reg! 341 * data reg!
345 */ 342 */
346 if ((dma_stat & 0x02) && !(dma_stat & 0x40)) { 343 if ((dma_stat & 0x02) && !(dma_stat & 0x40)) {
347 atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P( dma_addr ) - 344 atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr);
348 atari_dma_startaddr);
349 345
350 DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", 346 DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n",
351 atari_dma_residual); 347 atari_dma_residual);
@@ -353,28 +349,30 @@ static irqreturn_t scsi_tt_intr (int irq, void *dummy)
353 if ((signed int)atari_dma_residual < 0) 349 if ((signed int)atari_dma_residual < 0)
354 atari_dma_residual = 0; 350 atari_dma_residual = 0;
355 if ((dma_stat & 1) == 0) { 351 if ((dma_stat & 1) == 0) {
356 /* After read operations, we may have to 352 /*
357 transfer some remaining bytes */ 353 * After read operations, we may have to
354 * transfer some remaining bytes
355 */
358 atari_scsi_fetch_restbytes(); 356 atari_scsi_fetch_restbytes();
359 } 357 } else {
360 else { 358 /*
361 /* There seems to be a nasty bug in some SCSI-DMA/NCR 359 * There seems to be a nasty bug in some SCSI-DMA/NCR
362 combinations: If a target disconnects while a write 360 * combinations: If a target disconnects while a write
363 operation is going on, the address register of the 361 * operation is going on, the address register of the
364 DMA may be a few bytes farther than it actually read. 362 * DMA may be a few bytes farther than it actually read.
365 This is probably due to DMA prefetching and a delay 363 * This is probably due to DMA prefetching and a delay
366 between DMA and NCR. Experiments showed that the 364 * between DMA and NCR. Experiments showed that the
367 dma_addr is 9 bytes too high, but this could vary. 365 * dma_addr is 9 bytes too high, but this could vary.
368 The problem is that the residual is thus calculated 366 * The problem is that the residual is thus calculated
369 wrong and the next transfer will start behind where 367 * wrong and the next transfer will start behind where
370 it should. So we round up the residual to the next 368 * it should. So we round up the residual to the next
371 multiple of a sector size, if it isn't already a 369 * multiple of a sector size, if it isn't already a
372 multiple and the originally expected transfer size 370 * multiple and the originally expected transfer size
373 was. The latter condition is there to ensure that 371 * was. The latter condition is there to ensure that
374 the correction is taken only for "real" data 372 * the correction is taken only for "real" data
375 transfers and not for, e.g., the parameters of some 373 * transfers and not for, e.g., the parameters of some
376 other command. These shouldn't disconnect anyway. 374 * other command. These shouldn't disconnect anyway.
377 */ 375 */
378 if (atari_dma_residual & 0x1ff) { 376 if (atari_dma_residual & 0x1ff) {
379 DMA_PRINTK("SCSI DMA: DMA bug corrected, " 377 DMA_PRINTK("SCSI DMA: DMA bug corrected, "
380 "difference %ld bytes\n", 378 "difference %ld bytes\n",
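The comment block in the hunk above describes the ST-DMA prefetch quirk: after a target disconnects during a write, the DMA address register can sit a few bytes past what was really consumed, so the computed residual is rounded up to the next 512-byte sector boundary before the transfer resumes. A small arithmetic sketch of that correction, under the assumption of a fixed 512-byte sector size:

/* Sketch of the residual correction described above (512-byte sectors assumed). */
static unsigned long round_residual_to_sector(unsigned long residual)
{
	if (residual & 0x1ff)				/* not already a multiple of 512? */
		residual = (residual + 511) & ~511UL;	/* round up to the next sector */
	return residual;
}
/* Example: a raw residual of 1527 bytes (address register overshot by 9)
 * becomes 1536, i.e. exactly three 512-byte sectors still to transfer. */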
@@ -394,18 +392,18 @@ static irqreturn_t scsi_tt_intr (int irq, void *dummy)
394 } 392 }
395 393
396#endif /* REAL_DMA */ 394#endif /* REAL_DMA */
397 395
398 NCR5380_intr (0, 0, 0); 396 NCR5380_intr(0, 0);
399 397
400#if 0 398#if 0
401 /* To be sure the int is not masked */ 399 /* To be sure the int is not masked */
402 atari_enable_irq( IRQ_TT_MFP_SCSI ); 400 atari_enable_irq(IRQ_TT_MFP_SCSI);
403#endif 401#endif
404 return IRQ_HANDLED; 402 return IRQ_HANDLED;
405} 403}
406 404
407 405
408static irqreturn_t scsi_falcon_intr (int irq, void *dummy) 406static irqreturn_t scsi_falcon_intr(int irq, void *dummy)
409{ 407{
410#ifdef REAL_DMA 408#ifdef REAL_DMA
411 int dma_stat; 409 int dma_stat;
@@ -430,7 +428,7 @@ static irqreturn_t scsi_falcon_intr (int irq, void *dummy)
430 * bytes are stuck in the ST-DMA fifo (there's no way to reach them!) 428 * bytes are stuck in the ST-DMA fifo (there's no way to reach them!)
431 */ 429 */
432 if (atari_dma_active && (dma_stat & 0x02)) { 430 if (atari_dma_active && (dma_stat & 0x02)) {
433 unsigned long transferred; 431 unsigned long transferred;
434 432
435 transferred = SCSI_DMA_GETADR() - atari_dma_startaddr; 433 transferred = SCSI_DMA_GETADR() - atari_dma_startaddr;
436 /* The ST-DMA address is incremented in 2-byte steps, but the 434 /* The ST-DMA address is incremented in 2-byte steps, but the
@@ -445,8 +443,7 @@ static irqreturn_t scsi_falcon_intr (int irq, void *dummy)
445 atari_dma_residual = HOSTDATA_DMALEN - transferred; 443 atari_dma_residual = HOSTDATA_DMALEN - transferred;
446 DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", 444 DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n",
447 atari_dma_residual); 445 atari_dma_residual);
448 } 446 } else
449 else
450 atari_dma_residual = 0; 447 atari_dma_residual = 0;
451 atari_dma_active = 0; 448 atari_dma_active = 0;
452 449
@@ -461,13 +458,13 @@ static irqreturn_t scsi_falcon_intr (int irq, void *dummy)
461 458
462#endif /* REAL_DMA */ 459#endif /* REAL_DMA */
463 460
464 NCR5380_intr (0, 0, 0); 461 NCR5380_intr(0, 0);
465 return IRQ_HANDLED; 462 return IRQ_HANDLED;
466} 463}
467 464
468 465
469#ifdef REAL_DMA 466#ifdef REAL_DMA
470static void atari_scsi_fetch_restbytes( void ) 467static void atari_scsi_fetch_restbytes(void)
471{ 468{
472 int nr; 469 int nr;
473 char *src, *dst; 470 char *src, *dst;
@@ -505,19 +502,17 @@ static int falcon_dont_release = 0;
505 * again (but others waiting longer more probably will win). 502 * again (but others waiting longer more probably will win).
506 */ 503 */
507 504
508static void 505static void falcon_release_lock_if_possible(struct NCR5380_hostdata *hostdata)
509falcon_release_lock_if_possible( struct NCR5380_hostdata * hostdata )
510{ 506{
511 unsigned long flags; 507 unsigned long flags;
512 508
513 if (IS_A_TT()) return; 509 if (IS_A_TT())
514 510 return;
511
515 local_irq_save(flags); 512 local_irq_save(flags);
516 513
517 if (falcon_got_lock && 514 if (falcon_got_lock && !hostdata->disconnected_queue &&
518 !hostdata->disconnected_queue && 515 !hostdata->issue_queue && !hostdata->connected) {
519 !hostdata->issue_queue &&
520 !hostdata->connected) {
521 516
522 if (falcon_dont_release) { 517 if (falcon_dont_release) {
523#if 0 518#if 0
@@ -528,7 +523,7 @@ falcon_release_lock_if_possible( struct NCR5380_hostdata * hostdata )
528 } 523 }
529 falcon_got_lock = 0; 524 falcon_got_lock = 0;
530 stdma_release(); 525 stdma_release();
531 wake_up( &falcon_fairness_wait ); 526 wake_up(&falcon_fairness_wait);
532 } 527 }
533 528
534 local_irq_restore(flags); 529 local_irq_restore(flags);
@@ -549,31 +544,31 @@ falcon_release_lock_if_possible( struct NCR5380_hostdata * hostdata )
549 * Complicated, complicated.... Sigh... 544 * Complicated, complicated.... Sigh...
550 */ 545 */
551 546
552static void falcon_get_lock( void ) 547static void falcon_get_lock(void)
553{ 548{
554 unsigned long flags; 549 unsigned long flags;
555 550
556 if (IS_A_TT()) return; 551 if (IS_A_TT())
552 return;
557 553
558 local_irq_save(flags); 554 local_irq_save(flags);
559 555
560 while( !in_interrupt() && falcon_got_lock && stdma_others_waiting() ) 556 while (!in_irq() && falcon_got_lock && stdma_others_waiting())
561 sleep_on( &falcon_fairness_wait ); 557 sleep_on(&falcon_fairness_wait);
562 558
563 while (!falcon_got_lock) { 559 while (!falcon_got_lock) {
564 if (in_interrupt()) 560 if (in_irq())
565 panic( "Falcon SCSI hasn't ST-DMA lock in interrupt" ); 561 panic("Falcon SCSI hasn't ST-DMA lock in interrupt");
566 if (!falcon_trying_lock) { 562 if (!falcon_trying_lock) {
567 falcon_trying_lock = 1; 563 falcon_trying_lock = 1;
568 stdma_lock(scsi_falcon_intr, NULL); 564 stdma_lock(scsi_falcon_intr, NULL);
569 falcon_got_lock = 1; 565 falcon_got_lock = 1;
570 falcon_trying_lock = 0; 566 falcon_trying_lock = 0;
571 wake_up( &falcon_try_wait ); 567 wake_up(&falcon_try_wait);
572 } 568 } else {
573 else { 569 sleep_on(&falcon_try_wait);
574 sleep_on( &falcon_try_wait );
575 } 570 }
576 } 571 }
577 572
578 local_irq_restore(flags); 573 local_irq_restore(flags);
579 if (!falcon_got_lock) 574 if (!falcon_got_lock)
@@ -587,18 +582,18 @@ static void falcon_get_lock( void )
587 */ 582 */
588 583
589#if 0 584#if 0
590int atari_queue_command (Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) 585int atari_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
591{ 586{
592 /* falcon_get_lock(); 587 /* falcon_get_lock();
593 * ++guenther: moved to NCR5380_queue_command() to prevent 588 * ++guenther: moved to NCR5380_queue_command() to prevent
594 * race condition, see there for an explanation. 589 * race condition, see there for an explanation.
595 */ 590 */
596 return( NCR5380_queue_command( cmd, done ) ); 591 return NCR5380_queue_command(cmd, done);
597} 592}
598#endif 593#endif
599 594
600 595
601int atari_scsi_detect (struct scsi_host_template *host) 596int atari_scsi_detect(struct scsi_host_template *host)
602{ 597{
603 static int called = 0; 598 static int called = 0;
604 struct Scsi_Host *instance; 599 struct Scsi_Host *instance;
@@ -606,7 +601,7 @@ int atari_scsi_detect (struct scsi_host_template *host)
606 if (!MACH_IS_ATARI || 601 if (!MACH_IS_ATARI ||
607 (!ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(TT_SCSI)) || 602 (!ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(TT_SCSI)) ||
608 called) 603 called)
609 return( 0 ); 604 return 0;
610 605
611 host->proc_name = "Atari"; 606 host->proc_name = "Atari";
612 607
@@ -655,32 +650,33 @@ int atari_scsi_detect (struct scsi_host_template *host)
655 !ATARIHW_PRESENT(EXTD_DMA) && m68k_num_memory > 1) { 650 !ATARIHW_PRESENT(EXTD_DMA) && m68k_num_memory > 1) {
656 atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI"); 651 atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI");
657 if (!atari_dma_buffer) { 652 if (!atari_dma_buffer) {
658 printk( KERN_ERR "atari_scsi_detect: can't allocate ST-RAM " 653 printk(KERN_ERR "atari_scsi_detect: can't allocate ST-RAM "
659 "double buffer\n" ); 654 "double buffer\n");
660 return( 0 ); 655 return 0;
661 } 656 }
662 atari_dma_phys_buffer = virt_to_phys( atari_dma_buffer ); 657 atari_dma_phys_buffer = virt_to_phys(atari_dma_buffer);
663 atari_dma_orig_addr = 0; 658 atari_dma_orig_addr = 0;
664 } 659 }
665#endif 660#endif
666 instance = scsi_register (host, sizeof (struct NCR5380_hostdata)); 661 instance = scsi_register(host, sizeof(struct NCR5380_hostdata));
667 if(instance == NULL) 662 if (instance == NULL) {
668 {
669 atari_stram_free(atari_dma_buffer); 663 atari_stram_free(atari_dma_buffer);
670 atari_dma_buffer = 0; 664 atari_dma_buffer = 0;
671 return 0; 665 return 0;
672 } 666 }
673 atari_scsi_host = instance; 667 atari_scsi_host = instance;
674 /* Set irq to 0, to avoid that the mid-level code disables our interrupt 668 /*
675 * during queue_command calls. This is completely unnecessary, and even 669 * Set irq to 0, to avoid that the mid-level code disables our interrupt
676 * worse causes bad problems on the Falcon, where the int is shared with 670 * during queue_command calls. This is completely unnecessary, and even
677 * IDE and floppy! */ 671 * worse causes bad problems on the Falcon, where the int is shared with
672 * IDE and floppy!
673 */
678 instance->irq = 0; 674 instance->irq = 0;
679 675
680#ifdef CONFIG_ATARI_SCSI_RESET_BOOT 676#ifdef CONFIG_ATARI_SCSI_RESET_BOOT
681 atari_scsi_reset_boot(); 677 atari_scsi_reset_boot();
682#endif 678#endif
683 NCR5380_init (instance, 0); 679 NCR5380_init(instance, 0);
684 680
685 if (IS_A_TT()) { 681 if (IS_A_TT()) {
686 682
@@ -727,11 +723,10 @@ int atari_scsi_detect (struct scsi_host_template *host)
727 * the rest data bug is fixed, this can be lowered to 1. 723 * the rest data bug is fixed, this can be lowered to 1.
728 */ 724 */
729 atari_read_overruns = 4; 725 atari_read_overruns = 4;
730 } 726 }
731#endif /*REAL_DMA*/ 727#endif /*REAL_DMA*/
732 } 728 } else { /* ! IS_A_TT */
733 else { /* ! IS_A_TT */ 729
734
735 /* Nothing to do for the interrupt: the ST-DMA is initialized 730 /* Nothing to do for the interrupt: the ST-DMA is initialized
736 * already by atari_init_INTS() 731 * already by atari_init_INTS()
737 */ 732 */
@@ -756,23 +751,21 @@ int atari_scsi_detect (struct scsi_host_template *host)
756 setup_use_tagged_queuing ? "yes" : "no", 751 setup_use_tagged_queuing ? "yes" : "no",
757#endif 752#endif
758 instance->hostt->this_id ); 753 instance->hostt->this_id );
759 NCR5380_print_options (instance); 754 NCR5380_print_options(instance);
760 printk ("\n"); 755 printk("\n");
761 756
762 called = 1; 757 called = 1;
763 return( 1 ); 758 return 1;
764} 759}
765 760
766#ifdef MODULE 761int atari_scsi_release(struct Scsi_Host *sh)
767int atari_scsi_release (struct Scsi_Host *sh)
768{ 762{
769 if (IS_A_TT()) 763 if (IS_A_TT())
770 free_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr); 764 free_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr);
771 if (atari_dma_buffer) 765 if (atari_dma_buffer)
772 atari_stram_free (atari_dma_buffer); 766 atari_stram_free(atari_dma_buffer);
773 return 1; 767 return 1;
774} 768}
775#endif
776 769
777void __init atari_scsi_setup(char *str, int *ints) 770void __init atari_scsi_setup(char *str, int *ints)
778{ 771{
@@ -781,9 +774,9 @@ void __init atari_scsi_setup(char *str, int *ints)
781 * Defaults depend on TT or Falcon, hostid determined at run time. 774 * Defaults depend on TT or Falcon, hostid determined at run time.
782 * Negative values mean don't change. 775 * Negative values mean don't change.
783 */ 776 */
784 777
785 if (ints[0] < 1) { 778 if (ints[0] < 1) {
786 printk( "atari_scsi_setup: no arguments!\n" ); 779 printk("atari_scsi_setup: no arguments!\n");
787 return; 780 return;
788 } 781 }
789 782
@@ -809,7 +802,7 @@ void __init atari_scsi_setup(char *str, int *ints)
809 if (ints[4] >= 0 && ints[4] <= 7) 802 if (ints[4] >= 0 && ints[4] <= 7)
810 setup_hostid = ints[4]; 803 setup_hostid = ints[4];
811 else if (ints[4] > 7) 804 else if (ints[4] > 7)
812 printk( "atari_scsi_setup: invalid host ID %d !\n", ints[4] ); 805 printk("atari_scsi_setup: invalid host ID %d !\n", ints[4]);
813 } 806 }
814#ifdef SUPPORT_TAGS 807#ifdef SUPPORT_TAGS
815 if (ints[0] >= 5) { 808 if (ints[0] >= 5) {
@@ -821,7 +814,7 @@ void __init atari_scsi_setup(char *str, int *ints)
821 814
822int atari_scsi_bus_reset(Scsi_Cmnd *cmd) 815int atari_scsi_bus_reset(Scsi_Cmnd *cmd)
823{ 816{
824 int rv; 817 int rv;
825 struct NCR5380_hostdata *hostdata = 818 struct NCR5380_hostdata *hostdata =
826 (struct NCR5380_hostdata *)cmd->device->host->hostdata; 819 (struct NCR5380_hostdata *)cmd->device->host->hostdata;
827 820
@@ -831,13 +824,12 @@ int atari_scsi_bus_reset(Scsi_Cmnd *cmd)
831 */ 824 */
832 /* And abort a maybe active DMA transfer */ 825 /* And abort a maybe active DMA transfer */
833 if (IS_A_TT()) { 826 if (IS_A_TT()) {
834 atari_turnoff_irq( IRQ_TT_MFP_SCSI ); 827 atari_turnoff_irq(IRQ_TT_MFP_SCSI);
835#ifdef REAL_DMA 828#ifdef REAL_DMA
836 tt_scsi_dma.dma_ctrl = 0; 829 tt_scsi_dma.dma_ctrl = 0;
837#endif /* REAL_DMA */ 830#endif /* REAL_DMA */
838 } 831 } else {
839 else { 832 atari_turnoff_irq(IRQ_MFP_FSCSI);
840 atari_turnoff_irq( IRQ_MFP_FSCSI );
841#ifdef REAL_DMA 833#ifdef REAL_DMA
842 st_dma.dma_mode_status = 0x90; 834 st_dma.dma_mode_status = 0x90;
843 atari_dma_active = 0; 835 atari_dma_active = 0;
@@ -849,52 +841,51 @@ int atari_scsi_bus_reset(Scsi_Cmnd *cmd)
849 841
850 /* Re-enable ints */ 842 /* Re-enable ints */
851 if (IS_A_TT()) { 843 if (IS_A_TT()) {
852 atari_turnon_irq( IRQ_TT_MFP_SCSI ); 844 atari_turnon_irq(IRQ_TT_MFP_SCSI);
853 } 845 } else {
854 else { 846 atari_turnon_irq(IRQ_MFP_FSCSI);
855 atari_turnon_irq( IRQ_MFP_FSCSI );
856 } 847 }
857 if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS) 848 if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS)
858 falcon_release_lock_if_possible(hostdata); 849 falcon_release_lock_if_possible(hostdata);
859 850
860 return( rv ); 851 return rv;
861} 852}
862 853
863 854
864#ifdef CONFIG_ATARI_SCSI_RESET_BOOT 855#ifdef CONFIG_ATARI_SCSI_RESET_BOOT
865static void __init atari_scsi_reset_boot(void) 856static void __init atari_scsi_reset_boot(void)
866{ 857{
867 unsigned long end; 858 unsigned long end;
868 859
869 /* 860 /*
870 * Do a SCSI reset to clean up the bus during initialization. No messing 861 * Do a SCSI reset to clean up the bus during initialization. No messing
871 * with the queues, interrupts, or locks necessary here. 862 * with the queues, interrupts, or locks necessary here.
872 */ 863 */
873 864
874 printk( "Atari SCSI: resetting the SCSI bus..." ); 865 printk("Atari SCSI: resetting the SCSI bus...");
875 866
876 /* get in phase */ 867 /* get in phase */
877 NCR5380_write( TARGET_COMMAND_REG, 868 NCR5380_write(TARGET_COMMAND_REG,
878 PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); 869 PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG)));
879 870
880 /* assert RST */ 871 /* assert RST */
881 NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); 872 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST);
882 /* The min. reset hold time is 25us, so 40us should be enough */ 873 /* The min. reset hold time is 25us, so 40us should be enough */
883 udelay( 50 ); 874 udelay(50);
884 /* reset RST and interrupt */ 875 /* reset RST and interrupt */
885 NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); 876 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
886 NCR5380_read( RESET_PARITY_INTERRUPT_REG ); 877 NCR5380_read(RESET_PARITY_INTERRUPT_REG);
887 878
888 end = jiffies + AFTER_RESET_DELAY; 879 end = jiffies + AFTER_RESET_DELAY;
889 while (time_before(jiffies, end)) 880 while (time_before(jiffies, end))
890 barrier(); 881 barrier();
891 882
892 printk( " done\n" ); 883 printk(" done\n");
893} 884}
894#endif 885#endif
895 886
896 887
897const char * atari_scsi_info (struct Scsi_Host *host) 888const char *atari_scsi_info(struct Scsi_Host *host)
898{ 889{
899 /* atari_scsi_detect() is verbose enough... */ 890 /* atari_scsi_detect() is verbose enough... */
900 static const char string[] = "Atari native SCSI"; 891 static const char string[] = "Atari native SCSI";
@@ -904,10 +895,10 @@ const char * atari_scsi_info (struct Scsi_Host *host)
904 895
905#if defined(REAL_DMA) 896#if defined(REAL_DMA)
906 897
907unsigned long atari_scsi_dma_setup( struct Scsi_Host *instance, void *data, 898unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, void *data,
908 unsigned long count, int dir ) 899 unsigned long count, int dir)
909{ 900{
910 unsigned long addr = virt_to_phys( data ); 901 unsigned long addr = virt_to_phys(data);
911 902
912 DMA_PRINTK("scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, " 903 DMA_PRINTK("scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
913 "dir = %d\n", instance->host_no, data, addr, count, dir); 904 "dir = %d\n", instance->host_no, data, addr, count, dir);
@@ -919,38 +910,37 @@ unsigned long atari_scsi_dma_setup( struct Scsi_Host *instance, void *data,
919 * wanted address. 910 * wanted address.
920 */ 911 */
921 if (dir) 912 if (dir)
922 memcpy( atari_dma_buffer, data, count ); 913 memcpy(atari_dma_buffer, data, count);
923 else 914 else
924 atari_dma_orig_addr = data; 915 atari_dma_orig_addr = data;
925 addr = atari_dma_phys_buffer; 916 addr = atari_dma_phys_buffer;
926 } 917 }
927 918
928 atari_dma_startaddr = addr; /* Needed for calculating residual later. */ 919 atari_dma_startaddr = addr; /* Needed for calculating residual later. */
929 920
930 /* Cache cleanup stuff: On writes, push any dirty cache out before sending 921 /* Cache cleanup stuff: On writes, push any dirty cache out before sending
931 * it to the peripheral. (Must be done before DMA setup, since at least 922 * it to the peripheral. (Must be done before DMA setup, since at least
932 * the ST-DMA begins to fill internal buffers right after setup. For 923 * the ST-DMA begins to fill internal buffers right after setup. For
933 reads, invalidate any cache, as it may be altered after DMA without CPU 924 * reads, invalidate any cache, as it may be altered after DMA without CPU
934 * knowledge. 925 * knowledge.
935 * 926 *
936 * ++roman: For the Medusa, there's no need at all for that cache stuff, 927 * ++roman: For the Medusa, there's no need at all for that cache stuff,
937 * because the hardware does bus snooping (fine!). 928 * because the hardware does bus snooping (fine!).
938 */ 929 */
939 dma_cache_maintenance( addr, count, dir ); 930 dma_cache_maintenance(addr, count, dir);
940 931
941 if (count == 0) 932 if (count == 0)
942 printk(KERN_NOTICE "SCSI warning: DMA programmed for 0 bytes !\n"); 933 printk(KERN_NOTICE "SCSI warning: DMA programmed for 0 bytes !\n");
943 934
944 if (IS_A_TT()) { 935 if (IS_A_TT()) {
945 tt_scsi_dma.dma_ctrl = dir; 936 tt_scsi_dma.dma_ctrl = dir;
946 SCSI_DMA_WRITE_P( dma_addr, addr ); 937 SCSI_DMA_WRITE_P(dma_addr, addr);
947 SCSI_DMA_WRITE_P( dma_cnt, count ); 938 SCSI_DMA_WRITE_P(dma_cnt, count);
948 tt_scsi_dma.dma_ctrl = dir | 2; 939 tt_scsi_dma.dma_ctrl = dir | 2;
949 } 940 } else { /* ! IS_A_TT */
950 else { /* ! IS_A_TT */ 941
951
952 /* set address */ 942 /* set address */
953 SCSI_DMA_SETADR( addr ); 943 SCSI_DMA_SETADR(addr);
954 944
955 /* toggle direction bit to clear FIFO and set DMA direction */ 945 /* toggle direction bit to clear FIFO and set DMA direction */
956 dir <<= 8; 946 dir <<= 8;
@@ -968,13 +958,13 @@ unsigned long atari_scsi_dma_setup( struct Scsi_Host *instance, void *data,
968 atari_dma_active = 1; 958 atari_dma_active = 1;
969 } 959 }
970 960
971 return( count ); 961 return count;
972} 962}
973 963
974 964
975static long atari_scsi_dma_residual( struct Scsi_Host *instance ) 965static long atari_scsi_dma_residual(struct Scsi_Host *instance)
976{ 966{
977 return( atari_dma_residual ); 967 return atari_dma_residual;
978} 968}
979 969
980 970
@@ -982,13 +972,13 @@ static long atari_scsi_dma_residual( struct Scsi_Host *instance )
982#define CMD_SURELY_BYTE_MODE 1 972#define CMD_SURELY_BYTE_MODE 1
983#define CMD_MODE_UNKNOWN 2 973#define CMD_MODE_UNKNOWN 2
984 974
985static int falcon_classify_cmd( Scsi_Cmnd *cmd ) 975static int falcon_classify_cmd(Scsi_Cmnd *cmd)
986{ 976{
987 unsigned char opcode = cmd->cmnd[0]; 977 unsigned char opcode = cmd->cmnd[0];
988 978
989 if (opcode == READ_DEFECT_DATA || opcode == READ_LONG || 979 if (opcode == READ_DEFECT_DATA || opcode == READ_LONG ||
990 opcode == READ_BUFFER) 980 opcode == READ_BUFFER)
991 return( CMD_SURELY_BYTE_MODE ); 981 return CMD_SURELY_BYTE_MODE;
992 else if (opcode == READ_6 || opcode == READ_10 || 982 else if (opcode == READ_6 || opcode == READ_10 ||
993 opcode == 0xa8 /* READ_12 */ || opcode == READ_REVERSE || 983 opcode == 0xa8 /* READ_12 */ || opcode == READ_REVERSE ||
994 opcode == RECOVER_BUFFERED_DATA) { 984 opcode == RECOVER_BUFFERED_DATA) {
@@ -996,12 +986,11 @@ static int falcon_classify_cmd( Scsi_Cmnd *cmd )
996 * needed here: The transfer is block-mode only if the 'fixed' bit is 986 * needed here: The transfer is block-mode only if the 'fixed' bit is
997 * set! */ 987 * set! */
998 if (cmd->device->type == TYPE_TAPE && !(cmd->cmnd[1] & 1)) 988 if (cmd->device->type == TYPE_TAPE && !(cmd->cmnd[1] & 1))
999 return( CMD_SURELY_BYTE_MODE ); 989 return CMD_SURELY_BYTE_MODE;
1000 else 990 else
1001 return( CMD_SURELY_BLOCK_MODE ); 991 return CMD_SURELY_BLOCK_MODE;
1002 } 992 } else
1003 else 993 return CMD_MODE_UNKNOWN;
1004 return( CMD_MODE_UNKNOWN );
1005} 994}
1006 995
1007 996
@@ -1014,19 +1003,18 @@ static int falcon_classify_cmd( Scsi_Cmnd *cmd )
1014 * the overrun problem, so this question is academic :-) 1003 * the overrun problem, so this question is academic :-)
1015 */ 1004 */
1016 1005
1017static unsigned long atari_dma_xfer_len( unsigned long wanted_len, 1006static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
1018 Scsi_Cmnd *cmd, 1007 Scsi_Cmnd *cmd, int write_flag)
1019 int write_flag )
1020{ 1008{
1021 unsigned long possible_len, limit; 1009 unsigned long possible_len, limit;
1022#ifndef CONFIG_TT_DMA_EMUL 1010#ifndef CONFIG_TT_DMA_EMUL
1023 if (MACH_IS_HADES) 1011 if (MACH_IS_HADES)
1024 /* Hades has no SCSI DMA at all :-( Always force use of PIO */ 1012 /* Hades has no SCSI DMA at all :-( Always force use of PIO */
1025 return( 0 ); 1013 return 0;
1026#endif 1014#endif
1027 if (IS_A_TT()) 1015 if (IS_A_TT())
1028 /* TT SCSI DMA can transfer arbitrary #bytes */ 1016 /* TT SCSI DMA can transfer arbitrary #bytes */
1029 return( wanted_len ); 1017 return wanted_len;
1030 1018
1031 /* ST DMA chip is stupid -- only multiples of 512 bytes! (and max. 1019 /* ST DMA chip is stupid -- only multiples of 512 bytes! (and max.
1032 * 255*512 bytes, but this should be enough) 1020 * 255*512 bytes, but this should be enough)
@@ -1062,8 +1050,7 @@ static unsigned long atari_dma_xfer_len( unsigned long wanted_len,
1062 * this). 1050 * this).
1063 */ 1051 */
1064 possible_len = wanted_len; 1052 possible_len = wanted_len;
1065 } 1053 } else {
1066 else {
1067 /* Read operations: if the wanted transfer length is not a multiple of 1054 /* Read operations: if the wanted transfer length is not a multiple of
1068 * 512, we cannot use DMA, since the ST-DMA cannot split transfers 1055 * 512, we cannot use DMA, since the ST-DMA cannot split transfers
1069 * (no interrupt on DMA finished!) 1056 * (no interrupt on DMA finished!)
@@ -1073,15 +1060,15 @@ static unsigned long atari_dma_xfer_len( unsigned long wanted_len,
1073 else { 1060 else {
1074 /* Now classify the command (see above) and decide whether it is 1061 /* Now classify the command (see above) and decide whether it is
1075 * allowed to do DMA at all */ 1062 * allowed to do DMA at all */
1076 switch( falcon_classify_cmd( cmd )) { 1063 switch (falcon_classify_cmd(cmd)) {
1077 case CMD_SURELY_BLOCK_MODE: 1064 case CMD_SURELY_BLOCK_MODE:
1078 possible_len = wanted_len; 1065 possible_len = wanted_len;
1079 break; 1066 break;
1080 case CMD_SURELY_BYTE_MODE: 1067 case CMD_SURELY_BYTE_MODE:
1081 possible_len = 0; /* DMA prohibited */ 1068 possible_len = 0; /* DMA prohibited */
1082 break; 1069 break;
1083 case CMD_MODE_UNKNOWN: 1070 case CMD_MODE_UNKNOWN:
1084 default: 1071 default:
1085 /* For unknown commands assume block transfers if the transfer 1072 /* For unknown commands assume block transfers if the transfer
1086 * size/allocation length is >= 1024 */ 1073 * size/allocation length is >= 1024 */
1087 possible_len = (wanted_len < 1024) ? 0 : wanted_len; 1074 possible_len = (wanted_len < 1024) ? 0 : wanted_len;
@@ -1089,9 +1076,9 @@ static unsigned long atari_dma_xfer_len( unsigned long wanted_len,
1089 } 1076 }
1090 } 1077 }
1091 } 1078 }
1092 1079
1093 /* Last step: apply the hard limit on DMA transfers */ 1080 /* Last step: apply the hard limit on DMA transfers */
1094 limit = (atari_dma_buffer && !STRAM_ADDR( virt_to_phys(cmd->SCp.ptr) )) ? 1081 limit = (atari_dma_buffer && !STRAM_ADDR(virt_to_phys(cmd->SCp.ptr))) ?
1095 STRAM_BUFFER_SIZE : 255*512; 1082 STRAM_BUFFER_SIZE : 255*512;
1096 if (possible_len > limit) 1083 if (possible_len > limit)
1097 possible_len = limit; 1084 possible_len = limit;
@@ -1100,7 +1087,7 @@ static unsigned long atari_dma_xfer_len( unsigned long wanted_len,
1100 DMA_PRINTK("Sorry, must cut DMA transfer size to %ld bytes " 1087 DMA_PRINTK("Sorry, must cut DMA transfer size to %ld bytes "
1101 "instead of %ld\n", possible_len, wanted_len); 1088 "instead of %ld\n", possible_len, wanted_len);
1102 1089
1103 return( possible_len ); 1090 return possible_len;
1104} 1091}
1105 1092
1106 1093
@@ -1114,23 +1101,23 @@ static unsigned long atari_dma_xfer_len( unsigned long wanted_len,
1114 * NCR5380_write call these functions via function pointers. 1101 * NCR5380_write call these functions via function pointers.
1115 */ 1102 */
1116 1103
1117static unsigned char atari_scsi_tt_reg_read( unsigned char reg ) 1104static unsigned char atari_scsi_tt_reg_read(unsigned char reg)
1118{ 1105{
1119 return( tt_scsi_regp[reg * 2] ); 1106 return tt_scsi_regp[reg * 2];
1120} 1107}
1121 1108
1122static void atari_scsi_tt_reg_write( unsigned char reg, unsigned char value ) 1109static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value)
1123{ 1110{
1124 tt_scsi_regp[reg * 2] = value; 1111 tt_scsi_regp[reg * 2] = value;
1125} 1112}
1126 1113
1127static unsigned char atari_scsi_falcon_reg_read( unsigned char reg ) 1114static unsigned char atari_scsi_falcon_reg_read(unsigned char reg)
1128{ 1115{
1129 dma_wd.dma_mode_status= (u_short)(0x88 + reg); 1116 dma_wd.dma_mode_status= (u_short)(0x88 + reg);
1130 return( (u_char)dma_wd.fdc_acces_seccount ); 1117 return (u_char)dma_wd.fdc_acces_seccount;
1131} 1118}
1132 1119
1133static void atari_scsi_falcon_reg_write( unsigned char reg, unsigned char value ) 1120static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value)
1134{ 1121{
1135 dma_wd.dma_mode_status = (u_short)(0x88 + reg); 1122 dma_wd.dma_mode_status = (u_short)(0x88 + reg);
1136 dma_wd.fdc_acces_seccount = (u_short)value; 1123 dma_wd.fdc_acces_seccount = (u_short)value;
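As background for the atari_scsi.c hunks above, the Falcon ST-DMA restriction that atari_dma_xfer_len() enforces can be reduced to a small standalone sketch. This is illustrative only and not part of the patch: the helper name, the simplified read/write handling and the sample lengths are assumptions, and the real driver additionally considers command classification and the ST-RAM bounce buffer.

#include <stdio.h>

/* Minimal model of the ST-DMA length rule: the Falcon DMA engine only
 * moves whole 512-byte blocks and at most 255 of them; anything else
 * has to fall back to PIO (length 0) or be truncated. */
static unsigned long falcon_dma_len(unsigned long wanted, int is_write)
{
        unsigned long len = wanted;

        if (!is_write && (wanted & 511))
                return 0;                       /* reads cannot be split, so no DMA */
        if (is_write)
                len = wanted & ~511UL;          /* writes: DMA the full blocks only */
        if (len > 255 * 512)
                len = 255 * 512;                /* hard ST-DMA limit */
        return len;
}

int main(void)
{
        printf("%lu\n", falcon_dma_len(4096, 0));       /* 4096: DMA ok */
        printf("%lu\n", falcon_dma_len(4100, 0));       /* 0: odd-sized read -> PIO */
        printf("%lu\n", falcon_dma_len(200000, 1));     /* 130560: capped at 255*512 */
        return 0;
}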
diff --git a/drivers/scsi/atari_scsi.h b/drivers/scsi/atari_scsi.h
index f917bdd09b41..efadb8d567c2 100644
--- a/drivers/scsi/atari_scsi.h
+++ b/drivers/scsi/atari_scsi.h
@@ -21,11 +21,7 @@
21int atari_scsi_detect (struct scsi_host_template *); 21int atari_scsi_detect (struct scsi_host_template *);
22const char *atari_scsi_info (struct Scsi_Host *); 22const char *atari_scsi_info (struct Scsi_Host *);
23int atari_scsi_reset (Scsi_Cmnd *, unsigned int); 23int atari_scsi_reset (Scsi_Cmnd *, unsigned int);
24#ifdef MODULE
25int atari_scsi_release (struct Scsi_Host *); 24int atari_scsi_release (struct Scsi_Host *);
26#else
27#define atari_scsi_release NULL
28#endif
29 25
30/* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary. Higher 26/* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary. Higher
31 * values should work, too; try it! (but cmd_per_lun costs memory!) */ 27 * values should work, too; try it! (but cmd_per_lun costs memory!) */
@@ -63,6 +59,32 @@ int atari_scsi_release (struct Scsi_Host *);
63#define NCR5380_dma_xfer_len(i,cmd,phase) \ 59#define NCR5380_dma_xfer_len(i,cmd,phase) \
64 atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1) 60 atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1)
65 61
62/* former generic SCSI error handling stuff */
63
64#define SCSI_ABORT_SNOOZE 0
65#define SCSI_ABORT_SUCCESS 1
66#define SCSI_ABORT_PENDING 2
67#define SCSI_ABORT_BUSY 3
68#define SCSI_ABORT_NOT_RUNNING 4
69#define SCSI_ABORT_ERROR 5
70
71#define SCSI_RESET_SNOOZE 0
72#define SCSI_RESET_PUNT 1
73#define SCSI_RESET_SUCCESS 2
74#define SCSI_RESET_PENDING 3
75#define SCSI_RESET_WAKEUP 4
76#define SCSI_RESET_NOT_RUNNING 5
77#define SCSI_RESET_ERROR 6
78
79#define SCSI_RESET_SYNCHRONOUS 0x01
80#define SCSI_RESET_ASYNCHRONOUS 0x02
81#define SCSI_RESET_SUGGEST_BUS_RESET 0x04
82#define SCSI_RESET_SUGGEST_HOST_RESET 0x08
83
84#define SCSI_RESET_BUS_RESET 0x100
85#define SCSI_RESET_HOST_RESET 0x200
86#define SCSI_RESET_ACTION 0xff
87
66/* Debugging printk definitions: 88/* Debugging printk definitions:
67 * 89 *
68 * ARB -> arbitration 90 * ARB -> arbitration
@@ -91,144 +113,58 @@ int atari_scsi_release (struct Scsi_Host *);
91 * 113 *
92 */ 114 */
93 115
94#if NDEBUG & NDEBUG_ARBITRATION 116#define dprint(flg, format...) \
117({ \
118 if (NDEBUG & (flg)) \
119 printk(KERN_DEBUG format); \
120})
121
95#define ARB_PRINTK(format, args...) \ 122#define ARB_PRINTK(format, args...) \
96 printk(KERN_DEBUG format , ## args) 123 dprint(NDEBUG_ARBITRATION, format , ## args)
97#else
98#define ARB_PRINTK(format, args...)
99#endif
100#if NDEBUG & NDEBUG_AUTOSENSE
101#define ASEN_PRINTK(format, args...) \ 124#define ASEN_PRINTK(format, args...) \
102 printk(KERN_DEBUG format , ## args) 125 dprint(NDEBUG_AUTOSENSE, format , ## args)
103#else
104#define ASEN_PRINTK(format, args...)
105#endif
106#if NDEBUG & NDEBUG_DMA
107#define DMA_PRINTK(format, args...) \ 126#define DMA_PRINTK(format, args...) \
108 printk(KERN_DEBUG format , ## args) 127 dprint(NDEBUG_DMA, format , ## args)
109#else
110#define DMA_PRINTK(format, args...)
111#endif
112#if NDEBUG & NDEBUG_HANDSHAKE
113#define HSH_PRINTK(format, args...) \ 128#define HSH_PRINTK(format, args...) \
114 printk(KERN_DEBUG format , ## args) 129 dprint(NDEBUG_HANDSHAKE, format , ## args)
115#else
116#define HSH_PRINTK(format, args...)
117#endif
118#if NDEBUG & NDEBUG_INFORMATION
119#define INF_PRINTK(format, args...) \ 130#define INF_PRINTK(format, args...) \
120 printk(KERN_DEBUG format , ## args) 131 dprint(NDEBUG_INFORMATION, format , ## args)
121#else
122#define INF_PRINTK(format, args...)
123#endif
124#if NDEBUG & NDEBUG_INIT
125#define INI_PRINTK(format, args...) \ 132#define INI_PRINTK(format, args...) \
126 printk(KERN_DEBUG format , ## args) 133 dprint(NDEBUG_INIT, format , ## args)
127#else
128#define INI_PRINTK(format, args...)
129#endif
130#if NDEBUG & NDEBUG_INTR
131#define INT_PRINTK(format, args...) \ 134#define INT_PRINTK(format, args...) \
132 printk(KERN_DEBUG format , ## args) 135 dprint(NDEBUG_INTR, format , ## args)
133#else
134#define INT_PRINTK(format, args...)
135#endif
136#if NDEBUG & NDEBUG_LINKED
137#define LNK_PRINTK(format, args...) \ 136#define LNK_PRINTK(format, args...) \
138 printk(KERN_DEBUG format , ## args) 137 dprint(NDEBUG_LINKED, format , ## args)
139#else
140#define LNK_PRINTK(format, args...)
141#endif
142#if NDEBUG & NDEBUG_MAIN
143#define MAIN_PRINTK(format, args...) \ 138#define MAIN_PRINTK(format, args...) \
144 printk(KERN_DEBUG format , ## args) 139 dprint(NDEBUG_MAIN, format , ## args)
145#else
146#define MAIN_PRINTK(format, args...)
147#endif
148#if NDEBUG & NDEBUG_NO_DATAOUT
149#define NDAT_PRINTK(format, args...) \ 140#define NDAT_PRINTK(format, args...) \
150 printk(KERN_DEBUG format , ## args) 141 dprint(NDEBUG_NO_DATAOUT, format , ## args)
151#else
152#define NDAT_PRINTK(format, args...)
153#endif
154#if NDEBUG & NDEBUG_NO_WRITE
155#define NWR_PRINTK(format, args...) \ 142#define NWR_PRINTK(format, args...) \
156 printk(KERN_DEBUG format , ## args) 143 dprint(NDEBUG_NO_WRITE, format , ## args)
157#else
158#define NWR_PRINTK(format, args...)
159#endif
160#if NDEBUG & NDEBUG_PIO
161#define PIO_PRINTK(format, args...) \ 144#define PIO_PRINTK(format, args...) \
162 printk(KERN_DEBUG format , ## args) 145 dprint(NDEBUG_PIO, format , ## args)
163#else
164#define PIO_PRINTK(format, args...)
165#endif
166#if NDEBUG & NDEBUG_PSEUDO_DMA
167#define PDMA_PRINTK(format, args...) \ 146#define PDMA_PRINTK(format, args...) \
168 printk(KERN_DEBUG format , ## args) 147 dprint(NDEBUG_PSEUDO_DMA, format , ## args)
169#else
170#define PDMA_PRINTK(format, args...)
171#endif
172#if NDEBUG & NDEBUG_QUEUES
173#define QU_PRINTK(format, args...) \ 148#define QU_PRINTK(format, args...) \
174 printk(KERN_DEBUG format , ## args) 149 dprint(NDEBUG_QUEUES, format , ## args)
175#else
176#define QU_PRINTK(format, args...)
177#endif
178#if NDEBUG & NDEBUG_RESELECTION
179#define RSL_PRINTK(format, args...) \ 150#define RSL_PRINTK(format, args...) \
180 printk(KERN_DEBUG format , ## args) 151 dprint(NDEBUG_RESELECTION, format , ## args)
181#else
182#define RSL_PRINTK(format, args...)
183#endif
184#if NDEBUG & NDEBUG_SELECTION
185#define SEL_PRINTK(format, args...) \ 152#define SEL_PRINTK(format, args...) \
186 printk(KERN_DEBUG format , ## args) 153 dprint(NDEBUG_SELECTION, format , ## args)
187#else
188#define SEL_PRINTK(format, args...)
189#endif
190#if NDEBUG & NDEBUG_USLEEP
191#define USL_PRINTK(format, args...) \ 154#define USL_PRINTK(format, args...) \
192 printk(KERN_DEBUG format , ## args) 155 dprint(NDEBUG_USLEEP, format , ## args)
193#else
194#define USL_PRINTK(format, args...)
195#endif
196#if NDEBUG & NDEBUG_LAST_BYTE_SENT
197#define LBS_PRINTK(format, args...) \ 156#define LBS_PRINTK(format, args...) \
198 printk(KERN_DEBUG format , ## args) 157 dprint(NDEBUG_LAST_BYTE_SENT, format , ## args)
199#else
200#define LBS_PRINTK(format, args...)
201#endif
202#if NDEBUG & NDEBUG_RESTART_SELECT
203#define RSS_PRINTK(format, args...) \ 158#define RSS_PRINTK(format, args...) \
204 printk(KERN_DEBUG format , ## args) 159 dprint(NDEBUG_RESTART_SELECT, format , ## args)
205#else
206#define RSS_PRINTK(format, args...)
207#endif
208#if NDEBUG & NDEBUG_EXTENDED
209#define EXT_PRINTK(format, args...) \ 160#define EXT_PRINTK(format, args...) \
210 printk(KERN_DEBUG format , ## args) 161 dprint(NDEBUG_EXTENDED, format , ## args)
211#else
212#define EXT_PRINTK(format, args...)
213#endif
214#if NDEBUG & NDEBUG_ABORT
215#define ABRT_PRINTK(format, args...) \ 162#define ABRT_PRINTK(format, args...) \
216 printk(KERN_DEBUG format , ## args) 163 dprint(NDEBUG_ABORT, format , ## args)
217#else
218#define ABRT_PRINTK(format, args...)
219#endif
220#if NDEBUG & NDEBUG_TAGS
221#define TAG_PRINTK(format, args...) \ 164#define TAG_PRINTK(format, args...) \
222 printk(KERN_DEBUG format , ## args) 165 dprint(NDEBUG_TAGS, format , ## args)
223#else
224#define TAG_PRINTK(format, args...)
225#endif
226#if NDEBUG & NDEBUG_MERGING
227#define MER_PRINTK(format, args...) \ 166#define MER_PRINTK(format, args...) \
228 printk(KERN_DEBUG format , ## args) 167 dprint(NDEBUG_MERGING, format , ## args)
229#else
230#define MER_PRINTK(format, args...)
231#endif
232 168
233/* conditional macros for NCR5380_print_{,phase,status} */ 169/* conditional macros for NCR5380_print_{,phase,status} */
234 170
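The atari_scsi.h change above replaces the per-flag #if/#else printk wrappers with a single dprint() helper gated by NDEBUG; because the call is always compiled (the constant test is optimized away), the format arguments stay type-checked even when a category is disabled. A minimal user-space sketch of the same pattern follows; the NDEBUG values are made up, printf stands in for printk, and do/while replaces the GCC statement expression used in the patch.

#include <stdio.h>

#define NDEBUG_DMA      0x01
#define NDEBUG_INTR     0x02
#define NDEBUG          NDEBUG_DMA              /* categories selected at build time */

/* Same idea as the dprint() in the patch, written as do/while instead of
 * a GCC statement expression. */
#define dprint(flg, fmt, ...)                           \
do {                                                    \
        if (NDEBUG & (flg))                             \
                printf(fmt, ##__VA_ARGS__);             \
} while (0)

#define DMA_PRINTK(fmt, ...)    dprint(NDEBUG_DMA, fmt, ##__VA_ARGS__)
#define INT_PRINTK(fmt, ...)    dprint(NDEBUG_INTR, fmt, ##__VA_ARGS__)

int main(void)
{
        DMA_PRINTK("dma: programming %d bytes\n", 512); /* selected: printed */
        INT_PRINTK("intr: spurious interrupt\n");       /* not selected: branch is
                                                         * dead, arguments still
                                                         * type-checked */
        return 0;
}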
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 61f6024b61ba..2a458d66b6ff 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -202,31 +202,29 @@ static const char * get_sa_name(const struct value_name_pair * arr,
202} 202}
203 203
204/* attempt to guess cdb length if cdb_len==0 . No trailing linefeed. */ 204/* attempt to guess cdb length if cdb_len==0 . No trailing linefeed. */
205static void print_opcode_name(unsigned char * cdbp, int cdb_len, 205static void print_opcode_name(unsigned char * cdbp, int cdb_len)
206 int start_of_line)
207{ 206{
208 int sa, len, cdb0; 207 int sa, len, cdb0;
209 const char * name; 208 const char * name;
210 const char * leadin = start_of_line ? KERN_INFO : "";
211 209
212 cdb0 = cdbp[0]; 210 cdb0 = cdbp[0];
213 switch(cdb0) { 211 switch(cdb0) {
214 case VARIABLE_LENGTH_CMD: 212 case VARIABLE_LENGTH_CMD:
215 len = cdbp[7] + 8; 213 len = cdbp[7] + 8;
216 if (len < 10) { 214 if (len < 10) {
217 printk("%sshort variable length command, " 215 printk("short variable length command, "
218 "len=%d ext_len=%d", leadin, len, cdb_len); 216 "len=%d ext_len=%d", len, cdb_len);
219 break; 217 break;
220 } 218 }
221 sa = (cdbp[8] << 8) + cdbp[9]; 219 sa = (cdbp[8] << 8) + cdbp[9];
222 name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa); 220 name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa);
223 if (name) { 221 if (name) {
224 printk("%s%s", leadin, name); 222 printk("%s", name);
225 if ((cdb_len > 0) && (len != cdb_len)) 223 if ((cdb_len > 0) && (len != cdb_len))
226 printk(", in_cdb_len=%d, ext_len=%d", 224 printk(", in_cdb_len=%d, ext_len=%d",
227 len, cdb_len); 225 len, cdb_len);
228 } else { 226 } else {
229 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 227 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
230 if ((cdb_len > 0) && (len != cdb_len)) 228 if ((cdb_len > 0) && (len != cdb_len))
231 printk(", in_cdb_len=%d, ext_len=%d", 229 printk(", in_cdb_len=%d, ext_len=%d",
232 len, cdb_len); 230 len, cdb_len);
@@ -236,83 +234,80 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len,
236 sa = cdbp[1] & 0x1f; 234 sa = cdbp[1] & 0x1f;
237 name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa); 235 name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa);
238 if (name) 236 if (name)
239 printk("%s%s", leadin, name); 237 printk("%s", name);
240 else 238 else
241 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 239 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
242 break; 240 break;
243 case MAINTENANCE_OUT: 241 case MAINTENANCE_OUT:
244 sa = cdbp[1] & 0x1f; 242 sa = cdbp[1] & 0x1f;
245 name = get_sa_name(maint_out_arr, MAINT_OUT_SZ, sa); 243 name = get_sa_name(maint_out_arr, MAINT_OUT_SZ, sa);
246 if (name) 244 if (name)
247 printk("%s%s", leadin, name); 245 printk("%s", name);
248 else 246 else
249 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 247 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
250 break; 248 break;
251 case SERVICE_ACTION_IN_12: 249 case SERVICE_ACTION_IN_12:
252 sa = cdbp[1] & 0x1f; 250 sa = cdbp[1] & 0x1f;
253 name = get_sa_name(serv_in12_arr, SERV_IN12_SZ, sa); 251 name = get_sa_name(serv_in12_arr, SERV_IN12_SZ, sa);
254 if (name) 252 if (name)
255 printk("%s%s", leadin, name); 253 printk("%s", name);
256 else 254 else
257 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 255 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
258 break; 256 break;
259 case SERVICE_ACTION_OUT_12: 257 case SERVICE_ACTION_OUT_12:
260 sa = cdbp[1] & 0x1f; 258 sa = cdbp[1] & 0x1f;
261 name = get_sa_name(serv_out12_arr, SERV_OUT12_SZ, sa); 259 name = get_sa_name(serv_out12_arr, SERV_OUT12_SZ, sa);
262 if (name) 260 if (name)
263 printk("%s%s", leadin, name); 261 printk("%s", name);
264 else 262 else
265 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 263 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
266 break; 264 break;
267 case SERVICE_ACTION_IN_16: 265 case SERVICE_ACTION_IN_16:
268 sa = cdbp[1] & 0x1f; 266 sa = cdbp[1] & 0x1f;
269 name = get_sa_name(serv_in16_arr, SERV_IN16_SZ, sa); 267 name = get_sa_name(serv_in16_arr, SERV_IN16_SZ, sa);
270 if (name) 268 if (name)
271 printk("%s%s", leadin, name); 269 printk("%s", name);
272 else 270 else
273 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 271 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
274 break; 272 break;
275 case SERVICE_ACTION_OUT_16: 273 case SERVICE_ACTION_OUT_16:
276 sa = cdbp[1] & 0x1f; 274 sa = cdbp[1] & 0x1f;
277 name = get_sa_name(serv_out16_arr, SERV_OUT16_SZ, sa); 275 name = get_sa_name(serv_out16_arr, SERV_OUT16_SZ, sa);
278 if (name) 276 if (name)
279 printk("%s%s", leadin, name); 277 printk("%s", name);
280 else 278 else
281 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 279 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
282 break; 280 break;
283 default: 281 default:
284 if (cdb0 < 0xc0) { 282 if (cdb0 < 0xc0) {
285 name = cdb_byte0_names[cdb0]; 283 name = cdb_byte0_names[cdb0];
286 if (name) 284 if (name)
287 printk("%s%s", leadin, name); 285 printk("%s", name);
288 else 286 else
289 printk("%scdb[0]=0x%x (reserved)", 287 printk("cdb[0]=0x%x (reserved)", cdb0);
290 leadin, cdb0);
291 } else 288 } else
292 printk("%scdb[0]=0x%x (vendor)", leadin, cdb0); 289 printk("cdb[0]=0x%x (vendor)", cdb0);
293 break; 290 break;
294 } 291 }
295} 292}
296 293
297#else /* ifndef CONFIG_SCSI_CONSTANTS */ 294#else /* ifndef CONFIG_SCSI_CONSTANTS */
298 295
299static void print_opcode_name(unsigned char * cdbp, int cdb_len, 296static void print_opcode_name(unsigned char * cdbp, int cdb_len)
300 int start_of_line)
301{ 297{
302 int sa, len, cdb0; 298 int sa, len, cdb0;
303 const char * leadin = start_of_line ? KERN_INFO : "";
304 299
305 cdb0 = cdbp[0]; 300 cdb0 = cdbp[0];
306 switch(cdb0) { 301 switch(cdb0) {
307 case VARIABLE_LENGTH_CMD: 302 case VARIABLE_LENGTH_CMD:
308 len = cdbp[7] + 8; 303 len = cdbp[7] + 8;
309 if (len < 10) { 304 if (len < 10) {
310 printk("%sshort opcode=0x%x command, len=%d " 305 printk("short opcode=0x%x command, len=%d "
311 "ext_len=%d", leadin, cdb0, len, cdb_len); 306 "ext_len=%d", cdb0, len, cdb_len);
312 break; 307 break;
313 } 308 }
314 sa = (cdbp[8] << 8) + cdbp[9]; 309 sa = (cdbp[8] << 8) + cdbp[9];
315 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 310 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
316 if (len != cdb_len) 311 if (len != cdb_len)
317 printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len); 312 printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len);
318 break; 313 break;
@@ -323,49 +318,48 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len,
323 case SERVICE_ACTION_IN_16: 318 case SERVICE_ACTION_IN_16:
324 case SERVICE_ACTION_OUT_16: 319 case SERVICE_ACTION_OUT_16:
325 sa = cdbp[1] & 0x1f; 320 sa = cdbp[1] & 0x1f;
326 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 321 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
327 break; 322 break;
328 default: 323 default:
329 if (cdb0 < 0xc0) 324 if (cdb0 < 0xc0)
330 printk("%scdb[0]=0x%x", leadin, cdb0); 325 printk("cdb[0]=0x%x", cdb0);
331 else 326 else
332 printk("%scdb[0]=0x%x (vendor)", leadin, cdb0); 327 printk("cdb[0]=0x%x (vendor)", cdb0);
333 break; 328 break;
334 } 329 }
335} 330}
336#endif 331#endif
337 332
338void __scsi_print_command(unsigned char *command) 333void __scsi_print_command(unsigned char *cdb)
339{ 334{
340 int k, len; 335 int k, len;
341 336
342 print_opcode_name(command, 0, 1); 337 print_opcode_name(cdb, 0);
343 if (VARIABLE_LENGTH_CMD == command[0]) 338 if (VARIABLE_LENGTH_CMD == cdb[0])
344 len = command[7] + 8; 339 len = cdb[7] + 8;
345 else 340 else
346 len = COMMAND_SIZE(command[0]); 341 len = COMMAND_SIZE(cdb[0]);
347 /* print out all bytes in cdb */ 342 /* print out all bytes in cdb */
348 for (k = 0; k < len; ++k) 343 for (k = 0; k < len; ++k)
349 printk(" %02x", command[k]); 344 printk(" %02x", cdb[k]);
350 printk("\n"); 345 printk("\n");
351} 346}
352EXPORT_SYMBOL(__scsi_print_command); 347EXPORT_SYMBOL(__scsi_print_command);
353 348
354/* This function (perhaps with the addition of peripheral device type) 349void scsi_print_command(struct scsi_cmnd *cmd)
355 * is more approriate than __scsi_print_command(). Perhaps that static
356 * can be dropped later if it replaces the __scsi_print_command version.
357 */
358static void scsi_print_cdb(unsigned char *cdb, int cdb_len, int start_of_line)
359{ 350{
360 int k; 351 int k;
361 352
362 print_opcode_name(cdb, cdb_len, start_of_line); 353 scmd_printk(KERN_INFO, cmd, "CDB: ");
354 print_opcode_name(cmd->cmnd, cmd->cmd_len);
355
363 /* print out all bytes in cdb */ 356 /* print out all bytes in cdb */
364 printk(":"); 357 printk(":");
365 for (k = 0; k < cdb_len; ++k) 358 for (k = 0; k < cmd->cmd_len; ++k)
366 printk(" %02x", cdb[k]); 359 printk(" %02x", cmd->cmnd[k]);
367 printk("\n"); 360 printk("\n");
368} 361}
362EXPORT_SYMBOL(scsi_print_command);
369 363
370/** 364/**
371 * 365 *
@@ -410,7 +404,11 @@ struct error_info {
410 const char * text; 404 const char * text;
411}; 405};
412 406
413static struct error_info additional[] = 407/*
408 * The canonical list of T10 Additional Sense Codes is available at:
409 * http://www.t10.org/lists/asc-num.txt
410 */
411static const struct error_info additional[] =
414{ 412{
415 {0x0000, "No additional sense information"}, 413 {0x0000, "No additional sense information"},
416 {0x0001, "Filemark detected"}, 414 {0x0001, "Filemark detected"},
@@ -714,6 +712,7 @@ static struct error_info additional[] =
714 712
715 {0x2F00, "Commands cleared by another initiator"}, 713 {0x2F00, "Commands cleared by another initiator"},
716 {0x2F01, "Commands cleared by power loss notification"}, 714 {0x2F01, "Commands cleared by power loss notification"},
715 {0x2F02, "Commands cleared by device server"},
717 716
718 {0x3000, "Incompatible medium installed"}, 717 {0x3000, "Incompatible medium installed"},
719 {0x3001, "Cannot read medium - unknown format"}, 718 {0x3001, "Cannot read medium - unknown format"},
@@ -1176,67 +1175,77 @@ scsi_extd_sense_format(unsigned char asc, unsigned char ascq) {
1176} 1175}
1177EXPORT_SYMBOL(scsi_extd_sense_format); 1176EXPORT_SYMBOL(scsi_extd_sense_format);
1178 1177
1179/* Print extended sense information; no leadin, no linefeed */ 1178void
1180static void
1181scsi_show_extd_sense(unsigned char asc, unsigned char ascq) 1179scsi_show_extd_sense(unsigned char asc, unsigned char ascq)
1182{ 1180{
1183 const char *extd_sense_fmt = scsi_extd_sense_format(asc, ascq); 1181 const char *extd_sense_fmt = scsi_extd_sense_format(asc, ascq);
1184 1182
1185 if (extd_sense_fmt) { 1183 if (extd_sense_fmt) {
1186 if (strstr(extd_sense_fmt, "%x")) { 1184 if (strstr(extd_sense_fmt, "%x")) {
1187 printk("Additional sense: "); 1185 printk("Add. Sense: ");
1188 printk(extd_sense_fmt, ascq); 1186 printk(extd_sense_fmt, ascq);
1189 } else 1187 } else
1190 printk("Additional sense: %s", extd_sense_fmt); 1188 printk("Add. Sense: %s", extd_sense_fmt);
1191 } else { 1189 } else {
1192 if (asc >= 0x80) 1190 if (asc >= 0x80)
1193 printk("<<vendor>> ASC=0x%x ASCQ=0x%x", asc, ascq); 1191 printk("<<vendor>> ASC=0x%x ASCQ=0x%x", asc,
1192 ascq);
1194 if (ascq >= 0x80) 1193 if (ascq >= 0x80)
1195 printk("ASC=0x%x <<vendor>> ASCQ=0x%x", asc, ascq); 1194 printk("ASC=0x%x <<vendor>> ASCQ=0x%x", asc,
1195 ascq);
1196 else 1196 else
1197 printk("ASC=0x%x ASCQ=0x%x", asc, ascq); 1197 printk("ASC=0x%x ASCQ=0x%x", asc, ascq);
1198 } 1198 }
1199
1200 printk("\n");
1199} 1201}
1202EXPORT_SYMBOL(scsi_show_extd_sense);
1200 1203
1201void 1204void
1202scsi_print_sense_hdr(const char *name, struct scsi_sense_hdr *sshdr) 1205scsi_show_sense_hdr(struct scsi_sense_hdr *sshdr)
1203{ 1206{
1204 const char *sense_txt; 1207 const char *sense_txt;
1205 /* An example of deferred is when an earlier write to disk cache
1206 * succeeded, but now the disk discovers that it cannot write the
1207 * data to the magnetic media.
1208 */
1209 const char *error = scsi_sense_is_deferred(sshdr) ?
1210 "<<DEFERRED>>" : "Current";
1211 printk(KERN_INFO "%s: %s", name, error);
1212 if (sshdr->response_code >= 0x72)
1213 printk(" [descriptor]");
1214 1208
1215 sense_txt = scsi_sense_key_string(sshdr->sense_key); 1209 sense_txt = scsi_sense_key_string(sshdr->sense_key);
1216 if (sense_txt) 1210 if (sense_txt)
1217 printk(": sense key: %s\n", sense_txt); 1211 printk("Sense Key : %s ", sense_txt);
1218 else 1212 else
1219 printk(": sense key=0x%x\n", sshdr->sense_key); 1213 printk("Sense Key : 0x%x ", sshdr->sense_key);
1220 printk(KERN_INFO " "); 1214
1221 scsi_show_extd_sense(sshdr->asc, sshdr->ascq); 1215 printk("%s", scsi_sense_is_deferred(sshdr) ? "[deferred] " :
1216 "[current] ");
1217
1218 if (sshdr->response_code >= 0x72)
1219 printk("[descriptor]");
1220
1222 printk("\n"); 1221 printk("\n");
1223} 1222}
1223EXPORT_SYMBOL(scsi_show_sense_hdr);
1224
1225/*
1226 * Print normalized SCSI sense header with a prefix.
1227 */
1228void
1229scsi_print_sense_hdr(const char *name, struct scsi_sense_hdr *sshdr)
1230{
1231 printk(KERN_INFO "%s: ", name);
1232 scsi_show_sense_hdr(sshdr);
1233 printk(KERN_INFO "%s: ", name);
1234 scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
1235}
1224EXPORT_SYMBOL(scsi_print_sense_hdr); 1236EXPORT_SYMBOL(scsi_print_sense_hdr);
1225 1237
1226/* Print sense information */
1227void 1238void
1228__scsi_print_sense(const char *name, const unsigned char *sense_buffer, 1239scsi_decode_sense_buffer(const unsigned char *sense_buffer, int sense_len,
1229 int sense_len) 1240 struct scsi_sense_hdr *sshdr)
1230{ 1241{
1231 int k, num, res; 1242 int k, num, res;
1232 unsigned int info;
1233 struct scsi_sense_hdr ssh;
1234 1243
1235 res = scsi_normalize_sense(sense_buffer, sense_len, &ssh); 1244 res = scsi_normalize_sense(sense_buffer, sense_len, sshdr);
1236 if (0 == res) { 1245 if (0 == res) {
1237 /* this may be SCSI-1 sense data */ 1246 /* this may be SCSI-1 sense data */
1238 num = (sense_len < 32) ? sense_len : 32; 1247 num = (sense_len < 32) ? sense_len : 32;
1239 printk(KERN_INFO "Unrecognized sense data (in hex):"); 1248 printk("Unrecognized sense data (in hex):");
1240 for (k = 0; k < num; ++k) { 1249 for (k = 0; k < num; ++k) {
1241 if (0 == (k % 16)) { 1250 if (0 == (k % 16)) {
1242 printk("\n"); 1251 printk("\n");
@@ -1247,11 +1256,20 @@ __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
1247 printk("\n"); 1256 printk("\n");
1248 return; 1257 return;
1249 } 1258 }
1250 scsi_print_sense_hdr(name, &ssh); 1259}
1251 if (ssh.response_code < 0x72) { 1260
1261void
1262scsi_decode_sense_extras(const unsigned char *sense_buffer, int sense_len,
1263 struct scsi_sense_hdr *sshdr)
1264{
1265 int k, num, res;
1266
1267 if (sshdr->response_code < 0x72)
1268 {
1252 /* only decode extras for "fixed" format now */ 1269 /* only decode extras for "fixed" format now */
1253 char buff[80]; 1270 char buff[80];
1254 int blen, fixed_valid; 1271 int blen, fixed_valid;
1272 unsigned int info;
1255 1273
1256 fixed_valid = sense_buffer[0] & 0x80; 1274 fixed_valid = sense_buffer[0] & 0x80;
1257 info = ((sense_buffer[3] << 24) | (sense_buffer[4] << 16) | 1275 info = ((sense_buffer[3] << 24) | (sense_buffer[4] << 16) |
@@ -1281,13 +1299,13 @@ __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
1281 res += snprintf(buff + res, blen - res, "ILI"); 1299 res += snprintf(buff + res, blen - res, "ILI");
1282 } 1300 }
1283 if (res > 0) 1301 if (res > 0)
1284 printk(KERN_INFO "%s\n", buff); 1302 printk("%s\n", buff);
1285 } else if (ssh.additional_length > 0) { 1303 } else if (sshdr->additional_length > 0) {
1286 /* descriptor format with sense descriptors */ 1304 /* descriptor format with sense descriptors */
1287 num = 8 + ssh.additional_length; 1305 num = 8 + sshdr->additional_length;
1288 num = (sense_len < num) ? sense_len : num; 1306 num = (sense_len < num) ? sense_len : num;
1289 printk(KERN_INFO "Descriptor sense data with sense " 1307 printk("Descriptor sense data with sense descriptors "
1290 "descriptors (in hex):"); 1308 "(in hex):");
1291 for (k = 0; k < num; ++k) { 1309 for (k = 0; k < num; ++k) {
1292 if (0 == (k % 16)) { 1310 if (0 == (k % 16)) {
1293 printk("\n"); 1311 printk("\n");
@@ -1295,29 +1313,42 @@ __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
1295 } 1313 }
1296 printk("%02x ", sense_buffer[k]); 1314 printk("%02x ", sense_buffer[k]);
1297 } 1315 }
1316
1298 printk("\n"); 1317 printk("\n");
1299 } 1318 }
1319
1300} 1320}
1301EXPORT_SYMBOL(__scsi_print_sense);
1302 1321
1303void scsi_print_sense(const char *devclass, struct scsi_cmnd *cmd) 1322/* Normalize and print sense buffer with name prefix */
1323void __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
1324 int sense_len)
1304{ 1325{
1305 const char *name = devclass; 1326 struct scsi_sense_hdr sshdr;
1306 1327
1307 if (cmd->request->rq_disk) 1328 printk(KERN_INFO "%s: ", name);
1308 name = cmd->request->rq_disk->disk_name; 1329 scsi_decode_sense_buffer(sense_buffer, sense_len, &sshdr);
1309 __scsi_print_sense(name, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); 1330 scsi_show_sense_hdr(&sshdr);
1331 scsi_decode_sense_extras(sense_buffer, sense_len, &sshdr);
1332 printk(KERN_INFO "%s: ", name);
1333 scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
1310} 1334}
1311EXPORT_SYMBOL(scsi_print_sense); 1335EXPORT_SYMBOL(__scsi_print_sense);
1312 1336
1313void scsi_print_command(struct scsi_cmnd *cmd) 1337/* Normalize and print sense buffer in SCSI command */
1338void scsi_print_sense(char *name, struct scsi_cmnd *cmd)
1314{ 1339{
1315 /* Assume appended output (i.e. not at start of line) */ 1340 struct scsi_sense_hdr sshdr;
1316 sdev_printk("", cmd->device, "\n"); 1341
1317 printk(KERN_INFO " command: "); 1342 scmd_printk(KERN_INFO, cmd, "");
1318 scsi_print_cdb(cmd->cmnd, cmd->cmd_len, 0); 1343 scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
1344 &sshdr);
1345 scsi_show_sense_hdr(&sshdr);
1346 scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
1347 &sshdr);
1348 scmd_printk(KERN_INFO, cmd, "");
1349 scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
1319} 1350}
1320EXPORT_SYMBOL(scsi_print_command); 1351EXPORT_SYMBOL(scsi_print_sense);
1321 1352
1322#ifdef CONFIG_SCSI_CONSTANTS 1353#ifdef CONFIG_SCSI_CONSTANTS
1323 1354
@@ -1327,25 +1358,6 @@ static const char * const hostbyte_table[]={
1327"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY"}; 1358"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY"};
1328#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table) 1359#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)
1329 1360
1330void scsi_print_hostbyte(int scsiresult)
1331{
1332 int hb = host_byte(scsiresult);
1333
1334 printk("Hostbyte=0x%02x", hb);
1335 if (hb < NUM_HOSTBYTE_STRS)
1336 printk("(%s) ", hostbyte_table[hb]);
1337 else
1338 printk("is invalid ");
1339}
1340#else
1341void scsi_print_hostbyte(int scsiresult)
1342{
1343 printk("Hostbyte=0x%02x ", host_byte(scsiresult));
1344}
1345#endif
1346
1347#ifdef CONFIG_SCSI_CONSTANTS
1348
1349static const char * const driverbyte_table[]={ 1361static const char * const driverbyte_table[]={
1350"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR", 1362"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
1351"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"}; 1363"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"};
@@ -1356,19 +1368,35 @@ static const char * const driversuggest_table[]={"SUGGEST_OK",
1356"SUGGEST_5", "SUGGEST_6", "SUGGEST_7", "SUGGEST_SENSE"}; 1368"SUGGEST_5", "SUGGEST_6", "SUGGEST_7", "SUGGEST_SENSE"};
1357#define NUM_SUGGEST_STRS ARRAY_SIZE(driversuggest_table) 1369#define NUM_SUGGEST_STRS ARRAY_SIZE(driversuggest_table)
1358 1370
1359void scsi_print_driverbyte(int scsiresult) 1371void scsi_show_result(int result)
1360{ 1372{
1361 int dr = (driver_byte(scsiresult) & DRIVER_MASK); 1373 int hb = host_byte(result);
1362 int su = ((driver_byte(scsiresult) & SUGGEST_MASK) >> 4); 1374 int db = (driver_byte(result) & DRIVER_MASK);
1375 int su = ((driver_byte(result) & SUGGEST_MASK) >> 4);
1363 1376
1364 printk("Driverbyte=0x%02x ", driver_byte(scsiresult)); 1377 printk("Result: hostbyte=%s driverbyte=%s,%s\n",
1365 printk("(%s,%s) ", 1378 (hb < NUM_HOSTBYTE_STRS ? hostbyte_table[hb] : "invalid"),
1366 (dr < NUM_DRIVERBYTE_STRS ? driverbyte_table[dr] : "invalid"), 1379 (db < NUM_DRIVERBYTE_STRS ? driverbyte_table[db] : "invalid"),
1367 (su < NUM_SUGGEST_STRS ? driversuggest_table[su] : "invalid")); 1380 (su < NUM_SUGGEST_STRS ? driversuggest_table[su] : "invalid"));
1368} 1381}
1382
1369#else 1383#else
1370void scsi_print_driverbyte(int scsiresult) 1384
1385void scsi_show_result(int result)
1371{ 1386{
1372 printk("Driverbyte=0x%02x ", driver_byte(scsiresult)); 1387 printk("Result: hostbyte=0x%02x driverbyte=0x%02x\n",
1388 host_byte(result), driver_byte(result));
1373} 1389}
1390
1374#endif 1391#endif
1392EXPORT_SYMBOL(scsi_show_result);
1393
1394
1395void scsi_print_result(struct scsi_cmnd *cmd)
1396{
1397 scmd_printk(KERN_INFO, cmd, "");
1398 scsi_show_result(cmd->result);
1399}
1400EXPORT_SYMBOL(scsi_print_result);
1401
1402
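The constants.c rework above folds the old scsi_print_hostbyte()/scsi_print_driverbyte() pair into scsi_show_result(). A standalone sketch of that decoding, not taken from the patch: the table contents are abbreviated, and the byte positions follow the long-standing Linux result layout (host byte in bits 16-23, driver byte in bits 24-31, low nibble masked as with DRIVER_MASK).

#include <stdio.h>

static const char *const hostbyte_table[] = {
        "DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT",
};
static const char *const driverbyte_table[] = {
        "DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA",
};

#define NUM_ENTRIES(t)  (sizeof(t) / sizeof((t)[0]))

static void show_result(unsigned int result)
{
        unsigned int hb = (result >> 16) & 0xff;        /* host_byte()                 */
        unsigned int db = (result >> 24) & 0x0f;        /* driver_byte() & DRIVER_MASK */

        printf("Result: hostbyte=%s driverbyte=%s\n",
               hb < NUM_ENTRIES(hostbyte_table) ? hostbyte_table[hb] : "invalid",
               db < NUM_ENTRIES(driverbyte_table) ? driverbyte_table[db] : "invalid");
}

int main(void)
{
        show_result(0x00000000);        /* DID_OK / DRIVER_OK */
        show_result(0x00030000);        /* DID_TIME_OUT       */
        return 0;
}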
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
index 5a49216fe4cf..100b49baca7f 100644
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ b/drivers/scsi/dpt/dpti_i2o.h
@@ -31,7 +31,7 @@
31 * Tunable parameters first 31 * Tunable parameters first
32 */ 32 */
33 33
34/* How many different OSM's are we allowing */ 34/* How many different OSM's are we allowing */
35#define MAX_I2O_MODULES 64 35#define MAX_I2O_MODULES 64
36 36
37#define I2O_EVT_CAPABILITY_OTHER 0x01 37#define I2O_EVT_CAPABILITY_OTHER 0x01
@@ -63,7 +63,7 @@ struct i2o_message
63 u16 size; 63 u16 size;
64 u32 target_tid:12; 64 u32 target_tid:12;
65 u32 init_tid:12; 65 u32 init_tid:12;
66 u32 function:8; 66 u32 function:8;
67 u32 initiator_context; 67 u32 initiator_context;
68 /* List follows */ 68 /* List follows */
69}; 69};
@@ -77,7 +77,7 @@ struct i2o_device
77 77
78 char dev_name[8]; /* linux /dev name if available */ 78 char dev_name[8]; /* linux /dev name if available */
79 i2o_lct_entry lct_data;/* Device LCT information */ 79 i2o_lct_entry lct_data;/* Device LCT information */
80 u32 flags; 80 u32 flags;
81 struct proc_dir_entry* proc_entry; /* /proc dir */ 81 struct proc_dir_entry* proc_entry; /* /proc dir */
82 struct adpt_device *owner; 82 struct adpt_device *owner;
83 struct _adpt_hba *controller; /* Controlling IOP */ 83 struct _adpt_hba *controller; /* Controlling IOP */
@@ -86,7 +86,7 @@ struct i2o_device
86/* 86/*
87 * Each I2O controller has one of these objects 87 * Each I2O controller has one of these objects
88 */ 88 */
89 89
90struct i2o_controller 90struct i2o_controller
91{ 91{
92 char name[16]; 92 char name[16];
@@ -111,9 +111,9 @@ struct i2o_sys_tbl_entry
111 u32 iop_id:12; 111 u32 iop_id:12;
112 u32 reserved2:20; 112 u32 reserved2:20;
113 u16 seg_num:12; 113 u16 seg_num:12;
114 u16 i2o_version:4; 114 u16 i2o_version:4;
115 u8 iop_state; 115 u8 iop_state;
116 u8 msg_type; 116 u8 msg_type;
117 u16 frame_size; 117 u16 frame_size;
118 u16 reserved3; 118 u16 reserved3;
119 u32 last_changed; 119 u32 last_changed;
@@ -124,14 +124,14 @@ struct i2o_sys_tbl_entry
124 124
125struct i2o_sys_tbl 125struct i2o_sys_tbl
126{ 126{
127 u8 num_entries; 127 u8 num_entries;
128 u8 version; 128 u8 version;
129 u16 reserved1; 129 u16 reserved1;
130 u32 change_ind; 130 u32 change_ind;
131 u32 reserved2; 131 u32 reserved2;
132 u32 reserved3; 132 u32 reserved3;
133 struct i2o_sys_tbl_entry iops[0]; 133 struct i2o_sys_tbl_entry iops[0];
134}; 134};
135 135
136/* 136/*
137 * I2O classes / subclasses 137 * I2O classes / subclasses
@@ -146,7 +146,7 @@ struct i2o_sys_tbl
146/* Class code names 146/* Class code names
147 * (from v1.5 Table 6-1 Class Code Assignments.) 147 * (from v1.5 Table 6-1 Class Code Assignments.)
148 */ 148 */
149 149
150#define I2O_CLASS_EXECUTIVE 0x000 150#define I2O_CLASS_EXECUTIVE 0x000
151#define I2O_CLASS_DDM 0x001 151#define I2O_CLASS_DDM 0x001
152#define I2O_CLASS_RANDOM_BLOCK_STORAGE 0x010 152#define I2O_CLASS_RANDOM_BLOCK_STORAGE 0x010
@@ -166,7 +166,7 @@ struct i2o_sys_tbl
166 166
167/* Rest of 0x092 - 0x09f reserved for peer-to-peer classes 167/* Rest of 0x092 - 0x09f reserved for peer-to-peer classes
168 */ 168 */
169 169
170#define I2O_CLASS_MATCH_ANYCLASS 0xffffffff 170#define I2O_CLASS_MATCH_ANYCLASS 0xffffffff
171 171
172/* Subclasses 172/* Subclasses
@@ -175,7 +175,7 @@ struct i2o_sys_tbl
175#define I2O_SUBCLASS_i960 0x001 175#define I2O_SUBCLASS_i960 0x001
176#define I2O_SUBCLASS_HDM 0x020 176#define I2O_SUBCLASS_HDM 0x020
177#define I2O_SUBCLASS_ISM 0x021 177#define I2O_SUBCLASS_ISM 0x021
178 178
179/* Operation functions */ 179/* Operation functions */
180 180
181#define I2O_PARAMS_FIELD_GET 0x0001 181#define I2O_PARAMS_FIELD_GET 0x0001
@@ -219,7 +219,7 @@ struct i2o_sys_tbl
219/* 219/*
220 * Messaging API values 220 * Messaging API values
221 */ 221 */
222 222
223#define I2O_CMD_ADAPTER_ASSIGN 0xB3 223#define I2O_CMD_ADAPTER_ASSIGN 0xB3
224#define I2O_CMD_ADAPTER_READ 0xB2 224#define I2O_CMD_ADAPTER_READ 0xB2
225#define I2O_CMD_ADAPTER_RELEASE 0xB5 225#define I2O_CMD_ADAPTER_RELEASE 0xB5
@@ -284,16 +284,16 @@ struct i2o_sys_tbl
284#define I2O_PRIVATE_MSG 0xFF 284#define I2O_PRIVATE_MSG 0xFF
285 285
286/* 286/*
287 * Init Outbound Q status 287 * Init Outbound Q status
288 */ 288 */
289 289
290#define I2O_CMD_OUTBOUND_INIT_IN_PROGRESS 0x01 290#define I2O_CMD_OUTBOUND_INIT_IN_PROGRESS 0x01
291#define I2O_CMD_OUTBOUND_INIT_REJECTED 0x02 291#define I2O_CMD_OUTBOUND_INIT_REJECTED 0x02
292#define I2O_CMD_OUTBOUND_INIT_FAILED 0x03 292#define I2O_CMD_OUTBOUND_INIT_FAILED 0x03
293#define I2O_CMD_OUTBOUND_INIT_COMPLETE 0x04 293#define I2O_CMD_OUTBOUND_INIT_COMPLETE 0x04
294 294
295/* 295/*
296 * I2O Get Status State values 296 * I2O Get Status State values
297 */ 297 */
298 298
299#define ADAPTER_STATE_INITIALIZING 0x01 299#define ADAPTER_STATE_INITIALIZING 0x01
@@ -303,7 +303,7 @@ struct i2o_sys_tbl
303#define ADAPTER_STATE_OPERATIONAL 0x08 303#define ADAPTER_STATE_OPERATIONAL 0x08
304#define ADAPTER_STATE_FAILED 0x10 304#define ADAPTER_STATE_FAILED 0x10
305#define ADAPTER_STATE_FAULTED 0x11 305#define ADAPTER_STATE_FAULTED 0x11
306 306
307/* I2O API function return values */ 307/* I2O API function return values */
308 308
309#define I2O_RTN_NO_ERROR 0 309#define I2O_RTN_NO_ERROR 0
@@ -321,9 +321,9 @@ struct i2o_sys_tbl
321 321
322/* Reply message status defines for all messages */ 322/* Reply message status defines for all messages */
323 323
324#define I2O_REPLY_STATUS_SUCCESS 0x00 324#define I2O_REPLY_STATUS_SUCCESS 0x00
325#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01 325#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
326#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02 326#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
327#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03 327#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
328#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04 328#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
329#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05 329#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
@@ -338,7 +338,7 @@ struct i2o_sys_tbl
338 338
339#define I2O_PARAMS_STATUS_SUCCESS 0x00 339#define I2O_PARAMS_STATUS_SUCCESS 0x00
340#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01 340#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
341#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02 341#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
342#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03 342#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
343#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04 343#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
344#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05 344#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
@@ -390,7 +390,7 @@ struct i2o_sys_tbl
390#define I2O_CLAIM_MANAGEMENT 0x02000000 390#define I2O_CLAIM_MANAGEMENT 0x02000000
391#define I2O_CLAIM_AUTHORIZED 0x03000000 391#define I2O_CLAIM_AUTHORIZED 0x03000000
392#define I2O_CLAIM_SECONDARY 0x04000000 392#define I2O_CLAIM_SECONDARY 0x04000000
393 393
394/* Message header defines for VersionOffset */ 394/* Message header defines for VersionOffset */
395#define I2OVER15 0x0001 395#define I2OVER15 0x0001
396#define I2OVER20 0x0002 396#define I2OVER20 0x0002
diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h
index 82d24864be0c..cc784e8f6e9d 100644
--- a/drivers/scsi/dpt/dpti_ioctl.h
+++ b/drivers/scsi/dpt/dpti_ioctl.h
@@ -99,7 +99,7 @@ typedef struct {
99 uCHAR eataVersion; /* EATA Version */ 99 uCHAR eataVersion; /* EATA Version */
100 uLONG cpLength; /* EATA Command Packet Length */ 100 uLONG cpLength; /* EATA Command Packet Length */
101 uLONG spLength; /* EATA Status Packet Length */ 101 uLONG spLength; /* EATA Status Packet Length */
102 uCHAR drqNum; /* DRQ Index (0,5,6,7) */ 102 uCHAR drqNum; /* DRQ Index (0,5,6,7) */
103 uCHAR flag1; /* EATA Flags 1 (Byte 9) */ 103 uCHAR flag1; /* EATA Flags 1 (Byte 9) */
104 uCHAR flag2; /* EATA Flags 2 (Byte 30) */ 104 uCHAR flag2; /* EATA Flags 2 (Byte 30) */
105} CtrlInfo; 105} CtrlInfo;
diff --git a/drivers/scsi/dpt/dptsig.h b/drivers/scsi/dpt/dptsig.h
index 4bf447792129..94bc894d1200 100644
--- a/drivers/scsi/dpt/dptsig.h
+++ b/drivers/scsi/dpt/dptsig.h
@@ -145,8 +145,8 @@ typedef unsigned long sigLONG;
145#define FT_LOGGER 12 /* Event Logger */ 145#define FT_LOGGER 12 /* Event Logger */
146#define FT_INSTALL 13 /* An Install Program */ 146#define FT_INSTALL 13 /* An Install Program */
147#define FT_LIBRARY 14 /* Storage Manager Real-Mode Calls */ 147#define FT_LIBRARY 14 /* Storage Manager Real-Mode Calls */
148#define FT_RESOURCE 15 /* Storage Manager Resource File */ 148#define FT_RESOURCE 15 /* Storage Manager Resource File */
149#define FT_MODEM_DB 16 /* Storage Manager Modem Database */ 149#define FT_MODEM_DB 16 /* Storage Manager Modem Database */
150 150
151/* Filetype flags - sigBYTE dsFiletypeFlags; FLAG BITS */ 151/* Filetype flags - sigBYTE dsFiletypeFlags; FLAG BITS */
152/* ------------------------------------------------------------------ */ 152/* ------------------------------------------------------------------ */
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index cd36e81b2d93..f7b9dbd64a96 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -195,8 +195,6 @@ static int adpt_detect(struct scsi_host_template* sht)
195 pci_dev_get(pDev); 195 pci_dev_get(pDev);
196 } 196 }
197 } 197 }
198 if (pDev)
199 pci_dev_put(pDev);
200 198
201 /* In INIT state, Activate IOPs */ 199 /* In INIT state, Activate IOPs */
202 for (pHba = hba_chain; pHba; pHba = pHba->next) { 200 for (pHba = hba_chain; pHba; pHba = pHba->next) {
diff --git a/drivers/scsi/eata_generic.h b/drivers/scsi/eata_generic.h
index 635c14861f86..5016af5cf860 100644
--- a/drivers/scsi/eata_generic.h
+++ b/drivers/scsi/eata_generic.h
@@ -18,13 +18,6 @@
18 * Misc. definitions * 18 * Misc. definitions *
19 *********************************************/ 19 *********************************************/
20 20
21#ifndef TRUE
22#define TRUE 1
23#endif
24#ifndef FALSE
25#define FALSE 0
26#endif
27
28#define R_LIMIT 0x20000 21#define R_LIMIT 0x20000
29 22
30#define MAXISA 4 23#define MAXISA 4
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 99ce03331b64..ec71061aef61 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -2212,7 +2212,7 @@ static void __devinit esp_init_swstate(struct esp *esp)
2212} 2212}
2213 2213
2214/* This places the ESP into a known state at boot time. */ 2214/* This places the ESP into a known state at boot time. */
2215static void __devinit esp_bootup_reset(struct esp *esp) 2215static void esp_bootup_reset(struct esp *esp)
2216{ 2216{
2217 u8 val; 2217 u8 val;
2218 2218
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index fbc1d5c3b0a7..b10eefe735c5 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -85,7 +85,7 @@
85static int max_id = 64; 85static int max_id = 64;
86static int max_channel = 3; 86static int max_channel = 3;
87static int init_timeout = 5; 87static int init_timeout = 5;
88static int max_requests = 50; 88static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
89 89
90#define IBMVSCSI_VERSION "1.5.8" 90#define IBMVSCSI_VERSION "1.5.8"
91 91
@@ -538,7 +538,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
538 int request_status; 538 int request_status;
539 int rc; 539 int rc;
540 540
541 /* If we have exhausted our request limit, just fail this request. 541 /* If we have exhausted our request limit, just fail this request,
542 * unless it is for a reset or abort.
542 * Note that there are rare cases involving driver generated requests 543 * Note that there are rare cases involving driver generated requests
543 * (such as task management requests) that the mid layer may think we 544 * (such as task management requests) that the mid layer may think we
544 * can handle more requests (can_queue) when we actually can't 545 * can handle more requests (can_queue) when we actually can't
@@ -551,9 +552,30 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
551 */ 552 */
552 if (request_status < -1) 553 if (request_status < -1)
553 goto send_error; 554 goto send_error;
554 /* Otherwise, if we have run out of requests */ 555 /* Otherwise, we may have run out of requests. */
555 else if (request_status < 0) 556 /* Abort and reset calls should make it through.
556 goto send_busy; 557 * Nothing except abort and reset should use the last two
558 * slots unless we had two or less to begin with.
559 */
560 else if (request_status < 2 &&
561 evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
562 /* In the case that we have less than two requests
563 * available, check the server limit as a combination
564 * of the request limit and the number of requests
565 * in-flight (the size of the send list). If the
566 * server limit is greater than 2, return busy so
567 * that the last two are reserved for reset and abort.
568 */
569 int server_limit = request_status;
570 struct srp_event_struct *tmp_evt;
571
572 list_for_each_entry(tmp_evt, &hostdata->sent, list) {
573 server_limit++;
574 }
575
576 if (server_limit > 2)
577 goto send_busy;
578 }
557 } 579 }
558 580
559 /* Copy the IU into the transfer area */ 581 /* Copy the IU into the transfer area */
@@ -572,6 +594,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
572 594
573 printk(KERN_ERR "ibmvscsi: send error %d\n", 595 printk(KERN_ERR "ibmvscsi: send error %d\n",
574 rc); 596 rc);
597 atomic_inc(&hostdata->request_limit);
575 goto send_error; 598 goto send_error;
576 } 599 }
577 600
@@ -581,7 +604,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
581 unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev); 604 unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
582 605
583 free_event_struct(&hostdata->pool, evt_struct); 606 free_event_struct(&hostdata->pool, evt_struct);
584 return SCSI_MLQUEUE_HOST_BUSY; 607 atomic_inc(&hostdata->request_limit);
608 return SCSI_MLQUEUE_HOST_BUSY;
585 609
586 send_error: 610 send_error:
587 unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev); 611 unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
@@ -831,23 +855,16 @@ static void login_rsp(struct srp_event_struct *evt_struct)
831 855
832 printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n"); 856 printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
833 857
834 if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta > 858 if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
835 (max_requests - 2)) 859 printk(KERN_ERR "ibmvscsi: Invalid request_limit.\n");
836 evt_struct->xfer_iu->srp.login_rsp.req_lim_delta =
837 max_requests - 2;
838 860
839 /* Now we know what the real request-limit is */ 861 /* Now we know what the real request-limit is.
862 * This value is set rather than added to request_limit because
863 * request_limit could have been set to -1 by this client.
864 */
840 atomic_set(&hostdata->request_limit, 865 atomic_set(&hostdata->request_limit,
841 evt_struct->xfer_iu->srp.login_rsp.req_lim_delta); 866 evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
842 867
843 hostdata->host->can_queue =
844 evt_struct->xfer_iu->srp.login_rsp.req_lim_delta - 2;
845
846 if (hostdata->host->can_queue < 1) {
847 printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n");
848 return;
849 }
850
851 /* If we had any pending I/Os, kick them */ 868 /* If we had any pending I/Os, kick them */
852 scsi_unblock_requests(hostdata->host); 869 scsi_unblock_requests(hostdata->host);
853 870
@@ -1337,6 +1354,27 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1337 return rc; 1354 return rc;
1338} 1355}
1339 1356
1357/**
1358 * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
1359 * @sdev: struct scsi_device device to configure
1360 *
1361 * Enable allow_restart for a device if it is a disk. Adjust the
1362 * queue_depth here also as is required by the documentation for
1363 * struct scsi_host_template.
1364 */
1365static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1366{
1367 struct Scsi_Host *shost = sdev->host;
1368 unsigned long lock_flags = 0;
1369
1370 spin_lock_irqsave(shost->host_lock, lock_flags);
1371 if (sdev->type == TYPE_DISK)
1372 sdev->allow_restart = 1;
1373 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1374 spin_unlock_irqrestore(shost->host_lock, lock_flags);
1375 return 0;
1376}
1377
1340/* ------------------------------------------------------------ 1378/* ------------------------------------------------------------
1341 * sysfs attributes 1379 * sysfs attributes
1342 */ 1380 */
@@ -1482,8 +1520,9 @@ static struct scsi_host_template driver_template = {
1482 .queuecommand = ibmvscsi_queuecommand, 1520 .queuecommand = ibmvscsi_queuecommand,
1483 .eh_abort_handler = ibmvscsi_eh_abort_handler, 1521 .eh_abort_handler = ibmvscsi_eh_abort_handler,
1484 .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler, 1522 .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
1523 .slave_configure = ibmvscsi_slave_configure,
1485 .cmd_per_lun = 16, 1524 .cmd_per_lun = 16,
1486 .can_queue = 1, /* Updated after SRP_LOGIN */ 1525 .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
1487 .this_id = -1, 1526 .this_id = -1,
1488 .sg_tablesize = SG_ALL, 1527 .sg_tablesize = SG_ALL,
1489 .use_clustering = ENABLE_CLUSTERING, 1528 .use_clustering = ENABLE_CLUSTERING,
@@ -1503,6 +1542,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1503 1542
1504 vdev->dev.driver_data = NULL; 1543 vdev->dev.driver_data = NULL;
1505 1544
1545 driver_template.can_queue = max_requests;
1506 host = scsi_host_alloc(&driver_template, sizeof(*hostdata)); 1546 host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
1507 if (!host) { 1547 if (!host) {
1508 printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n"); 1548 printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
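The ibmvscsi_send_srp_event() change above reserves the last two request slots for task-management (abort/reset) traffic. Below is a simplified standalone sketch of that policy; the names, the non-atomic counters and the sample numbers are illustrative, and the real driver works on the post-decrement value of an atomic request_limit plus the length of its sent list.

#include <stdio.h>
#include <stdbool.h>

static int free_slots = 1;      /* slots left after the atomic decrement */
static int in_flight = 5;       /* requests already on the 'sent' list   */

static bool may_send(bool is_tsk_mgmt)
{
        if (free_slots < 0)
                return false;                   /* hard failure: limit exhausted */

        if (free_slots < 2 && !is_tsk_mgmt) {
                /* Server limit = free slots + requests in flight.  Only report
                 * busy when the adapter granted more than two slots overall,
                 * so abort/reset can still get through when the limit is
                 * nearly used up. */
                int server_limit = free_slots + in_flight;

                if (server_limit > 2)
                        return false;           /* busy: slot kept for abort/reset */
        }
        return true;
}

int main(void)
{
        printf("normal command allowed: %d\n", may_send(false));       /* 0 */
        printf("abort/reset allowed:    %d\n", may_send(true));        /* 1 */
        return 0;
}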
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 5c6d93582929..77cc1d40f5bb 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -44,6 +44,8 @@ struct Scsi_Host;
44 */ 44 */
45#define MAX_INDIRECT_BUFS 10 45#define MAX_INDIRECT_BUFS 10
46 46
47#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100
48
47/* ------------------------------------------------------------ 49/* ------------------------------------------------------------
48 * Data Structures 50 * Data Structures
49 */ 51 */
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index a39a478bb39a..6d223dd76440 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -35,7 +35,7 @@
35#include "ibmvscsi.h" 35#include "ibmvscsi.h"
36 36
37#define INITIAL_SRP_LIMIT 16 37#define INITIAL_SRP_LIMIT 16
38#define DEFAULT_MAX_SECTORS 512 38#define DEFAULT_MAX_SECTORS 256
39 39
40#define TGT_NAME "ibmvstgt" 40#define TGT_NAME "ibmvstgt"
41 41
@@ -248,8 +248,8 @@ static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
248 md[i].va + mdone); 248 md[i].va + mdone);
249 249
250 if (err != H_SUCCESS) { 250 if (err != H_SUCCESS) {
251 eprintk("rdma error %d %d\n", dir, slen); 251 eprintk("rdma error %d %d %ld\n", dir, slen, err);
252 goto out; 252 return -EIO;
253 } 253 }
254 254
255 mlen -= slen; 255 mlen -= slen;
@@ -265,45 +265,35 @@ static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
265 if (sidx > nsg) { 265 if (sidx > nsg) {
266 eprintk("out of sg %p %d %d\n", 266 eprintk("out of sg %p %d %d\n",
267 iue, sidx, nsg); 267 iue, sidx, nsg);
268 goto out; 268 return -EIO;
269 } 269 }
270 } 270 }
271 }; 271 };
272 272
273 rest -= mlen; 273 rest -= mlen;
274 } 274 }
275out:
276
277 return 0; 275 return 0;
278} 276}
279 277
280static int ibmvstgt_transfer_data(struct scsi_cmnd *sc,
281 void (*done)(struct scsi_cmnd *))
282{
283 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
284 int err;
285
286 err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
287
288 done(sc);
289
290 return err;
291}
292
293static int ibmvstgt_cmd_done(struct scsi_cmnd *sc, 278static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
294 void (*done)(struct scsi_cmnd *)) 279 void (*done)(struct scsi_cmnd *))
295{ 280{
296 unsigned long flags; 281 unsigned long flags;
297 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr; 282 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
298 struct srp_target *target = iue->target; 283 struct srp_target *target = iue->target;
284 int err = 0;
299 285
300 dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]); 286 dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
287 cmd->usg_sg);
288
289 if (sc->use_sg)
290 err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
301 291
302 spin_lock_irqsave(&target->lock, flags); 292 spin_lock_irqsave(&target->lock, flags);
303 list_del(&iue->ilist); 293 list_del(&iue->ilist);
304 spin_unlock_irqrestore(&target->lock, flags); 294 spin_unlock_irqrestore(&target->lock, flags);
305 295
306 if (sc->result != SAM_STAT_GOOD) { 296 if (err|| sc->result != SAM_STAT_GOOD) {
307 eprintk("operation failed %p %d %x\n", 297 eprintk("operation failed %p %d %x\n",
308 iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]); 298 iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
309 send_rsp(iue, sc, HARDWARE_ERROR, 0x00); 299 send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
@@ -503,7 +493,8 @@ static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
503{ 493{
504 struct vio_port *vport = target_to_port(target); 494 struct vio_port *vport = target_to_port(target);
505 struct iu_entry *iue; 495 struct iu_entry *iue;
506 long err, done; 496 long err;
497 int done = 1;
507 498
508 iue = srp_iu_get(target); 499 iue = srp_iu_get(target);
509 if (!iue) { 500 if (!iue) {
@@ -518,7 +509,6 @@ static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
518 509
519 if (err != H_SUCCESS) { 510 if (err != H_SUCCESS) {
520 eprintk("%ld transferring data error %p\n", err, iue); 511 eprintk("%ld transferring data error %p\n", err, iue);
521 done = 1;
522 goto out; 512 goto out;
523 } 513 }
524 514
@@ -794,7 +784,6 @@ static struct scsi_host_template ibmvstgt_sht = {
794 .use_clustering = DISABLE_CLUSTERING, 784 .use_clustering = DISABLE_CLUSTERING,
795 .max_sectors = DEFAULT_MAX_SECTORS, 785 .max_sectors = DEFAULT_MAX_SECTORS,
796 .transfer_response = ibmvstgt_cmd_done, 786 .transfer_response = ibmvstgt_cmd_done,
797 .transfer_data = ibmvstgt_transfer_data,
798 .eh_abort_handler = ibmvstgt_eh_abort_handler, 787 .eh_abort_handler = ibmvstgt_eh_abort_handler,
799 .tsk_mgmt_response = ibmvstgt_tsk_mgmt_response, 788 .tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
800 .shost_attrs = ibmvstgt_attrs, 789 .shost_attrs = ibmvstgt_attrs,
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e9bd29975db4..2c7b77e833f9 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -89,10 +89,9 @@ static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
89static unsigned int ipr_max_speed = 1; 89static unsigned int ipr_max_speed = 1;
90static int ipr_testmode = 0; 90static int ipr_testmode = 0;
91static unsigned int ipr_fastfail = 0; 91static unsigned int ipr_fastfail = 0;
92static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT; 92static unsigned int ipr_transop_timeout = 0;
93static unsigned int ipr_enable_cache = 1; 93static unsigned int ipr_enable_cache = 1;
94static unsigned int ipr_debug = 0; 94static unsigned int ipr_debug = 0;
95static int ipr_auto_create = 1;
96static DEFINE_SPINLOCK(ipr_driver_lock); 95static DEFINE_SPINLOCK(ipr_driver_lock);
97 96
98/* This table describes the differences between DMA controller chips */ 97/* This table describes the differences between DMA controller chips */
@@ -159,15 +158,13 @@ module_param_named(enable_cache, ipr_enable_cache, int, 0);
159MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)"); 158MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
160module_param_named(debug, ipr_debug, int, 0); 159module_param_named(debug, ipr_debug, int, 0);
161MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)"); 160MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
162module_param_named(auto_create, ipr_auto_create, int, 0);
163MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
164MODULE_LICENSE("GPL"); 161MODULE_LICENSE("GPL");
165MODULE_VERSION(IPR_DRIVER_VERSION); 162MODULE_VERSION(IPR_DRIVER_VERSION);
166 163
167/* A constant array of IOASCs/URCs/Error Messages */ 164/* A constant array of IOASCs/URCs/Error Messages */
168static const 165static const
169struct ipr_error_table_t ipr_error_table[] = { 166struct ipr_error_table_t ipr_error_table[] = {
170 {0x00000000, 1, 1, 167 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
171 "8155: An unknown error was received"}, 168 "8155: An unknown error was received"},
172 {0x00330000, 0, 0, 169 {0x00330000, 0, 0,
173 "Soft underlength error"}, 170 "Soft underlength error"},
@@ -175,37 +172,37 @@ struct ipr_error_table_t ipr_error_table[] = {
175 "Command to be cancelled not found"}, 172 "Command to be cancelled not found"},
176 {0x00808000, 0, 0, 173 {0x00808000, 0, 0,
177 "Qualified success"}, 174 "Qualified success"},
178 {0x01080000, 1, 1, 175 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
179 "FFFE: Soft device bus error recovered by the IOA"}, 176 "FFFE: Soft device bus error recovered by the IOA"},
180 {0x01088100, 0, 1, 177 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
181 "4101: Soft device bus fabric error"}, 178 "4101: Soft device bus fabric error"},
182 {0x01170600, 0, 1, 179 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
183 "FFF9: Device sector reassign successful"}, 180 "FFF9: Device sector reassign successful"},
184 {0x01170900, 0, 1, 181 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
185 "FFF7: Media error recovered by device rewrite procedures"}, 182 "FFF7: Media error recovered by device rewrite procedures"},
186 {0x01180200, 0, 1, 183 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
187 "7001: IOA sector reassignment successful"}, 184 "7001: IOA sector reassignment successful"},
188 {0x01180500, 0, 1, 185 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
189 "FFF9: Soft media error. Sector reassignment recommended"}, 186 "FFF9: Soft media error. Sector reassignment recommended"},
190 {0x01180600, 0, 1, 187 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
191 "FFF7: Media error recovered by IOA rewrite procedures"}, 188 "FFF7: Media error recovered by IOA rewrite procedures"},
192 {0x01418000, 0, 1, 189 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
193 "FF3D: Soft PCI bus error recovered by the IOA"}, 190 "FF3D: Soft PCI bus error recovered by the IOA"},
194 {0x01440000, 1, 1, 191 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
195 "FFF6: Device hardware error recovered by the IOA"}, 192 "FFF6: Device hardware error recovered by the IOA"},
196 {0x01448100, 0, 1, 193 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
197 "FFF6: Device hardware error recovered by the device"}, 194 "FFF6: Device hardware error recovered by the device"},
198 {0x01448200, 1, 1, 195 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
199 "FF3D: Soft IOA error recovered by the IOA"}, 196 "FF3D: Soft IOA error recovered by the IOA"},
200 {0x01448300, 0, 1, 197 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
201 "FFFA: Undefined device response recovered by the IOA"}, 198 "FFFA: Undefined device response recovered by the IOA"},
202 {0x014A0000, 1, 1, 199 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
203 "FFF6: Device bus error, message or command phase"}, 200 "FFF6: Device bus error, message or command phase"},
204 {0x014A8000, 0, 1, 201 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
205 "FFFE: Task Management Function failed"}, 202 "FFFE: Task Management Function failed"},
206 {0x015D0000, 0, 1, 203 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
207 "FFF6: Failure prediction threshold exceeded"}, 204 "FFF6: Failure prediction threshold exceeded"},
208 {0x015D9200, 0, 1, 205 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
209 "8009: Impending cache battery pack failure"}, 206 "8009: Impending cache battery pack failure"},
210 {0x02040400, 0, 0, 207 {0x02040400, 0, 0,
211 "34FF: Disk device format in progress"}, 208 "34FF: Disk device format in progress"},
@@ -215,85 +212,85 @@ struct ipr_error_table_t ipr_error_table[] = {
215 "No ready, IOA shutdown"}, 212 "No ready, IOA shutdown"},
216 {0x025A0000, 0, 0, 213 {0x025A0000, 0, 0,
217 "Not ready, IOA has been shutdown"}, 214 "Not ready, IOA has been shutdown"},
218 {0x02670100, 0, 1, 215 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
219 "3020: Storage subsystem configuration error"}, 216 "3020: Storage subsystem configuration error"},
220 {0x03110B00, 0, 0, 217 {0x03110B00, 0, 0,
221 "FFF5: Medium error, data unreadable, recommend reassign"}, 218 "FFF5: Medium error, data unreadable, recommend reassign"},
222 {0x03110C00, 0, 0, 219 {0x03110C00, 0, 0,
223 "7000: Medium error, data unreadable, do not reassign"}, 220 "7000: Medium error, data unreadable, do not reassign"},
224 {0x03310000, 0, 1, 221 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
225 "FFF3: Disk media format bad"}, 222 "FFF3: Disk media format bad"},
226 {0x04050000, 0, 1, 223 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
227 "3002: Addressed device failed to respond to selection"}, 224 "3002: Addressed device failed to respond to selection"},
228 {0x04080000, 1, 1, 225 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
229 "3100: Device bus error"}, 226 "3100: Device bus error"},
230 {0x04080100, 0, 1, 227 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
231 "3109: IOA timed out a device command"}, 228 "3109: IOA timed out a device command"},
232 {0x04088000, 0, 0, 229 {0x04088000, 0, 0,
233 "3120: SCSI bus is not operational"}, 230 "3120: SCSI bus is not operational"},
234 {0x04088100, 0, 1, 231 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
235 "4100: Hard device bus fabric error"}, 232 "4100: Hard device bus fabric error"},
236 {0x04118000, 0, 1, 233 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
237 "9000: IOA reserved area data check"}, 234 "9000: IOA reserved area data check"},
238 {0x04118100, 0, 1, 235 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
239 "9001: IOA reserved area invalid data pattern"}, 236 "9001: IOA reserved area invalid data pattern"},
240 {0x04118200, 0, 1, 237 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
241 "9002: IOA reserved area LRC error"}, 238 "9002: IOA reserved area LRC error"},
242 {0x04320000, 0, 1, 239 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
243 "102E: Out of alternate sectors for disk storage"}, 240 "102E: Out of alternate sectors for disk storage"},
244 {0x04330000, 1, 1, 241 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
245 "FFF4: Data transfer underlength error"}, 242 "FFF4: Data transfer underlength error"},
246 {0x04338000, 1, 1, 243 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
247 "FFF4: Data transfer overlength error"}, 244 "FFF4: Data transfer overlength error"},
248 {0x043E0100, 0, 1, 245 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
249 "3400: Logical unit failure"}, 246 "3400: Logical unit failure"},
250 {0x04408500, 0, 1, 247 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
251 "FFF4: Device microcode is corrupt"}, 248 "FFF4: Device microcode is corrupt"},
252 {0x04418000, 1, 1, 249 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
253 "8150: PCI bus error"}, 250 "8150: PCI bus error"},
254 {0x04430000, 1, 0, 251 {0x04430000, 1, 0,
255 "Unsupported device bus message received"}, 252 "Unsupported device bus message received"},
256 {0x04440000, 1, 1, 253 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
257 "FFF4: Disk device problem"}, 254 "FFF4: Disk device problem"},
258 {0x04448200, 1, 1, 255 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
259 "8150: Permanent IOA failure"}, 256 "8150: Permanent IOA failure"},
260 {0x04448300, 0, 1, 257 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
261 "3010: Disk device returned wrong response to IOA"}, 258 "3010: Disk device returned wrong response to IOA"},
262 {0x04448400, 0, 1, 259 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
263 "8151: IOA microcode error"}, 260 "8151: IOA microcode error"},
264 {0x04448500, 0, 0, 261 {0x04448500, 0, 0,
265 "Device bus status error"}, 262 "Device bus status error"},
266 {0x04448600, 0, 1, 263 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
267 "8157: IOA error requiring IOA reset to recover"}, 264 "8157: IOA error requiring IOA reset to recover"},
268 {0x04448700, 0, 0, 265 {0x04448700, 0, 0,
269 "ATA device status error"}, 266 "ATA device status error"},
270 {0x04490000, 0, 0, 267 {0x04490000, 0, 0,
271 "Message reject received from the device"}, 268 "Message reject received from the device"},
272 {0x04449200, 0, 1, 269 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
273 "8008: A permanent cache battery pack failure occurred"}, 270 "8008: A permanent cache battery pack failure occurred"},
274 {0x0444A000, 0, 1, 271 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
275 "9090: Disk unit has been modified after the last known status"}, 272 "9090: Disk unit has been modified after the last known status"},
276 {0x0444A200, 0, 1, 273 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
277 "9081: IOA detected device error"}, 274 "9081: IOA detected device error"},
278 {0x0444A300, 0, 1, 275 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
279 "9082: IOA detected device error"}, 276 "9082: IOA detected device error"},
280 {0x044A0000, 1, 1, 277 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
281 "3110: Device bus error, message or command phase"}, 278 "3110: Device bus error, message or command phase"},
282 {0x044A8000, 1, 1, 279 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
283 "3110: SAS Command / Task Management Function failed"}, 280 "3110: SAS Command / Task Management Function failed"},
284 {0x04670400, 0, 1, 281 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
285 "9091: Incorrect hardware configuration change has been detected"}, 282 "9091: Incorrect hardware configuration change has been detected"},
286 {0x04678000, 0, 1, 283 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
287 "9073: Invalid multi-adapter configuration"}, 284 "9073: Invalid multi-adapter configuration"},
288 {0x04678100, 0, 1, 285 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
289 "4010: Incorrect connection between cascaded expanders"}, 286 "4010: Incorrect connection between cascaded expanders"},
290 {0x04678200, 0, 1, 287 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
291 "4020: Connections exceed IOA design limits"}, 288 "4020: Connections exceed IOA design limits"},
292 {0x04678300, 0, 1, 289 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
293 "4030: Incorrect multipath connection"}, 290 "4030: Incorrect multipath connection"},
294 {0x04679000, 0, 1, 291 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
295 "4110: Unsupported enclosure function"}, 292 "4110: Unsupported enclosure function"},
296 {0x046E0000, 0, 1, 293 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
297 "FFF4: Command to logical unit failed"}, 294 "FFF4: Command to logical unit failed"},
298 {0x05240000, 1, 0, 295 {0x05240000, 1, 0,
299 "Illegal request, invalid request type or request packet"}, 296 "Illegal request, invalid request type or request packet"},
@@ -313,101 +310,103 @@ struct ipr_error_table_t ipr_error_table[] = {
313 "Illegal request, command sequence error"}, 310 "Illegal request, command sequence error"},
314 {0x052C8000, 1, 0, 311 {0x052C8000, 1, 0,
315 "Illegal request, dual adapter support not enabled"}, 312 "Illegal request, dual adapter support not enabled"},
316 {0x06040500, 0, 1, 313 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
317 "9031: Array protection temporarily suspended, protection resuming"}, 314 "9031: Array protection temporarily suspended, protection resuming"},
318 {0x06040600, 0, 1, 315 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
319 "9040: Array protection temporarily suspended, protection resuming"}, 316 "9040: Array protection temporarily suspended, protection resuming"},
320 {0x06288000, 0, 1, 317 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
321 "3140: Device bus not ready to ready transition"}, 318 "3140: Device bus not ready to ready transition"},
322 {0x06290000, 0, 1, 319 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
323 "FFFB: SCSI bus was reset"}, 320 "FFFB: SCSI bus was reset"},
324 {0x06290500, 0, 0, 321 {0x06290500, 0, 0,
325 "FFFE: SCSI bus transition to single ended"}, 322 "FFFE: SCSI bus transition to single ended"},
326 {0x06290600, 0, 0, 323 {0x06290600, 0, 0,
327 "FFFE: SCSI bus transition to LVD"}, 324 "FFFE: SCSI bus transition to LVD"},
328 {0x06298000, 0, 1, 325 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
329 "FFFB: SCSI bus was reset by another initiator"}, 326 "FFFB: SCSI bus was reset by another initiator"},
330 {0x063F0300, 0, 1, 327 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
331 "3029: A device replacement has occurred"}, 328 "3029: A device replacement has occurred"},
332 {0x064C8000, 0, 1, 329 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
333 "9051: IOA cache data exists for a missing or failed device"}, 330 "9051: IOA cache data exists for a missing or failed device"},
334 {0x064C8100, 0, 1, 331 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
335 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"}, 332 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
336 {0x06670100, 0, 1, 333 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
337 "9025: Disk unit is not supported at its physical location"}, 334 "9025: Disk unit is not supported at its physical location"},
338 {0x06670600, 0, 1, 335 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
339 "3020: IOA detected a SCSI bus configuration error"}, 336 "3020: IOA detected a SCSI bus configuration error"},
340 {0x06678000, 0, 1, 337 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
341 "3150: SCSI bus configuration error"}, 338 "3150: SCSI bus configuration error"},
342 {0x06678100, 0, 1, 339 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
343 "9074: Asymmetric advanced function disk configuration"}, 340 "9074: Asymmetric advanced function disk configuration"},
344 {0x06678300, 0, 1, 341 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
345 "4040: Incomplete multipath connection between IOA and enclosure"}, 342 "4040: Incomplete multipath connection between IOA and enclosure"},
346 {0x06678400, 0, 1, 343 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
347 "4041: Incomplete multipath connection between enclosure and device"}, 344 "4041: Incomplete multipath connection between enclosure and device"},
348 {0x06678500, 0, 1, 345 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
349 "9075: Incomplete multipath connection between IOA and remote IOA"}, 346 "9075: Incomplete multipath connection between IOA and remote IOA"},
350 {0x06678600, 0, 1, 347 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
351 "9076: Configuration error, missing remote IOA"}, 348 "9076: Configuration error, missing remote IOA"},
352 {0x06679100, 0, 1, 349 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
353 "4050: Enclosure does not support a required multipath function"}, 350 "4050: Enclosure does not support a required multipath function"},
354 {0x06690200, 0, 1, 351 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
355 "9041: Array protection temporarily suspended"}, 352 "9041: Array protection temporarily suspended"},
356 {0x06698200, 0, 1, 353 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
357 "9042: Corrupt array parity detected on specified device"}, 354 "9042: Corrupt array parity detected on specified device"},
358 {0x066B0200, 0, 1, 355 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
359 "9030: Array no longer protected due to missing or failed disk unit"}, 356 "9030: Array no longer protected due to missing or failed disk unit"},
360 {0x066B8000, 0, 1, 357 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
361 "9071: Link operational transition"}, 358 "9071: Link operational transition"},
362 {0x066B8100, 0, 1, 359 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
363 "9072: Link not operational transition"}, 360 "9072: Link not operational transition"},
364 {0x066B8200, 0, 1, 361 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
365 "9032: Array exposed but still protected"}, 362 "9032: Array exposed but still protected"},
366 {0x066B9100, 0, 1, 363 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
364 "70DD: Device forced failed by disrupt device command"},
365 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
367 "4061: Multipath redundancy level got better"}, 366 "4061: Multipath redundancy level got better"},
368 {0x066B9200, 0, 1, 367 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
369 "4060: Multipath redundancy level got worse"}, 368 "4060: Multipath redundancy level got worse"},
370 {0x07270000, 0, 0, 369 {0x07270000, 0, 0,
371 "Failure due to other device"}, 370 "Failure due to other device"},
372 {0x07278000, 0, 1, 371 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
373 "9008: IOA does not support functions expected by devices"}, 372 "9008: IOA does not support functions expected by devices"},
374 {0x07278100, 0, 1, 373 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
375 "9010: Cache data associated with attached devices cannot be found"}, 374 "9010: Cache data associated with attached devices cannot be found"},
376 {0x07278200, 0, 1, 375 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
377 "9011: Cache data belongs to devices other than those attached"}, 376 "9011: Cache data belongs to devices other than those attached"},
378 {0x07278400, 0, 1, 377 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
379 "9020: Array missing 2 or more devices with only 1 device present"}, 378 "9020: Array missing 2 or more devices with only 1 device present"},
380 {0x07278500, 0, 1, 379 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
381 "9021: Array missing 2 or more devices with 2 or more devices present"}, 380 "9021: Array missing 2 or more devices with 2 or more devices present"},
382 {0x07278600, 0, 1, 381 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
383 "9022: Exposed array is missing a required device"}, 382 "9022: Exposed array is missing a required device"},
384 {0x07278700, 0, 1, 383 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
385 "9023: Array member(s) not at required physical locations"}, 384 "9023: Array member(s) not at required physical locations"},
386 {0x07278800, 0, 1, 385 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
387 "9024: Array not functional due to present hardware configuration"}, 386 "9024: Array not functional due to present hardware configuration"},
388 {0x07278900, 0, 1, 387 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
389 "9026: Array not functional due to present hardware configuration"}, 388 "9026: Array not functional due to present hardware configuration"},
390 {0x07278A00, 0, 1, 389 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
391 "9027: Array is missing a device and parity is out of sync"}, 390 "9027: Array is missing a device and parity is out of sync"},
392 {0x07278B00, 0, 1, 391 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
393 "9028: Maximum number of arrays already exist"}, 392 "9028: Maximum number of arrays already exist"},
394 {0x07278C00, 0, 1, 393 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
395 "9050: Required cache data cannot be located for a disk unit"}, 394 "9050: Required cache data cannot be located for a disk unit"},
396 {0x07278D00, 0, 1, 395 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
397 "9052: Cache data exists for a device that has been modified"}, 396 "9052: Cache data exists for a device that has been modified"},
398 {0x07278F00, 0, 1, 397 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
399 "9054: IOA resources not available due to previous problems"}, 398 "9054: IOA resources not available due to previous problems"},
400 {0x07279100, 0, 1, 399 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
401 "9092: Disk unit requires initialization before use"}, 400 "9092: Disk unit requires initialization before use"},
402 {0x07279200, 0, 1, 401 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
403 "9029: Incorrect hardware configuration change has been detected"}, 402 "9029: Incorrect hardware configuration change has been detected"},
404 {0x07279600, 0, 1, 403 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
405 "9060: One or more disk pairs are missing from an array"}, 404 "9060: One or more disk pairs are missing from an array"},
406 {0x07279700, 0, 1, 405 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
407 "9061: One or more disks are missing from an array"}, 406 "9061: One or more disks are missing from an array"},
408 {0x07279800, 0, 1, 407 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
409 "9062: One or more disks are missing from an array"}, 408 "9062: One or more disks are missing from an array"},
410 {0x07279900, 0, 1, 409 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
411 "9063: Maximum number of functional arrays has been exceeded"}, 410 "9063: Maximum number of functional arrays has been exceeded"},
412 {0x0B260000, 0, 0, 411 {0x0B260000, 0, 0,
413 "Aborted command, invalid descriptor"}, 412 "Aborted command, invalid descriptor"},
@@ -481,12 +480,16 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
481{ 480{
482 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 481 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
483 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 482 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
483 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
484 484
485 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 485 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
486 ioarcb->write_data_transfer_length = 0; 486 ioarcb->write_data_transfer_length = 0;
487 ioarcb->read_data_transfer_length = 0; 487 ioarcb->read_data_transfer_length = 0;
488 ioarcb->write_ioadl_len = 0; 488 ioarcb->write_ioadl_len = 0;
489 ioarcb->read_ioadl_len = 0; 489 ioarcb->read_ioadl_len = 0;
490 ioarcb->write_ioadl_addr =
491 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
492 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
490 ioasa->ioasc = 0; 493 ioasa->ioasc = 0;
491 ioasa->residual_data_len = 0; 494 ioasa->residual_data_len = 0;
492 ioasa->u.gata.status = 0; 495 ioasa->u.gata.status = 0;
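
The added lines above repoint the read/write IOADL addresses at the scatter/gather list embedded in the command block, deriving the bus address from the already-mapped IOARCB plus offsetof() instead of mapping anything new. A hedged sketch of that addressing trick, with invented structure and field names (example_cmd, hw_desc, sglist) rather than the ipr ones:

#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/byteorder.h>

struct example_sg_desc {
	__be32 len;
	__be32 addr;
};

struct example_cmd {
	struct {
		__be32 host_pci_addr;	/* bus address of this whole block */
		__be32 sg_list_addr;	/* where the adapter fetches SG entries */
	} hw_desc;
	struct example_sg_desc sglist[64];
};

/* Point the adapter at the SG list that lives inside the same
 * DMA-able allocation as the descriptor itself: no extra mapping
 * is needed, only an offset from the known bus address. */
static void example_reset_sg_pointer(struct example_cmd *cmd)
{
	u32 bus_addr = be32_to_cpu(cmd->hw_desc.host_pci_addr);

	cmd->hw_desc.sg_list_addr =
		cpu_to_be32(bus_addr + offsetof(struct example_cmd, sglist));
}
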
@@ -1610,7 +1613,7 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1610 /* Set indication we have logged an error */ 1613 /* Set indication we have logged an error */
1611 ioa_cfg->errors_logged++; 1614 ioa_cfg->errors_logged++;
1612 1615
1613 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) 1616 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1614 return; 1617 return;
1615 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw)) 1618 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1616 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw)); 1619 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
@@ -3850,6 +3853,8 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3850 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { 3853 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3851 if (ipr_cmd->scsi_cmd) 3854 if (ipr_cmd->scsi_cmd)
3852 ipr_cmd->done = ipr_scsi_eh_done; 3855 ipr_cmd->done = ipr_scsi_eh_done;
3856 if (ipr_cmd->qc)
3857 ipr_cmd->done = ipr_sata_eh_done;
3853 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) { 3858 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3854 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT; 3859 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3855 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED; 3860 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
@@ -4230,6 +4235,14 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4230 4235
4231 sglist = scsi_cmd->request_buffer; 4236 sglist = scsi_cmd->request_buffer;
4232 4237
4238 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
4239 ioadl = ioarcb->add_data.u.ioadl;
4240 ioarcb->write_ioadl_addr =
4241 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4242 offsetof(struct ipr_ioarcb, add_data));
4243 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4244 }
4245
4233 for (i = 0; i < ipr_cmd->dma_use_sg; i++) { 4246 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4234 ioadl[i].flags_and_data_len = 4247 ioadl[i].flags_and_data_len =
4235 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i])); 4248 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
@@ -4260,6 +4273,11 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4260 scsi_cmd->sc_data_direction); 4273 scsi_cmd->sc_data_direction);
4261 4274
4262 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) { 4275 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
4276 ioadl = ioarcb->add_data.u.ioadl;
4277 ioarcb->write_ioadl_addr =
4278 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4279 offsetof(struct ipr_ioarcb, add_data));
4280 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4263 ipr_cmd->dma_use_sg = 1; 4281 ipr_cmd->dma_use_sg = 1;
4264 ioadl[0].flags_and_data_len = 4282 ioadl[0].flags_and_data_len =
4265 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST); 4283 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
@@ -4346,11 +4364,9 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4346 **/ 4364 **/
4347static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) 4365static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4348{ 4366{
4349 struct ipr_ioarcb *ioarcb; 4367 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4350 struct ipr_ioasa *ioasa; 4368 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4351 4369 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
4352 ioarcb = &ipr_cmd->ioarcb;
4353 ioasa = &ipr_cmd->ioasa;
4354 4370
4355 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 4371 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4356 ioarcb->write_data_transfer_length = 0; 4372 ioarcb->write_data_transfer_length = 0;
@@ -4359,6 +4375,9 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4359 ioarcb->read_ioadl_len = 0; 4375 ioarcb->read_ioadl_len = 0;
4360 ioasa->ioasc = 0; 4376 ioasa->ioasc = 0;
4361 ioasa->residual_data_len = 0; 4377 ioasa->residual_data_len = 0;
4378 ioarcb->write_ioadl_addr =
4379 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
4380 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4362} 4381}
4363 4382
4364/** 4383/**
@@ -4457,12 +4476,13 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4457{ 4476{
4458 int i; 4477 int i;
4459 u16 data_len; 4478 u16 data_len;
4460 u32 ioasc; 4479 u32 ioasc, fd_ioasc;
4461 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 4480 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4462 __be32 *ioasa_data = (__be32 *)ioasa; 4481 __be32 *ioasa_data = (__be32 *)ioasa;
4463 int error_index; 4482 int error_index;
4464 4483
4465 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK; 4484 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4485 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
4466 4486
4467 if (0 == ioasc) 4487 if (0 == ioasc)
4468 return; 4488 return;
@@ -4470,13 +4490,19 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4470 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) 4490 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4471 return; 4491 return;
4472 4492
4473 error_index = ipr_get_error(ioasc); 4493 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
4494 error_index = ipr_get_error(fd_ioasc);
4495 else
4496 error_index = ipr_get_error(ioasc);
4474 4497
4475 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { 4498 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4476 /* Don't log an error if the IOA already logged one */ 4499 /* Don't log an error if the IOA already logged one */
4477 if (ioasa->ilid != 0) 4500 if (ioasa->ilid != 0)
4478 return; 4501 return;
4479 4502
4503 if (!ipr_is_gscsi(res))
4504 return;
4505
4480 if (ipr_error_table[error_index].log_ioasa == 0) 4506 if (ipr_error_table[error_index].log_ioasa == 0)
4481 return; 4507 return;
4482 } 4508 }
@@ -4636,11 +4662,11 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4636 return; 4662 return;
4637 } 4663 }
4638 4664
4639 if (ipr_is_gscsi(res)) 4665 if (!ipr_is_gscsi(res))
4640 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4641 else
4642 ipr_gen_sense(ipr_cmd); 4666 ipr_gen_sense(ipr_cmd);
4643 4667
4668 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4669
4644 switch (ioasc & IPR_IOASC_IOASC_MASK) { 4670 switch (ioasc & IPR_IOASC_IOASC_MASK) {
4645 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST: 4671 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4646 if (ipr_is_naca_model(res)) 4672 if (ipr_is_naca_model(res))
@@ -5121,7 +5147,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5121 struct ipr_ioarcb_ata_regs *regs; 5147 struct ipr_ioarcb_ata_regs *regs;
5122 5148
5123 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead)) 5149 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5124 return -EIO; 5150 return AC_ERR_SYSTEM;
5125 5151
5126 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5152 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5127 ioarcb = &ipr_cmd->ioarcb; 5153 ioarcb = &ipr_cmd->ioarcb;
@@ -5166,7 +5192,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5166 5192
5167 default: 5193 default:
5168 WARN_ON(1); 5194 WARN_ON(1);
5169 return -1; 5195 return AC_ERR_INVALID;
5170 } 5196 }
5171 5197
5172 mb(); 5198 mb();
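
The return-value changes in this hunk matter because a libata ->qc_issue() hook returns an unsigned AC_ERR_* mask, not a negative errno; -EIO or -1 would be misread as a garbage error mask. A minimal sketch of the expected shape (example_qc_issue and struct example_adapter are invented; only the AC_ERR_SYSTEM/AC_ERR_INVALID usage mirrors the patch):

#include <linux/kernel.h>
#include <linux/libata.h>

struct example_adapter {
	int allow_cmds;			/* cleared while the adapter resets */
};

/* ->qc_issue() returns an AC_ERR_* mask, 0 meaning the command was
 * accepted, so failures must be mapped to libata error bits. */
static unsigned int example_qc_issue(struct ata_queued_cmd *qc)
{
	struct example_adapter *adap = qc->ap->private_data;

	if (unlikely(!adap->allow_cmds))
		return AC_ERR_SYSTEM;

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
	case ATA_PROT_PIO:
	case ATA_PROT_DMA:
		/* ... build and post the command to the adapter ... */
		break;
	default:
		WARN_ON(1);
		return AC_ERR_INVALID;
	}

	return 0;
}
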
@@ -6188,7 +6214,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6188 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); 6214 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6189 6215
6190 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 6216 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6191 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ); 6217 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6192 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 6218 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6193 ipr_cmd->done = ipr_reset_ioa_job; 6219 ipr_cmd->done = ipr_reset_ioa_job;
6194 add_timer(&ipr_cmd->timer); 6220 add_timer(&ipr_cmd->timer);
@@ -6385,6 +6411,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6385 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); 6411 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6386 6412
6387 if (rc != PCIBIOS_SUCCESSFUL) { 6413 if (rc != PCIBIOS_SUCCESSFUL) {
6414 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6388 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 6415 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6389 rc = IPR_RC_JOB_CONTINUE; 6416 rc = IPR_RC_JOB_CONTINUE;
6390 } else { 6417 } else {
@@ -7117,8 +7144,6 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7117 ioa_cfg->pdev = pdev; 7144 ioa_cfg->pdev = pdev;
7118 ioa_cfg->log_level = ipr_log_level; 7145 ioa_cfg->log_level = ipr_log_level;
7119 ioa_cfg->doorbell = IPR_DOORBELL; 7146 ioa_cfg->doorbell = IPR_DOORBELL;
7120 if (!ipr_auto_create)
7121 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7122 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); 7147 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
7123 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); 7148 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
7124 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL); 7149 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
@@ -7233,6 +7258,13 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7233 goto out_scsi_host_put; 7258 goto out_scsi_host_put;
7234 } 7259 }
7235 7260
7261 if (ipr_transop_timeout)
7262 ioa_cfg->transop_timeout = ipr_transop_timeout;
7263 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
7264 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
7265 else
7266 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
7267
7236 ipr_regs_pci = pci_resource_start(pdev, 0); 7268 ipr_regs_pci = pci_resource_start(pdev, 0);
7237 7269
7238 rc = pci_request_regions(pdev, IPR_NAME); 7270 rc = pci_request_regions(pdev, IPR_NAME);
@@ -7540,29 +7572,45 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
7540 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 7572 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7541 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 }, 7573 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
7542 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 7574 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7543 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0, 0 }, 7575 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
7576 IPR_USE_LONG_TRANSOP_TIMEOUT },
7544 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 7577 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7545 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, 7578 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7546 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 7579 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7547 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 }, 7580 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
7548 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 7581 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7549 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 }, 7582 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7583 IPR_USE_LONG_TRANSOP_TIMEOUT },
7550 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7584 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7551 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, 7585 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7552 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7586 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7553 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 }, 7587 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
7554 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7588 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7555 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 }, 7589 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7590 IPR_USE_LONG_TRANSOP_TIMEOUT },
7591 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7592 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 0 },
7593 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7594 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
7595 IPR_USE_LONG_TRANSOP_TIMEOUT },
7596 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7597 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
7556 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 7598 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7557 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 0 }, 7599 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
7600 IPR_USE_LONG_TRANSOP_TIMEOUT },
7558 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, 7601 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
7559 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 }, 7602 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
7560 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 7603 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7561 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 }, 7604 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
7562 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 7605 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7563 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0, 0 }, 7606 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
7607 IPR_USE_LONG_TRANSOP_TIMEOUT },
7564 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 7608 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7565 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, 0 }, 7609 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
7610 IPR_USE_LONG_TRANSOP_TIMEOUT },
7611 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
7612 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
7613 IPR_USE_LONG_TRANSOP_TIMEOUT },
7566 { } 7614 { }
7567}; 7615};
7568MODULE_DEVICE_TABLE(pci, ipr_pci_table); 7616MODULE_DEVICE_TABLE(pci, ipr_pci_table);
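
The new table entries above carry IPR_USE_LONG_TRANSOP_TIMEOUT in the driver_data slot of pci_device_id, and the probe hunk earlier turns that flag into a per-adapter transop_timeout. The general flag-through-driver_data pattern, sketched with invented names and placeholder device IDs (only 0x1014, the IBM vendor ID, is real):

#include <linux/module.h>
#include <linux/pci.h>

#define EXAMPLE_USE_LONG_TIMEOUT		0x00000001	/* driver_data flag */
#define EXAMPLE_OPERATIONAL_TIMEOUT		(5 * 60)	/* seconds */
#define EXAMPLE_LONG_OPERATIONAL_TIMEOUT	(12 * 60)

static struct pci_device_id example_pci_table[] = {
	/* device/subdevice IDs below are made up for illustration */
	{ 0x1014, 0x1234, 0x1014, 0x0001, 0, 0, EXAMPLE_USE_LONG_TIMEOUT },
	{ 0x1014, 0x1234, 0x1014, 0x0002, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, example_pci_table);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	unsigned int transop_timeout;

	/* per-board quirks ride along in id->driver_data */
	if (id->driver_data & EXAMPLE_USE_LONG_TIMEOUT)
		transop_timeout = EXAMPLE_LONG_OPERATIONAL_TIMEOUT;
	else
		transop_timeout = EXAMPLE_OPERATIONAL_TIMEOUT;

	dev_info(&pdev->dev, "transop timeout %u seconds\n", transop_timeout);
	return 0;
}

In the patch itself the precedence is: an explicit ipr_transop_timeout module parameter wins, then the driver_data flag selects the long timeout, otherwise the stock default is used.
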
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 88f285de97bb..bc53d7cebe0a 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -37,8 +37,8 @@
37/* 37/*
38 * Literals 38 * Literals
39 */ 39 */
40#define IPR_DRIVER_VERSION "2.3.1" 40#define IPR_DRIVER_VERSION "2.3.2"
41#define IPR_DRIVER_DATE "(January 23, 2007)" 41#define IPR_DRIVER_DATE "(March 23, 2007)"
42 42
43/* 43/*
44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -55,6 +55,7 @@
55#define IPR_NUM_BASE_CMD_BLKS 100 55#define IPR_NUM_BASE_CMD_BLKS 100
56 56
57#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339 57#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339
58#define PCI_DEVICE_ID_IBM_SCAMP_E 0x034A
58 59
59#define IPR_SUBS_DEV_ID_2780 0x0264 60#define IPR_SUBS_DEV_ID_2780 0x0264
60#define IPR_SUBS_DEV_ID_5702 0x0266 61#define IPR_SUBS_DEV_ID_5702 0x0266
@@ -69,8 +70,12 @@
69#define IPR_SUBS_DEV_ID_572A 0x02C1 70#define IPR_SUBS_DEV_ID_572A 0x02C1
70#define IPR_SUBS_DEV_ID_572B 0x02C2 71#define IPR_SUBS_DEV_ID_572B 0x02C2
71#define IPR_SUBS_DEV_ID_572F 0x02C3 72#define IPR_SUBS_DEV_ID_572F 0x02C3
73#define IPR_SUBS_DEV_ID_574D 0x030B
74#define IPR_SUBS_DEV_ID_574E 0x030A
72#define IPR_SUBS_DEV_ID_575B 0x030D 75#define IPR_SUBS_DEV_ID_575B 0x030D
73#define IPR_SUBS_DEV_ID_575C 0x0338 76#define IPR_SUBS_DEV_ID_575C 0x0338
77#define IPR_SUBS_DEV_ID_575D 0x033E
78#define IPR_SUBS_DEV_ID_57B3 0x033A
74#define IPR_SUBS_DEV_ID_57B7 0x0360 79#define IPR_SUBS_DEV_ID_57B7 0x0360
75#define IPR_SUBS_DEV_ID_57B8 0x02C2 80#define IPR_SUBS_DEV_ID_57B8 0x02C2
76 81
@@ -104,6 +109,9 @@
104#define IPR_IOASC_IOA_WAS_RESET 0x10000001 109#define IPR_IOASC_IOA_WAS_RESET 0x10000001
105#define IPR_IOASC_PCI_ACCESS_ERROR 0x10000002 110#define IPR_IOASC_PCI_ACCESS_ERROR 0x10000002
106 111
112/* Driver data flags */
113#define IPR_USE_LONG_TRANSOP_TIMEOUT 0x00000001
114
107#define IPR_DEFAULT_MAX_ERROR_DUMP 984 115#define IPR_DEFAULT_MAX_ERROR_DUMP 984
108#define IPR_NUM_LOG_HCAMS 2 116#define IPR_NUM_LOG_HCAMS 2
109#define IPR_NUM_CFG_CHG_HCAMS 2 117#define IPR_NUM_CFG_CHG_HCAMS 2
@@ -179,6 +187,7 @@
179#define IPR_SET_SUP_DEVICE_TIMEOUT (2 * 60 * HZ) 187#define IPR_SET_SUP_DEVICE_TIMEOUT (2 * 60 * HZ)
180#define IPR_REQUEST_SENSE_TIMEOUT (10 * HZ) 188#define IPR_REQUEST_SENSE_TIMEOUT (10 * HZ)
181#define IPR_OPERATIONAL_TIMEOUT (5 * 60) 189#define IPR_OPERATIONAL_TIMEOUT (5 * 60)
190#define IPR_LONG_OPERATIONAL_TIMEOUT (12 * 60)
182#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ) 191#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ)
183#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10) 192#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10)
184#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ) 193#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ)
@@ -413,9 +422,25 @@ struct ipr_ioarcb_ata_regs {
413 u8 ctl; 422 u8 ctl;
414}__attribute__ ((packed, aligned(4))); 423}__attribute__ ((packed, aligned(4)));
415 424
425struct ipr_ioadl_desc {
426 __be32 flags_and_data_len;
427#define IPR_IOADL_FLAGS_MASK 0xff000000
428#define IPR_IOADL_GET_FLAGS(x) (be32_to_cpu(x) & IPR_IOADL_FLAGS_MASK)
429#define IPR_IOADL_DATA_LEN_MASK 0x00ffffff
430#define IPR_IOADL_GET_DATA_LEN(x) (be32_to_cpu(x) & IPR_IOADL_DATA_LEN_MASK)
431#define IPR_IOADL_FLAGS_READ 0x48000000
432#define IPR_IOADL_FLAGS_READ_LAST 0x49000000
433#define IPR_IOADL_FLAGS_WRITE 0x68000000
434#define IPR_IOADL_FLAGS_WRITE_LAST 0x69000000
435#define IPR_IOADL_FLAGS_LAST 0x01000000
436
437 __be32 address;
438}__attribute__((packed, aligned (8)));
439
416struct ipr_ioarcb_add_data { 440struct ipr_ioarcb_add_data {
417 union { 441 union {
418 struct ipr_ioarcb_ata_regs regs; 442 struct ipr_ioarcb_ata_regs regs;
443 struct ipr_ioadl_desc ioadl[5];
419 __be32 add_cmd_parms[10]; 444 __be32 add_cmd_parms[10];
420 }u; 445 }u;
421}__attribute__ ((packed, aligned(4))); 446}__attribute__ ((packed, aligned(4)));
@@ -447,21 +472,6 @@ struct ipr_ioarcb {
447 struct ipr_ioarcb_add_data add_data; 472 struct ipr_ioarcb_add_data add_data;
448}__attribute__((packed, aligned (4))); 473}__attribute__((packed, aligned (4)));
449 474
450struct ipr_ioadl_desc {
451 __be32 flags_and_data_len;
452#define IPR_IOADL_FLAGS_MASK 0xff000000
453#define IPR_IOADL_GET_FLAGS(x) (be32_to_cpu(x) & IPR_IOADL_FLAGS_MASK)
454#define IPR_IOADL_DATA_LEN_MASK 0x00ffffff
455#define IPR_IOADL_GET_DATA_LEN(x) (be32_to_cpu(x) & IPR_IOADL_DATA_LEN_MASK)
456#define IPR_IOADL_FLAGS_READ 0x48000000
457#define IPR_IOADL_FLAGS_READ_LAST 0x49000000
458#define IPR_IOADL_FLAGS_WRITE 0x68000000
459#define IPR_IOADL_FLAGS_WRITE_LAST 0x69000000
460#define IPR_IOADL_FLAGS_LAST 0x01000000
461
462 __be32 address;
463}__attribute__((packed, aligned (8)));
464
465struct ipr_ioasa_vset { 475struct ipr_ioasa_vset {
466 __be32 failing_lba_hi; 476 __be32 failing_lba_hi;
467 __be32 failing_lba_lo; 477 __be32 failing_lba_lo;
@@ -1119,6 +1129,7 @@ struct ipr_ioa_cfg {
1119 1129
1120 struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES]; 1130 struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
1121 1131
1132 unsigned int transop_timeout;
1122 const struct ipr_chip_cfg_t *chip_cfg; 1133 const struct ipr_chip_cfg_t *chip_cfg;
1123 1134
1124 void __iomem *hdw_dma_regs; /* iomapped PCI memory space */ 1135 void __iomem *hdw_dma_regs; /* iomapped PCI memory space */
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 8f55e1431433..c9a3abf9e7b6 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -527,12 +527,12 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
527 * than 8K, but there are no targets that currently do this. 527 * than 8K, but there are no targets that currently do this.
528 * For now we fail until we find a vendor that needs it 528 * For now we fail until we find a vendor that needs it
529 */ 529 */
530 if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH < 530 if (ISCSI_DEF_MAX_RECV_SEG_LEN <
531 tcp_conn->in.datalen) { 531 tcp_conn->in.datalen) {
532 printk(KERN_ERR "iscsi_tcp: received buffer of len %u " 532 printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
533 "but conn buffer is only %u (opcode %0x)\n", 533 "but conn buffer is only %u (opcode %0x)\n",
534 tcp_conn->in.datalen, 534 tcp_conn->in.datalen,
535 DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode); 535 ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
536 rc = ISCSI_ERR_PROTO; 536 rc = ISCSI_ERR_PROTO;
537 break; 537 break;
538 } 538 }
@@ -1762,7 +1762,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1762 * due to strange issues with iser these are not set 1762 * due to strange issues with iser these are not set
1763 * in iscsi_conn_setup 1763 * in iscsi_conn_setup
1764 */ 1764 */
1765 conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; 1765 conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
1766 1766
1767 tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL); 1767 tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
1768 if (!tcp_conn) 1768 if (!tcp_conn)
@@ -1777,14 +1777,24 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1777 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, 1777 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1778 CRYPTO_ALG_ASYNC); 1778 CRYPTO_ALG_ASYNC);
1779 tcp_conn->tx_hash.flags = 0; 1779 tcp_conn->tx_hash.flags = 0;
1780 if (IS_ERR(tcp_conn->tx_hash.tfm)) 1780 if (IS_ERR(tcp_conn->tx_hash.tfm)) {
1781 printk(KERN_ERR "Could not create connection due to crc32c "
1782 "loading error %ld. Make sure the crc32c module is "
1783 "built as a module or into the kernel\n",
1784 PTR_ERR(tcp_conn->tx_hash.tfm));
1781 goto free_tcp_conn; 1785 goto free_tcp_conn;
1786 }
1782 1787
1783 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0, 1788 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1784 CRYPTO_ALG_ASYNC); 1789 CRYPTO_ALG_ASYNC);
1785 tcp_conn->rx_hash.flags = 0; 1790 tcp_conn->rx_hash.flags = 0;
1786 if (IS_ERR(tcp_conn->rx_hash.tfm)) 1791 if (IS_ERR(tcp_conn->rx_hash.tfm)) {
1792 printk(KERN_ERR "Could not create connection due to crc32c "
1793 "loading error %ld. Make sure the crc32c module is "
1794 "built as a module or into the kernel\n",
1795 PTR_ERR(tcp_conn->rx_hash.tfm));
1787 goto free_tx_tfm; 1796 goto free_tx_tfm;
1797 }
1788 1798
1789 return cls_conn; 1799 return cls_conn;
1790 1800
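
The hunk above replaces a silent failure of crypto_alloc_hash("crc32c", ...) with a message naming the likely cause, a missing crc32c module. A small sketch of the IS_ERR()/PTR_ERR() handling it relies on (example_alloc_crc32c is an invented helper; the crypto_hash interface shown is the one this driver already uses):

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/crypto.h>

/* Allocate a crc32c transform and say why it failed; the usual cause
 * is that the crc32c module is neither built in nor loadable. */
static struct crypto_hash *example_alloc_crc32c(void)
{
	struct crypto_hash *tfm;

	tfm = crypto_alloc_hash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "crc32c allocation failed: %ld. Make sure "
		       "the crc32c module is built as a module or into the "
		       "kernel\n", PTR_ERR(tfm));
		return NULL;
	}
	return tfm;
}

A transform obtained this way is released with crypto_free_hash() when the connection is torn down.
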
@@ -2138,6 +2148,7 @@ static struct scsi_host_template iscsi_sht = {
2138 .change_queue_depth = iscsi_change_queue_depth, 2148 .change_queue_depth = iscsi_change_queue_depth,
2139 .can_queue = ISCSI_XMIT_CMDS_MAX - 1, 2149 .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
2140 .sg_tablesize = ISCSI_SG_TABLESIZE, 2150 .sg_tablesize = ISCSI_SG_TABLESIZE,
2151 .max_sectors = 0xFFFF,
2141 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 2152 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
2142 .eh_abort_handler = iscsi_eh_abort, 2153 .eh_abort_handler = iscsi_eh_abort,
2143 .eh_host_reset_handler = iscsi_eh_host_reset, 2154 .eh_host_reset_handler = iscsi_eh_host_reset,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 7c75771c77ff..3f5b9b445b29 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -25,6 +25,7 @@
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/kfifo.h> 26#include <linux/kfifo.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <asm/unaligned.h>
28#include <net/tcp.h> 29#include <net/tcp.h>
29#include <scsi/scsi_cmnd.h> 30#include <scsi/scsi_cmnd.h>
30#include <scsi/scsi_device.h> 31#include <scsi/scsi_device.h>
@@ -269,14 +270,14 @@ invalid_datalen:
269 goto out; 270 goto out;
270 } 271 }
271 272
272 senselen = be16_to_cpu(*(__be16 *)data); 273 senselen = be16_to_cpu(get_unaligned((__be16 *) data));
273 if (datalen < senselen) 274 if (datalen < senselen)
274 goto invalid_datalen; 275 goto invalid_datalen;
275 276
276 memcpy(sc->sense_buffer, data + 2, 277 memcpy(sc->sense_buffer, data + 2,
277 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE)); 278 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
278 debug_scsi("copied %d bytes of sense\n", 279 debug_scsi("copied %d bytes of sense\n",
279 min(senselen, SCSI_SENSE_BUFFERSIZE)); 280 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
280 } 281 }
281 282
282 if (sc->sc_data_direction == DMA_TO_DEVICE) 283 if (sc->sc_data_direction == DMA_TO_DEVICE)
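
The senselen fix above is needed because data points into a received PDU with no alignment guarantee, so dereferencing it as a __be16 can trap or silently misread on strict-alignment architectures; get_unaligned() is the portable way to pull the big-endian length out. A minimal sketch (parse_sense_len is an invented helper):

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

/* The first two bytes of the sense area hold the sense length as a
 * big-endian 16-bit value at an arbitrary offset inside the PDU, so
 * it must be read with get_unaligned() rather than dereferenced. */
static u16 parse_sense_len(const void *data)
{
	return be16_to_cpu(get_unaligned((const __be16 *)data));
}
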
@@ -577,7 +578,7 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
577} 578}
578EXPORT_SYMBOL_GPL(iscsi_conn_failure); 579EXPORT_SYMBOL_GPL(iscsi_conn_failure);
579 580
580static int iscsi_xmit_imm_task(struct iscsi_conn *conn) 581static int iscsi_xmit_mtask(struct iscsi_conn *conn)
581{ 582{
582 struct iscsi_hdr *hdr = conn->mtask->hdr; 583 struct iscsi_hdr *hdr = conn->mtask->hdr;
583 int rc, was_logout = 0; 584 int rc, was_logout = 0;
@@ -591,6 +592,9 @@ static int iscsi_xmit_imm_task(struct iscsi_conn *conn)
591 if (rc) 592 if (rc)
592 return rc; 593 return rc;
593 594
595 /* done with this in-progress mtask */
596 conn->mtask = NULL;
597
594 if (was_logout) { 598 if (was_logout) {
595 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 599 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
596 return -ENODATA; 600 return -ENODATA;
@@ -643,11 +647,9 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
643 conn->ctask = NULL; 647 conn->ctask = NULL;
644 } 648 }
645 if (conn->mtask) { 649 if (conn->mtask) {
646 rc = iscsi_xmit_imm_task(conn); 650 rc = iscsi_xmit_mtask(conn);
647 if (rc) 651 if (rc)
648 goto again; 652 goto again;
649 /* done with this in-progress mtask */
650 conn->mtask = NULL;
651 } 653 }
652 654
653 /* process immediate first */ 655 /* process immediate first */
@@ -658,12 +660,10 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
658 list_add_tail(&conn->mtask->running, 660 list_add_tail(&conn->mtask->running,
659 &conn->mgmt_run_list); 661 &conn->mgmt_run_list);
660 spin_unlock_bh(&conn->session->lock); 662 spin_unlock_bh(&conn->session->lock);
661 rc = iscsi_xmit_imm_task(conn); 663 rc = iscsi_xmit_mtask(conn);
662 if (rc) 664 if (rc)
663 goto again; 665 goto again;
664 } 666 }
665 /* done with this mtask */
666 conn->mtask = NULL;
667 } 667 }
668 668
669 /* process command queue */ 669 /* process command queue */
@@ -701,12 +701,10 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
701 list_add_tail(&conn->mtask->running, 701 list_add_tail(&conn->mtask->running,
702 &conn->mgmt_run_list); 702 &conn->mgmt_run_list);
703 spin_unlock_bh(&conn->session->lock); 703 spin_unlock_bh(&conn->session->lock);
704 rc = tt->xmit_mgmt_task(conn, conn->mtask); 704 rc = iscsi_xmit_mtask(conn);
705 if (rc) 705 if (rc)
706 goto again; 706 goto again;
707 } 707 }
708 /* done with this mtask */
709 conn->mtask = NULL;
710 } 708 }
711 709
712 return -ENODATA; 710 return -ENODATA;
@@ -1523,7 +1521,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1523 } 1521 }
1524 spin_unlock_bh(&session->lock); 1522 spin_unlock_bh(&session->lock);
1525 1523
1526 data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL); 1524 data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
1527 if (!data) 1525 if (!data)
1528 goto login_mtask_data_alloc_fail; 1526 goto login_mtask_data_alloc_fail;
1529 conn->login_mtask->data = conn->data = data; 1527 conn->login_mtask->data = conn->data = data;
@@ -1597,6 +1595,9 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1597 wake_up(&conn->ehwait); 1595 wake_up(&conn->ehwait);
1598 } 1596 }
1599 1597
1598 /* flush queued up work because we free the connection below */
1599 scsi_flush_work(session->host);
1600
1600 spin_lock_bh(&session->lock); 1601 spin_lock_bh(&session->lock);
1601 kfree(conn->data); 1602 kfree(conn->data);
1602 kfree(conn->persistent_address); 1603 kfree(conn->persistent_address);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index dc70c180e115..e34442e405e8 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -22,7 +22,6 @@
22 * 22 *
23 */ 23 */
24 24
25#include <linux/pci.h>
26#include <linux/scatterlist.h> 25#include <linux/scatterlist.h>
27 26
28#include "sas_internal.h" 27#include "sas_internal.h"
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 89403b00e042..5631c199a8eb 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -22,7 +22,6 @@
22#include <linux/kfifo.h> 22#include <linux/kfifo.h>
23#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
24#include <linux/dma-mapping.h> 24#include <linux/dma-mapping.h>
25#include <linux/pci.h>
26#include <scsi/scsi.h> 25#include <scsi/scsi.h>
27#include <scsi/scsi_cmnd.h> 26#include <scsi/scsi_cmnd.h>
28#include <scsi/scsi_tcq.h> 27#include <scsi/scsi_tcq.h>
@@ -225,8 +224,7 @@ static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
225 struct srp_direct_buf *md = NULL; 224 struct srp_direct_buf *md = NULL;
226 struct scatterlist dummy, *sg = NULL; 225 struct scatterlist dummy, *sg = NULL;
227 dma_addr_t token = 0; 226 dma_addr_t token = 0;
228 long err; 227 int err = 0;
229 unsigned int done = 0;
230 int nmd, nsg = 0, len; 228 int nmd, nsg = 0, len;
231 229
232 if (dma_map || ext_desc) { 230 if (dma_map || ext_desc) {
@@ -258,8 +256,8 @@ static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
258 sg_dma_address(&dummy) = token; 256 sg_dma_address(&dummy) = token;
259 err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE, 257 err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
260 id->table_desc.len); 258 id->table_desc.len);
261 if (err < 0) { 259 if (err) {
262 eprintk("Error copying indirect table %ld\n", err); 260 eprintk("Error copying indirect table %d\n", err);
263 goto free_mem; 261 goto free_mem;
264 } 262 }
265 } else { 263 } else {
@@ -272,6 +270,7 @@ rdma:
272 nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, DMA_BIDIRECTIONAL); 270 nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, DMA_BIDIRECTIONAL);
273 if (!nsg) { 271 if (!nsg) {
274 eprintk("fail to map %p %d\n", iue, sc->use_sg); 272 eprintk("fail to map %p %d\n", iue, sc->use_sg);
273 err = -EIO;
275 goto free_mem; 274 goto free_mem;
276 } 275 }
277 len = min(sc->request_bufflen, id->len); 276 len = min(sc->request_bufflen, id->len);
@@ -287,7 +286,7 @@ free_mem:
287 if (token && dma_map) 286 if (token && dma_map)
288 dma_free_coherent(iue->target->dev, id->table_desc.len, md, token); 287 dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);
289 288
290 return done; 289 return err;
291} 290}
292 291
293static int data_out_desc_size(struct srp_cmd *cmd) 292static int data_out_desc_size(struct srp_cmd *cmd)
@@ -352,7 +351,7 @@ int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
352 break; 351 break;
353 default: 352 default:
354 eprintk("Unknown format %d %x\n", dir, format); 353 eprintk("Unknown format %d %x\n", dir, format);
355 break; 354 err = -EINVAL;
356 } 355 }
357 356
358 return err; 357 return err;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 057fd7e0e379..dcf6106f557a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -671,7 +671,7 @@ static int
671lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len) 671lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
672{ 672{
673 uint8_t lenlo, lenhi; 673 uint8_t lenlo, lenhi;
674 uint32_t Length; 674 int Length;
675 int i, j; 675 int i, j;
676 int finished = 0; 676 int finished = 0;
677 int index = 0; 677 int index = 0;
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 0aa3304f6b9b..7fc6e06ea7e1 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -2088,7 +2088,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
2088static inline int 2088static inline int
2089make_local_pdev(adapter_t *adapter, struct pci_dev **pdev) 2089make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
2090{ 2090{
2091 *pdev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL); 2091 *pdev = alloc_pci_dev();
2092 2092
2093 if( *pdev == NULL ) return -1; 2093 if( *pdev == NULL ) return -1;
2094 2094
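For the megaraid hunk above, the driver's throwaway pci_dev was previously kmalloc'd and therefore full of uninitialized fields; switching to alloc_pci_dev() (assuming that helper, which hands back a zeroed structure with its list heads set up) avoids passing the PCI core a half-initialized device object. A hedged sketch with an illustrative function name:

#include <linux/errno.h>
#include <linux/pci.h>

static int make_local_pdev_sketch(struct pci_dev **pdev)
{
        *pdev = alloc_pci_dev();        /* zeroed pci_dev, list heads initialized */
        if (*pdev == NULL)
                return -ENOMEM;         /* kmalloc() gave none of those guarantees */
        return 0;
}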
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index a967fadb7439..08060fb478b6 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -87,6 +87,7 @@ MODULE_AUTHOR("Willem Riede");
87MODULE_DESCRIPTION("OnStream {DI-|FW-|SC-|USB}{30|50} Tape Driver"); 87MODULE_DESCRIPTION("OnStream {DI-|FW-|SC-|USB}{30|50} Tape Driver");
88MODULE_LICENSE("GPL"); 88MODULE_LICENSE("GPL");
89MODULE_ALIAS_CHARDEV_MAJOR(OSST_MAJOR); 89MODULE_ALIAS_CHARDEV_MAJOR(OSST_MAJOR);
90MODULE_ALIAS_SCSI_DEVICE(TYPE_TAPE);
90 91
91module_param(max_dev, int, 0444); 92module_param(max_dev, int, 0444);
92MODULE_PARM_DESC(max_dev, "Maximum number of OnStream Tape Drives to attach (4)"); 93MODULE_PARM_DESC(max_dev, "Maximum number of OnStream Tape Drives to attach (4)");
diff --git a/drivers/scsi/pci2000.h b/drivers/scsi/pci2000.h
deleted file mode 100644
index 0ebd8ce9e1de..000000000000
--- a/drivers/scsi/pci2000.h
+++ /dev/null
@@ -1,197 +0,0 @@
1/****************************************************************************
2 * Perceptive Solutions, Inc. PCI-2000 device driver for Linux.
3 *
4 * pci2000.h - Linux Host Driver for PCI-2000 IntelliCache SCSI Adapters
5 *
6 * Copyright (c) 1997-1999 Perceptive Solutions, Inc.
7 * All Rights Reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that redistributions of source
11 * code retain the above copyright notice and this comment without
12 * modification.
13 *
14 * Technical updates and product information at:
15 * http://www.psidisk.com
16 *
17 * Please send questions, comments, bug reports to:
18 * tech@psidisk.com Technical Support
19 *
20 ****************************************************************************/
21#ifndef _PCI2000_H
22#define _PCI2000_H
23
24#include <linux/types.h>
25
26#ifndef PSI_EIDE_SCSIOP
27#define PSI_EIDE_SCSIOP 1
28
29#define LINUXVERSION(v,p,s) (((v)<<16) + ((p)<<8) + (s))
30
31/************************************************/
32/* definition of standard data types */
33/************************************************/
34#define CHAR char
35#define UCHAR unsigned char
36#define SHORT short
37#define USHORT unsigned short
38#define BOOL long
39#define LONG long
40#define ULONG unsigned long
41#define VOID void
42
43typedef CHAR *PCHAR;
44typedef UCHAR *PUCHAR;
45typedef SHORT *PSHORT;
46typedef USHORT *PUSHORT;
47typedef BOOL *PBOOL;
48typedef LONG *PLONG;
49typedef ULONG *PULONG;
50typedef VOID *PVOID;
51
52
53/************************************************/
54/* Misc. macros */
55/************************************************/
56#define ANY2SCSI(up, p) \
57((UCHAR *)up)[0] = (((ULONG)(p)) >> 8); \
58((UCHAR *)up)[1] = ((ULONG)(p));
59
60#define SCSI2LONG(up) \
61( (((long)*(((UCHAR *)up))) << 16) \
62+ (((long)(((UCHAR *)up)[1])) << 8) \
63+ ((long)(((UCHAR *)up)[2])) )
64
65#define XANY2SCSI(up, p) \
66((UCHAR *)up)[0] = ((long)(p)) >> 24; \
67((UCHAR *)up)[1] = ((long)(p)) >> 16; \
68((UCHAR *)up)[2] = ((long)(p)) >> 8; \
69((UCHAR *)up)[3] = ((long)(p));
70
71#define XSCSI2LONG(up) \
72( (((long)(((UCHAR *)up)[0])) << 24) \
73+ (((long)(((UCHAR *)up)[1])) << 16) \
74+ (((long)(((UCHAR *)up)[2])) << 8) \
75+ ((long)(((UCHAR *)up)[3])) )
76
77/************************************************/
78/* SCSI CDB operation codes */
79/************************************************/
80#define SCSIOP_TEST_UNIT_READY 0x00
81#define SCSIOP_REZERO_UNIT 0x01
82#define SCSIOP_REWIND 0x01
83#define SCSIOP_REQUEST_BLOCK_ADDR 0x02
84#define SCSIOP_REQUEST_SENSE 0x03
85#define SCSIOP_FORMAT_UNIT 0x04
86#define SCSIOP_READ_BLOCK_LIMITS 0x05
87#define SCSIOP_REASSIGN_BLOCKS 0x07
88#define SCSIOP_READ6 0x08
89#define SCSIOP_RECEIVE 0x08
90#define SCSIOP_WRITE6 0x0A
91#define SCSIOP_PRINT 0x0A
92#define SCSIOP_SEND 0x0A
93#define SCSIOP_SEEK6 0x0B
94#define SCSIOP_TRACK_SELECT 0x0B
95#define SCSIOP_SLEW_PRINT 0x0B
96#define SCSIOP_SEEK_BLOCK 0x0C
97#define SCSIOP_PARTITION 0x0D
98#define SCSIOP_READ_REVERSE 0x0F
99#define SCSIOP_WRITE_FILEMARKS 0x10
100#define SCSIOP_FLUSH_BUFFER 0x10
101#define SCSIOP_SPACE 0x11
102#define SCSIOP_INQUIRY 0x12
103#define SCSIOP_VERIFY6 0x13
104#define SCSIOP_RECOVER_BUF_DATA 0x14
105#define SCSIOP_MODE_SELECT 0x15
106#define SCSIOP_RESERVE_UNIT 0x16
107#define SCSIOP_RELEASE_UNIT 0x17
108#define SCSIOP_COPY 0x18
109#define SCSIOP_ERASE 0x19
110#define SCSIOP_MODE_SENSE 0x1A
111#define SCSIOP_START_STOP_UNIT 0x1B
112#define SCSIOP_STOP_PRINT 0x1B
113#define SCSIOP_LOAD_UNLOAD 0x1B
114#define SCSIOP_RECEIVE_DIAGNOSTIC 0x1C
115#define SCSIOP_SEND_DIAGNOSTIC 0x1D
116#define SCSIOP_MEDIUM_REMOVAL 0x1E
117#define SCSIOP_READ_CAPACITY 0x25
118#define SCSIOP_READ 0x28
119#define SCSIOP_WRITE 0x2A
120#define SCSIOP_SEEK 0x2B
121#define SCSIOP_LOCATE 0x2B
122#define SCSIOP_WRITE_VERIFY 0x2E
123#define SCSIOP_VERIFY 0x2F
124#define SCSIOP_SEARCH_DATA_HIGH 0x30
125#define SCSIOP_SEARCH_DATA_EQUAL 0x31
126#define SCSIOP_SEARCH_DATA_LOW 0x32
127#define SCSIOP_SET_LIMITS 0x33
128#define SCSIOP_READ_POSITION 0x34
129#define SCSIOP_SYNCHRONIZE_CACHE 0x35
130#define SCSIOP_COMPARE 0x39
131#define SCSIOP_COPY_COMPARE 0x3A
132#define SCSIOP_WRITE_DATA_BUFF 0x3B
133#define SCSIOP_READ_DATA_BUFF 0x3C
134#define SCSIOP_CHANGE_DEFINITION 0x40
135#define SCSIOP_READ_SUB_CHANNEL 0x42
136#define SCSIOP_READ_TOC 0x43
137#define SCSIOP_READ_HEADER 0x44
138#define SCSIOP_PLAY_AUDIO 0x45
139#define SCSIOP_PLAY_AUDIO_MSF 0x47
140#define SCSIOP_PLAY_TRACK_INDEX 0x48
141#define SCSIOP_PLAY_TRACK_RELATIVE 0x49
142#define SCSIOP_PAUSE_RESUME 0x4B
143#define SCSIOP_LOG_SELECT 0x4C
144#define SCSIOP_LOG_SENSE 0x4D
145#define SCSIOP_MODE_SELECT10 0x55
146#define SCSIOP_MODE_SENSE10 0x5A
147#define SCSIOP_LOAD_UNLOAD_SLOT 0xA6
148#define SCSIOP_MECHANISM_STATUS 0xBD
149#define SCSIOP_READ_CD 0xBE
150
151// SCSI read capacity structure
152typedef struct _READ_CAPACITY_DATA
153 {
154 ULONG blks; /* total blocks (converted to little endian) */
155 ULONG blksiz; /* size of each (converted to little endian) */
156 } READ_CAPACITY_DATA, *PREAD_CAPACITY_DATA;
157
158// SCSI inquiry data
159typedef struct _INQUIRYDATA
160 {
161 UCHAR DeviceType :5;
162 UCHAR DeviceTypeQualifier :3;
163 UCHAR DeviceTypeModifier :7;
164 UCHAR RemovableMedia :1;
165 UCHAR Versions;
166 UCHAR ResponseDataFormat;
167 UCHAR AdditionalLength;
168 UCHAR Reserved[2];
169 UCHAR SoftReset :1;
170 UCHAR CommandQueue :1;
171 UCHAR Reserved2 :1;
172 UCHAR LinkedCommands :1;
173 UCHAR Synchronous :1;
174 UCHAR Wide16Bit :1;
175 UCHAR Wide32Bit :1;
176 UCHAR RelativeAddressing :1;
177 UCHAR VendorId[8];
178 UCHAR ProductId[16];
179 UCHAR ProductRevisionLevel[4];
180 UCHAR VendorSpecific[20];
181 UCHAR Reserved3[40];
182 } INQUIRYDATA, *PINQUIRYDATA;
183
184#endif
185
186// function prototypes
187int Pci2000_Detect (struct scsi_host_template *tpnt);
188int Pci2000_Command (Scsi_Cmnd *SCpnt);
189int Pci2000_QueueCommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *));
190int Pci2000_Abort (Scsi_Cmnd *SCpnt);
191int Pci2000_Reset (Scsi_Cmnd *SCpnt, unsigned int flags);
192int Pci2000_Release (struct Scsi_Host *pshost);
193int Pci2000_BiosParam (struct scsi_device *sdev,
194 struct block_device *bdev,
195 sector_t capacity, int geom[]);
196
197#endif
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index eac8e179cfff..7dd787f6ab27 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -3,11 +3,11 @@
3# 3#
4 4
5menu "PCMCIA SCSI adapter support" 5menu "PCMCIA SCSI adapter support"
6 depends on SCSI!=n && PCMCIA!=n && MODULES 6 depends on SCSI!=n && PCMCIA!=n
7 7
8config PCMCIA_AHA152X 8config PCMCIA_AHA152X
9 tristate "Adaptec AHA152X PCMCIA support" 9 tristate "Adaptec AHA152X PCMCIA support"
10 depends on m && !64BIT 10 depends on !64BIT
11 select SCSI_SPI_ATTRS 11 select SCSI_SPI_ATTRS
12 help 12 help
13 Say Y here if you intend to attach this type of PCMCIA SCSI host 13 Say Y here if you intend to attach this type of PCMCIA SCSI host
@@ -18,7 +18,6 @@ config PCMCIA_AHA152X
18 18
19config PCMCIA_FDOMAIN 19config PCMCIA_FDOMAIN
20 tristate "Future Domain PCMCIA support" 20 tristate "Future Domain PCMCIA support"
21 depends on m
22 help 21 help
23 Say Y here if you intend to attach this type of PCMCIA SCSI host 22 Say Y here if you intend to attach this type of PCMCIA SCSI host
24 adapter to your computer. 23 adapter to your computer.
@@ -28,7 +27,7 @@ config PCMCIA_FDOMAIN
28 27
29config PCMCIA_NINJA_SCSI 28config PCMCIA_NINJA_SCSI
30 tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support" 29 tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support"
31 depends on m && !64BIT 30 depends on !64BIT
32 help 31 help
33 If you intend to attach this type of PCMCIA SCSI host adapter to 32 If you intend to attach this type of PCMCIA SCSI host adapter to
34 your computer, say Y here and read 33 your computer, say Y here and read
@@ -62,7 +61,6 @@ config PCMCIA_NINJA_SCSI
62 61
63config PCMCIA_QLOGIC 62config PCMCIA_QLOGIC
64 tristate "Qlogic PCMCIA support" 63 tristate "Qlogic PCMCIA support"
65 depends on m
66 help 64 help
67 Say Y here if you intend to attach this type of PCMCIA SCSI host 65 Say Y here if you intend to attach this type of PCMCIA SCSI host
68 adapter to your computer. 66 adapter to your computer.
@@ -72,7 +70,6 @@ config PCMCIA_QLOGIC
72 70
73config PCMCIA_SYM53C500 71config PCMCIA_SYM53C500
74 tristate "Symbios 53c500 PCMCIA support" 72 tristate "Symbios 53c500 PCMCIA support"
75 depends on m
76 help 73 help
77 Say Y here if you have a New Media Bus Toaster or other PCMCIA 74 Say Y here if you have a New Media Bus Toaster or other PCMCIA
78 SCSI adapter based on the Symbios 53c500 controller. 75 SCSI adapter based on the Symbios 53c500 controller.
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 05f4f2a378eb..e8948b679f5b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1478,14 +1478,17 @@ typedef union {
1478 uint32_t b24 : 24; 1478 uint32_t b24 : 24;
1479 1479
1480 struct { 1480 struct {
1481 uint8_t d_id[3]; 1481#ifdef __BIG_ENDIAN
1482 uint8_t rsvd_1; 1482 uint8_t domain;
1483 } r; 1483 uint8_t area;
1484 1484 uint8_t al_pa;
1485 struct { 1485#elif __LITTLE_ENDIAN
1486 uint8_t al_pa; 1486 uint8_t al_pa;
1487 uint8_t area; 1487 uint8_t area;
1488 uint8_t domain; 1488 uint8_t domain;
1489#else
1490#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
1491#endif
1489 uint8_t rsvd_1; 1492 uint8_t rsvd_1;
1490 } b; 1493 } b;
1491} port_id_t; 1494} port_id_t;
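The qla_def.h hunk replaces the raw d_id byte array with byte fields whose order depends on host endianness, so the 24-bit b24 overlay and the per-byte domain/area/al_pa views stay consistent on both big- and little-endian machines. The sketch below mirrors that #ifdef layout under an illustrative type name; it is not the driver's header.

#include <asm/byteorder.h>
#include <linux/types.h>

typedef union {
        uint32_t b24 : 24;              /* whole 24-bit FC port id */

        struct {
#ifdef __BIG_ENDIAN
                uint8_t domain;         /* most significant byte stored first */
                uint8_t area;
                uint8_t al_pa;
#elif defined(__LITTLE_ENDIAN)
                uint8_t al_pa;          /* least significant byte stored first */
                uint8_t area;
                uint8_t domain;
#else
#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
#endif
                uint8_t rsvd_1;
        } b;
} fc_port_id_sketch_t;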
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 98c01cd5e1a8..3e296ab845b6 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -11,6 +11,11 @@
11 11
12#include "qla_devtbl.h" 12#include "qla_devtbl.h"
13 13
14#ifdef CONFIG_SPARC
15#include <asm/prom.h>
16#include <asm/pbm.h>
17#endif
18
14/* XXX(hch): this is ugly, but we don't want to pull in exioctl.h */ 19/* XXX(hch): this is ugly, but we don't want to pull in exioctl.h */
15#ifndef EXT_IS_LUN_BIT_SET 20#ifndef EXT_IS_LUN_BIT_SET
16#define EXT_IS_LUN_BIT_SET(P,L) \ 21#define EXT_IS_LUN_BIT_SET(P,L) \
@@ -88,12 +93,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
88 93
89 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 94 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
90 95
91 rval = ha->isp_ops.nvram_config(ha); 96 ha->isp_ops.nvram_config(ha);
92 if (rval) {
93 DEBUG2(printk("scsi(%ld): Unable to verify NVRAM data.\n",
94 ha->host_no));
95 return rval;
96 }
97 97
98 if (ha->flags.disable_serdes) { 98 if (ha->flags.disable_serdes) {
99 /* Mask HBA via NVRAM settings? */ 99 /* Mask HBA via NVRAM settings? */
@@ -1393,6 +1393,28 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
1393 } 1393 }
1394} 1394}
1395 1395
1396/* On sparc systems, obtain port and node WWN from firmware
1397 * properties.
1398 */
1399static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv)
1400{
1401#ifdef CONFIG_SPARC
1402 struct pci_dev *pdev = ha->pdev;
1403 struct pcidev_cookie *pcp = pdev->sysdata;
1404 struct device_node *dp = pcp->prom_node;
1405 u8 *val;
1406 int len;
1407
1408 val = of_get_property(dp, "port-wwn", &len);
1409 if (val && len >= WWN_SIZE)
1410 memcpy(nv->port_name, val, WWN_SIZE);
1411
1412 val = of_get_property(dp, "node-wwn", &len);
1413 if (val && len >= WWN_SIZE)
1414 memcpy(nv->node_name, val, WWN_SIZE);
1415#endif
1416}
1417
1396/* 1418/*
1397* NVRAM configuration for ISP 2xxx 1419* NVRAM configuration for ISP 2xxx
1398* 1420*
@@ -1409,6 +1431,7 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
1409int 1431int
1410qla2x00_nvram_config(scsi_qla_host_t *ha) 1432qla2x00_nvram_config(scsi_qla_host_t *ha)
1411{ 1433{
1434 int rval;
1412 uint8_t chksum = 0; 1435 uint8_t chksum = 0;
1413 uint16_t cnt; 1436 uint16_t cnt;
1414 uint8_t *dptr1, *dptr2; 1437 uint8_t *dptr1, *dptr2;
@@ -1417,6 +1440,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1417 uint8_t *ptr = (uint8_t *)ha->request_ring; 1440 uint8_t *ptr = (uint8_t *)ha->request_ring;
1418 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1441 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1419 1442
1443 rval = QLA_SUCCESS;
1444
1420 /* Determine NVRAM starting address. */ 1445 /* Determine NVRAM starting address. */
1421 ha->nvram_size = sizeof(nvram_t); 1446 ha->nvram_size = sizeof(nvram_t);
1422 ha->nvram_base = 0; 1447 ha->nvram_base = 0;
@@ -1440,7 +1465,57 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1440 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 1465 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
1441 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 1466 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
1442 nv->nvram_version); 1467 nv->nvram_version);
1443 return QLA_FUNCTION_FAILED; 1468 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
1469 "invalid -- WWPN) defaults.\n");
1470
1471 /*
1472 * Set default initialization control block.
1473 */
1474 memset(nv, 0, ha->nvram_size);
1475 nv->parameter_block_version = ICB_VERSION;
1476
1477 if (IS_QLA23XX(ha)) {
1478 nv->firmware_options[0] = BIT_2 | BIT_1;
1479 nv->firmware_options[1] = BIT_7 | BIT_5;
1480 nv->add_firmware_options[0] = BIT_5;
1481 nv->add_firmware_options[1] = BIT_5 | BIT_4;
1482 nv->frame_payload_size = __constant_cpu_to_le16(2048);
1483 nv->special_options[1] = BIT_7;
1484 } else if (IS_QLA2200(ha)) {
1485 nv->firmware_options[0] = BIT_2 | BIT_1;
1486 nv->firmware_options[1] = BIT_7 | BIT_5;
1487 nv->add_firmware_options[0] = BIT_5;
1488 nv->add_firmware_options[1] = BIT_5 | BIT_4;
1489 nv->frame_payload_size = __constant_cpu_to_le16(1024);
1490 } else if (IS_QLA2100(ha)) {
1491 nv->firmware_options[0] = BIT_3 | BIT_1;
1492 nv->firmware_options[1] = BIT_5;
1493 nv->frame_payload_size = __constant_cpu_to_le16(1024);
1494 }
1495
1496 nv->max_iocb_allocation = __constant_cpu_to_le16(256);
1497 nv->execution_throttle = __constant_cpu_to_le16(16);
1498 nv->retry_count = 8;
1499 nv->retry_delay = 1;
1500
1501 nv->port_name[0] = 33;
1502 nv->port_name[3] = 224;
1503 nv->port_name[4] = 139;
1504
1505 qla2xxx_nvram_wwn_from_ofw(ha, nv);
1506
1507 nv->login_timeout = 4;
1508
1509 /*
1510 * Set default host adapter parameters
1511 */
1512 nv->host_p[1] = BIT_2;
1513 nv->reset_delay = 5;
1514 nv->port_down_retry_count = 8;
1515 nv->max_luns_per_target = __constant_cpu_to_le16(8);
1516 nv->link_down_timeout = 60;
1517
1518 rval = 1;
1444 } 1519 }
1445 1520
1446#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 1521#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
@@ -1653,7 +1728,11 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1653 } 1728 }
1654 } 1729 }
1655 1730
1656 return QLA_SUCCESS; 1731 if (rval) {
1732 DEBUG2_3(printk(KERN_WARNING
1733 "scsi(%ld): NVRAM configuration failed!\n", ha->host_no));
1734 }
1735 return (rval);
1657} 1736}
1658 1737
1659static void 1738static void
@@ -3071,9 +3150,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3071 3150
3072 ha->isp_ops.get_flash_version(ha, ha->request_ring); 3151 ha->isp_ops.get_flash_version(ha, ha->request_ring);
3073 3152
3074 rval = ha->isp_ops.nvram_config(ha); 3153 ha->isp_ops.nvram_config(ha);
3075 if (rval)
3076 goto isp_abort_retry;
3077 3154
3078 if (!qla2x00_restart_isp(ha)) { 3155 if (!qla2x00_restart_isp(ha)) {
3079 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 3156 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
@@ -3103,7 +3180,6 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3103 } 3180 }
3104 } 3181 }
3105 } else { /* failed the ISP abort */ 3182 } else { /* failed the ISP abort */
3106isp_abort_retry:
3107 ha->flags.online = 1; 3183 ha->flags.online = 1;
3108 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { 3184 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
3109 if (ha->isp_abort_cnt == 0) { 3185 if (ha->isp_abort_cnt == 0) {
@@ -3290,9 +3366,32 @@ qla24xx_reset_adapter(scsi_qla_host_t *ha)
3290 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3366 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3291} 3367}
3292 3368
3369/* On sparc systems, obtain port and node WWN from firmware
3370 * properties.
3371 */
3372static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *nv)
3373{
3374#ifdef CONFIG_SPARC
3375 struct pci_dev *pdev = ha->pdev;
3376 struct pcidev_cookie *pcp = pdev->sysdata;
3377 struct device_node *dp = pcp->prom_node;
3378 u8 *val;
3379 int len;
3380
3381 val = of_get_property(dp, "port-wwn", &len);
3382 if (val && len >= WWN_SIZE)
3383 memcpy(nv->port_name, val, WWN_SIZE);
3384
3385 val = of_get_property(dp, "node-wwn", &len);
3386 if (val && len >= WWN_SIZE)
3387 memcpy(nv->node_name, val, WWN_SIZE);
3388#endif
3389}
3390
3293int 3391int
3294qla24xx_nvram_config(scsi_qla_host_t *ha) 3392qla24xx_nvram_config(scsi_qla_host_t *ha)
3295{ 3393{
3394 int rval;
3296 struct init_cb_24xx *icb; 3395 struct init_cb_24xx *icb;
3297 struct nvram_24xx *nv; 3396 struct nvram_24xx *nv;
3298 uint32_t *dptr; 3397 uint32_t *dptr;
@@ -3300,6 +3399,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3300 uint32_t chksum; 3399 uint32_t chksum;
3301 uint16_t cnt; 3400 uint16_t cnt;
3302 3401
3402 rval = QLA_SUCCESS;
3303 icb = (struct init_cb_24xx *)ha->init_cb; 3403 icb = (struct init_cb_24xx *)ha->init_cb;
3304 nv = (struct nvram_24xx *)ha->request_ring; 3404 nv = (struct nvram_24xx *)ha->request_ring;
3305 3405
@@ -3332,7 +3432,52 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3332 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 3432 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
3333 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 3433 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
3334 le16_to_cpu(nv->nvram_version)); 3434 le16_to_cpu(nv->nvram_version));
3335 return QLA_FUNCTION_FAILED; 3435 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
3436 "invalid -- WWPN) defaults.\n");
3437
3438 /*
3439 * Set default initialization control block.
3440 */
3441 memset(nv, 0, ha->nvram_size);
3442 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
3443 nv->version = __constant_cpu_to_le16(ICB_VERSION);
3444 nv->frame_payload_size = __constant_cpu_to_le16(2048);
3445 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
3446 nv->exchange_count = __constant_cpu_to_le16(0);
3447 nv->hard_address = __constant_cpu_to_le16(124);
3448 nv->port_name[0] = 0x21;
3449 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn);
3450 nv->port_name[2] = 0x00;
3451 nv->port_name[3] = 0xe0;
3452 nv->port_name[4] = 0x8b;
3453 nv->port_name[5] = 0x1c;
3454 nv->port_name[6] = 0x55;
3455 nv->port_name[7] = 0x86;
3456 nv->node_name[0] = 0x20;
3457 nv->node_name[1] = 0x00;
3458 nv->node_name[2] = 0x00;
3459 nv->node_name[3] = 0xe0;
3460 nv->node_name[4] = 0x8b;
3461 nv->node_name[5] = 0x1c;
3462 nv->node_name[6] = 0x55;
3463 nv->node_name[7] = 0x86;
3464 qla24xx_nvram_wwn_from_ofw(ha, nv);
3465 nv->login_retry_count = __constant_cpu_to_le16(8);
3466 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
3467 nv->login_timeout = __constant_cpu_to_le16(0);
3468 nv->firmware_options_1 =
3469 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
3470 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
3471 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
3472 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
3473 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
3474 nv->efi_parameters = __constant_cpu_to_le32(0);
3475 nv->reset_delay = 5;
3476 nv->max_luns_per_target = __constant_cpu_to_le16(128);
3477 nv->port_down_retry_count = __constant_cpu_to_le16(30);
3478 nv->link_down_timeout = __constant_cpu_to_le16(30);
3479
3480 rval = 1;
3336 } 3481 }
3337 3482
3338 /* Reset Initialization control block */ 3483 /* Reset Initialization control block */
@@ -3479,7 +3624,11 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3479 ha->flags.process_response_queue = 1; 3624 ha->flags.process_response_queue = 1;
3480 } 3625 }
3481 3626
3482 return QLA_SUCCESS; 3627 if (rval) {
3628 DEBUG2_3(printk(KERN_WARNING
3629 "scsi(%ld): NVRAM configuration failed!\n", ha->host_no));
3630 }
3631 return (rval);
3483} 3632}
3484 3633
3485static int 3634static int
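The qla_init.c changes above let NVRAM configuration fall back to built-in defaults and, on sparc, repair the otherwise-invalid WWNs from OpenFirmware properties. The sketch below shows only that property-lookup pattern, with illustrative names (wwn_from_ofw_sketch, WWN_SIZE_SKETCH) and the era-specific <asm/prom.h> header the hunk itself includes; the driver's real helpers are qla2xxx_nvram_wwn_from_ofw() and qla24xx_nvram_wwn_from_ofw().

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#include <linux/string.h>
#include <linux/types.h>

#define WWN_SIZE_SKETCH 8               /* a Fibre Channel WWN is 8 bytes */

static void wwn_from_ofw_sketch(struct device_node *dp,
                                u8 *port_name, u8 *node_name)
{
        const u8 *val;
        int len;

        val = of_get_property(dp, "port-wwn", &len);
        if (val && len >= WWN_SIZE_SKETCH)      /* only trust a full-length WWN */
                memcpy(port_name, val, WWN_SIZE_SKETCH);

        val = of_get_property(dp, "node-wwn", &len);
        if (val && len >= WWN_SIZE_SKETCH)
                memcpy(node_name, val, WWN_SIZE_SKETCH);
}
#endif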
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 83376f6ac3db..71e32a248528 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1280,14 +1280,14 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1280 } else { 1280 } else {
1281 if (name != NULL) { 1281 if (name != NULL) {
1282 /* This function returns name in big endian. */ 1282 /* This function returns name in big endian. */
1283 name[0] = LSB(mcp->mb[2]); 1283 name[0] = MSB(mcp->mb[2]);
1284 name[1] = MSB(mcp->mb[2]); 1284 name[1] = LSB(mcp->mb[2]);
1285 name[2] = LSB(mcp->mb[3]); 1285 name[2] = MSB(mcp->mb[3]);
1286 name[3] = MSB(mcp->mb[3]); 1286 name[3] = LSB(mcp->mb[3]);
1287 name[4] = LSB(mcp->mb[6]); 1287 name[4] = MSB(mcp->mb[6]);
1288 name[5] = MSB(mcp->mb[6]); 1288 name[5] = LSB(mcp->mb[6]);
1289 name[6] = LSB(mcp->mb[7]); 1289 name[6] = MSB(mcp->mb[7]);
1290 name[7] = MSB(mcp->mb[7]); 1290 name[7] = LSB(mcp->mb[7]);
1291 } 1291 }
1292 1292
1293 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n", 1293 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n",
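The qla_mbx.c hunk fixes the unpacking of the big-endian port name returned in 16-bit mailbox registers: the high byte of each register is the earlier WWN byte, so MSB() must come before LSB(). A small sketch of the corrected ordering, using stand-in macros rather than the driver's MSB()/LSB():

#include <linux/types.h>

#define SKETCH_MSB(x)   ((uint8_t)((uint16_t)(x) >> 8))
#define SKETCH_LSB(x)   ((uint8_t)(x))

/* mb[] holds mailboxes 2, 3, 6 and 7; name[] receives the big-endian WWN */
static void unpack_port_name_sketch(const uint16_t mb[4], uint8_t name[8])
{
        int i;

        for (i = 0; i < 4; i++) {
                name[2 * i]     = SKETCH_MSB(mb[i]);    /* high byte first */
                name[2 * i + 1] = SKETCH_LSB(mb[i]);
        }
}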
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 68f5d24b938b..b78919a318e2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -62,7 +62,7 @@ MODULE_PARM_DESC(ql2xallocfwdump,
62 "vary by ISP type. Default is 1 - allocate memory."); 62 "vary by ISP type. Default is 1 - allocate memory.");
63 63
64int ql2xextended_error_logging; 64int ql2xextended_error_logging;
65module_param(ql2xextended_error_logging, int, S_IRUGO|S_IRUSR); 65module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
66MODULE_PARM_DESC(ql2xextended_error_logging, 66MODULE_PARM_DESC(ql2xextended_error_logging,
67 "Option to enable extended error logging, " 67 "Option to enable extended error logging, "
68 "Default is 0 - no logging. 1 - log errors."); 68 "Default is 0 - no logging. 1 - log errors.");
@@ -157,6 +157,8 @@ static struct scsi_host_template qla24xx_driver_template = {
157 157
158 .slave_alloc = qla2xxx_slave_alloc, 158 .slave_alloc = qla2xxx_slave_alloc,
159 .slave_destroy = qla2xxx_slave_destroy, 159 .slave_destroy = qla2xxx_slave_destroy,
160 .scan_finished = qla2xxx_scan_finished,
161 .scan_start = qla2xxx_scan_start,
160 .change_queue_depth = qla2x00_change_queue_depth, 162 .change_queue_depth = qla2x00_change_queue_depth,
161 .change_queue_type = qla2x00_change_queue_type, 163 .change_queue_type = qla2x00_change_queue_type,
162 .this_id = -1, 164 .this_id = -1,
@@ -1705,6 +1707,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
1705 1707
1706 scsi_host_put(ha->host); 1708 scsi_host_put(ha->host);
1707 1709
1710 pci_disable_device(pdev);
1708 pci_set_drvdata(pdev, NULL); 1711 pci_set_drvdata(pdev, NULL);
1709} 1712}
1710 1713
@@ -1747,8 +1750,6 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1747 if (ha->iobase) 1750 if (ha->iobase)
1748 iounmap(ha->iobase); 1751 iounmap(ha->iobase);
1749 pci_release_regions(ha->pdev); 1752 pci_release_regions(ha->pdev);
1750
1751 pci_disable_device(ha->pdev);
1752} 1753}
1753 1754
1754static inline void 1755static inline void
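One of the qla_os.c hunks corrects the ql2xextended_error_logging permissions: S_IRUGO|S_IRUSR was effectively read-only (S_IRUSR is already contained in S_IRUGO), while S_IRUGO|S_IWUSR additionally lets root flip the option through /sys/module/.../parameters at runtime. A hedged sketch with an illustrative parameter name:

#include <linux/module.h>
#include <linux/stat.h>

static int sketch_extended_logging;

/* world-readable, root-writable: toggled at runtime without a module reload */
module_param(sketch_extended_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sketch_extended_logging,
        "Illustrative option: 0 - no logging, 1 - log errors.");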
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index ff1dd4175a7f..206bda093da2 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -466,6 +466,7 @@ qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
466 udelay(10); 466 udelay(10);
467 else 467 else
468 rval = QLA_FUNCTION_TIMEOUT; 468 rval = QLA_FUNCTION_TIMEOUT;
469 cond_resched();
469 } 470 }
470 471
471 /* TODO: What happens if we time out? */ 472 /* TODO: What happens if we time out? */
@@ -508,6 +509,7 @@ qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
508 udelay(10); 509 udelay(10);
509 else 510 else
510 rval = QLA_FUNCTION_TIMEOUT; 511 rval = QLA_FUNCTION_TIMEOUT;
512 cond_resched();
511 } 513 }
512 return rval; 514 return rval;
513} 515}
@@ -1255,6 +1257,7 @@ qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data,
1255 } 1257 }
1256 udelay(10); 1258 udelay(10);
1257 barrier(); 1259 barrier();
1260 cond_resched();
1258 } 1261 }
1259 return status; 1262 return status;
1260} 1263}
@@ -1403,6 +1406,7 @@ qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr,
1403 if (saddr % 100) 1406 if (saddr % 100)
1404 udelay(10); 1407 udelay(10);
1405 *tmp_buf = data; 1408 *tmp_buf = data;
1409 cond_resched();
1406 } 1410 }
1407} 1411}
1408 1412
@@ -1449,7 +1453,6 @@ uint8_t *
1449qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1453qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1450 uint32_t offset, uint32_t length) 1454 uint32_t offset, uint32_t length)
1451{ 1455{
1452 unsigned long flags;
1453 uint32_t addr, midpoint; 1456 uint32_t addr, midpoint;
1454 uint8_t *data; 1457 uint8_t *data;
1455 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1458 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1458,7 +1461,6 @@ qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1458 qla2x00_suspend_hba(ha); 1461 qla2x00_suspend_hba(ha);
1459 1462
1460 /* Go with read. */ 1463 /* Go with read. */
1461 spin_lock_irqsave(&ha->hardware_lock, flags);
1462 midpoint = ha->optrom_size / 2; 1464 midpoint = ha->optrom_size / 2;
1463 1465
1464 qla2x00_flash_enable(ha); 1466 qla2x00_flash_enable(ha);
@@ -1473,7 +1475,6 @@ qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1473 *data = qla2x00_read_flash_byte(ha, addr); 1475 *data = qla2x00_read_flash_byte(ha, addr);
1474 } 1476 }
1475 qla2x00_flash_disable(ha); 1477 qla2x00_flash_disable(ha);
1476 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1477 1478
1478 /* Resume HBA. */ 1479 /* Resume HBA. */
1479 qla2x00_resume_hba(ha); 1480 qla2x00_resume_hba(ha);
@@ -1487,7 +1488,6 @@ qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1487{ 1488{
1488 1489
1489 int rval; 1490 int rval;
1490 unsigned long flags;
1491 uint8_t man_id, flash_id, sec_number, data; 1491 uint8_t man_id, flash_id, sec_number, data;
1492 uint16_t wd; 1492 uint16_t wd;
1493 uint32_t addr, liter, sec_mask, rest_addr; 1493 uint32_t addr, liter, sec_mask, rest_addr;
@@ -1500,7 +1500,6 @@ qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1500 sec_number = 0; 1500 sec_number = 0;
1501 1501
1502 /* Reset ISP chip. */ 1502 /* Reset ISP chip. */
1503 spin_lock_irqsave(&ha->hardware_lock, flags);
1504 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET); 1503 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1505 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); 1504 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
1506 1505
@@ -1689,10 +1688,10 @@ update_flash:
1689 rval = QLA_FUNCTION_FAILED; 1688 rval = QLA_FUNCTION_FAILED;
1690 break; 1689 break;
1691 } 1690 }
1691 cond_resched();
1692 } 1692 }
1693 } while (0); 1693 } while (0);
1694 qla2x00_flash_disable(ha); 1694 qla2x00_flash_disable(ha);
1695 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1696 1695
1697 /* Resume HBA. */ 1696 /* Resume HBA. */
1698 qla2x00_resume_hba(ha); 1697 qla2x00_resume_hba(ha);
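The qla_sup.c hunks drop the hardware_lock around the slow option-ROM flash paths and add cond_resched() to the polling loops, so long flash operations no longer monopolize a CPU. The sketch below shows the resulting poll-and-yield pattern under hypothetical names (flash_poll_sketch, flash_ready); it assumes the caller holds no spinlock, as the hunks arrange.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/types.h>

static int flash_poll_sketch(bool (*flash_ready)(void *), void *hw,
                             unsigned int max_iters)
{
        unsigned int i;

        for (i = 0; i < max_iters; i++) {
                if (flash_ready(hw))
                        return 0;
                udelay(10);             /* brief hardware settling delay */
                cond_resched();         /* safe: no spinlock held, IRQs enabled */
        }
        return -ETIMEDOUT;
}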
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 61347aee55ce..dc85495c337f 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.01.07-k5" 10#define QLA2XXX_VERSION "8.01.07-k6"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 1 13#define QLA_DRIVER_MINOR_VER 1
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1c89ee3e69ba..4c1e31334765 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -344,7 +344,6 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost)
344void scsi_log_send(struct scsi_cmnd *cmd) 344void scsi_log_send(struct scsi_cmnd *cmd)
345{ 345{
346 unsigned int level; 346 unsigned int level;
347 struct scsi_device *sdev;
348 347
349 /* 348 /*
350 * If ML QUEUE log level is greater than or equal to: 349 * If ML QUEUE log level is greater than or equal to:
@@ -361,22 +360,17 @@ void scsi_log_send(struct scsi_cmnd *cmd)
361 level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, 360 level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
362 SCSI_LOG_MLQUEUE_BITS); 361 SCSI_LOG_MLQUEUE_BITS);
363 if (level > 1) { 362 if (level > 1) {
364 sdev = cmd->device; 363 scmd_printk(KERN_INFO, cmd, "Send: ");
365 sdev_printk(KERN_INFO, sdev, "send ");
366 if (level > 2) 364 if (level > 2)
367 printk("0x%p ", cmd); 365 printk("0x%p ", cmd);
368 /* 366 printk("\n");
369 * spaces to match disposition and cmd->result
370 * output in scsi_log_completion.
371 */
372 printk(" ");
373 scsi_print_command(cmd); 367 scsi_print_command(cmd);
374 if (level > 3) { 368 if (level > 3) {
375 printk(KERN_INFO "buffer = 0x%p, bufflen = %d," 369 printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
376 " done = 0x%p, queuecommand 0x%p\n", 370 " done = 0x%p, queuecommand 0x%p\n",
377 cmd->request_buffer, cmd->request_bufflen, 371 cmd->request_buffer, cmd->request_bufflen,
378 cmd->done, 372 cmd->done,
379 sdev->host->hostt->queuecommand); 373 cmd->device->host->hostt->queuecommand);
380 374
381 } 375 }
382 } 376 }
@@ -386,7 +380,6 @@ void scsi_log_send(struct scsi_cmnd *cmd)
386void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) 380void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
387{ 381{
388 unsigned int level; 382 unsigned int level;
389 struct scsi_device *sdev;
390 383
391 /* 384 /*
392 * If ML COMPLETE log level is greater than or equal to: 385 * If ML COMPLETE log level is greater than or equal to:
@@ -405,8 +398,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
405 SCSI_LOG_MLCOMPLETE_BITS); 398 SCSI_LOG_MLCOMPLETE_BITS);
406 if (((level > 0) && (cmd->result || disposition != SUCCESS)) || 399 if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
407 (level > 1)) { 400 (level > 1)) {
408 sdev = cmd->device; 401 scmd_printk(KERN_INFO, cmd, "Done: ");
409 sdev_printk(KERN_INFO, sdev, "done ");
410 if (level > 2) 402 if (level > 2)
411 printk("0x%p ", cmd); 403 printk("0x%p ", cmd);
412 /* 404 /*
@@ -415,40 +407,35 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
415 */ 407 */
416 switch (disposition) { 408 switch (disposition) {
417 case SUCCESS: 409 case SUCCESS:
418 printk("SUCCESS"); 410 printk("SUCCESS\n");
419 break; 411 break;
420 case NEEDS_RETRY: 412 case NEEDS_RETRY:
421 printk("RETRY "); 413 printk("RETRY\n");
422 break; 414 break;
423 case ADD_TO_MLQUEUE: 415 case ADD_TO_MLQUEUE:
424 printk("MLQUEUE"); 416 printk("MLQUEUE\n");
425 break; 417 break;
426 case FAILED: 418 case FAILED:
427 printk("FAILED "); 419 printk("FAILED\n");
428 break; 420 break;
429 case TIMEOUT_ERROR: 421 case TIMEOUT_ERROR:
430 /* 422 /*
431 * If called via scsi_times_out. 423 * If called via scsi_times_out.
432 */ 424 */
433 printk("TIMEOUT"); 425 printk("TIMEOUT\n");
434 break; 426 break;
435 default: 427 default:
436 printk("UNKNOWN"); 428 printk("UNKNOWN\n");
437 } 429 }
438 printk(" %8x ", cmd->result); 430 scsi_print_result(cmd);
439 scsi_print_command(cmd); 431 scsi_print_command(cmd);
440 if (status_byte(cmd->result) & CHECK_CONDITION) { 432 if (status_byte(cmd->result) & CHECK_CONDITION)
441 /*
442 * XXX The scsi_print_sense formatting/prefix
443 * doesn't match this function.
444 */
445 scsi_print_sense("", cmd); 433 scsi_print_sense("", cmd);
446 } 434 if (level > 3)
447 if (level > 3) { 435 scmd_printk(KERN_INFO, cmd,
448 printk(KERN_INFO "scsi host busy %d failed %d\n", 436 "scsi host busy %d failed %d\n",
449 sdev->host->host_busy, 437 cmd->device->host->host_busy,
450 sdev->host->host_failed); 438 cmd->device->host->host_failed);
451 }
452 } 439 }
453 } 440 }
454} 441}
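The scsi.c logging rework prints through scmd_printk() so every line carries the device prefix, and it keys off per-facility levels carved out of the global logging word. The helper below mirrors what the SCSI_LOG_LEVEL() lookup computes (shift down to the facility's field, mask off its width); the function name is illustrative.

/* mirrors the SCSI_LOG_LEVEL() lookup: shift to the facility, mask its width */
static unsigned int scsi_log_level_sketch(unsigned int logging_word,
                                          unsigned int shift,
                                          unsigned int bits)
{
        return (logging_word >> shift) & ((1U << bits) - 1);
}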
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 918bb6019540..3963e7013bd9 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -184,10 +184,19 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
184 **/ 184 **/
185void scsi_times_out(struct scsi_cmnd *scmd) 185void scsi_times_out(struct scsi_cmnd *scmd)
186{ 186{
187 enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
188
187 scsi_log_completion(scmd, TIMEOUT_ERROR); 189 scsi_log_completion(scmd, TIMEOUT_ERROR);
188 190
189 if (scmd->device->host->transportt->eh_timed_out) 191 if (scmd->device->host->transportt->eh_timed_out)
190 switch (scmd->device->host->transportt->eh_timed_out(scmd)) { 192 eh_timed_out = scmd->device->host->transportt->eh_timed_out;
193 else if (scmd->device->host->hostt->eh_timed_out)
194 eh_timed_out = scmd->device->host->hostt->eh_timed_out;
195 else
196 eh_timed_out = NULL;
197
198 if (eh_timed_out)
199 switch (eh_timed_out(scmd)) {
191 case EH_HANDLED: 200 case EH_HANDLED:
192 __scsi_done(scmd); 201 __scsi_done(scmd);
193 return; 202 return;
@@ -923,10 +932,12 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
923 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0}; 932 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
924 933
925 if (scmd->device->allow_restart) { 934 if (scmd->device->allow_restart) {
926 int rtn; 935 int i, rtn = NEEDS_RETRY;
936
937 for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
938 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
939 START_UNIT_TIMEOUT, 0);
927 940
928 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
929 START_UNIT_TIMEOUT, 0);
930 if (rtn == SUCCESS) 941 if (rtn == SUCCESS)
931 return 0; 942 return 0;
932 } 943 }
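The first scsi_error.c hunk makes command timeouts consult a transport-template eh_timed_out handler first, with the host template's handler as the fallback, keeping the default expiry behaviour when neither exists. A sketch of that selection order, under an illustrative helper name and typedef:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>

typedef enum scsi_eh_timer_return (*eh_timed_out_fn)(struct scsi_cmnd *);

static eh_timed_out_fn pick_eh_timed_out_sketch(struct scsi_cmnd *scmd)
{
        struct Scsi_Host *shost = scmd->device->host;

        if (shost->transportt->eh_timed_out)    /* transport class wins */
                return shost->transportt->eh_timed_out;
        if (shost->hostt->eh_timed_out)         /* then the LLD's own handler */
                return shost->hostt->eh_timed_out;
        return NULL;                            /* keep the midlayer default */
}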
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 05d79af5ab90..61fbcdcbb009 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -848,8 +848,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
848 memcpy(req->sense, cmd->sense_buffer, len); 848 memcpy(req->sense, cmd->sense_buffer, len);
849 req->sense_len = len; 849 req->sense_len = len;
850 } 850 }
851 } else 851 }
852 req->data_len = cmd->resid; 852 req->data_len = cmd->resid;
853 } 853 }
854 854
855 /* 855 /*
@@ -968,9 +968,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
968 } 968 }
969 if (result) { 969 if (result) {
970 if (!(req->cmd_flags & REQ_QUIET)) { 970 if (!(req->cmd_flags & REQ_QUIET)) {
971 scmd_printk(KERN_INFO, cmd, 971 scsi_print_result(cmd);
972 "SCSI error: return code = 0x%08x\n",
973 result);
974 if (driver_byte(result) & DRIVER_SENSE) 972 if (driver_byte(result) & DRIVER_SENSE)
975 scsi_print_sense("", cmd); 973 scsi_print_sense("", cmd);
976 } 974 }
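The scsi_lib.c hunk makes the BLOCK_PC completion path record the residual unconditionally: req->data_len must reflect the untransferred byte count even when sense data was also copied back. A hedged sketch of that completion step, under an illustrative helper name:

#include <linux/blkdev.h>
#include <linux/types.h>

static void finish_blk_pc_sketch(struct request *req, unsigned int resid,
                                 const u8 *sense, unsigned int sense_len)
{
        if (sense && sense_len) {
                /* ... copy sense data into req->sense as the real path does ... */
        }
        req->data_len = resid;          /* always report the residual */
}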
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 0949145304ea..a67f315244d7 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -181,10 +181,8 @@ int scsi_complete_async_scans(void)
181 return 0; 181 return 0;
182} 182}
183 183
184#ifdef MODULE
185/* Only exported for the benefit of scsi_wait_scan */ 184/* Only exported for the benefit of scsi_wait_scan */
186EXPORT_SYMBOL_GPL(scsi_complete_async_scans); 185EXPORT_SYMBOL_GPL(scsi_complete_async_scans);
187#endif
188 186
189/** 187/**
190 * scsi_unlock_floptical - unlock device via a special MODE SENSE command 188 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 939de0de18bc..67a38a1409ba 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -276,8 +276,22 @@ static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
276 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0; 276 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
277} 277}
278 278
279static int scsi_bus_uevent(struct device *dev, char **envp, int num_envp,
280 char *buffer, int buffer_size)
281{
282 struct scsi_device *sdev = to_scsi_device(dev);
283 int i = 0;
284 int length = 0;
285
286 add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
287 "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type);
288 envp[i] = NULL;
289 return 0;
290}
291
279static int scsi_bus_suspend(struct device * dev, pm_message_t state) 292static int scsi_bus_suspend(struct device * dev, pm_message_t state)
280{ 293{
294 struct device_driver *drv = dev->driver;
281 struct scsi_device *sdev = to_scsi_device(dev); 295 struct scsi_device *sdev = to_scsi_device(dev);
282 struct scsi_host_template *sht = sdev->host->hostt; 296 struct scsi_host_template *sht = sdev->host->hostt;
283 int err; 297 int err;
@@ -286,28 +300,51 @@ static int scsi_bus_suspend(struct device * dev, pm_message_t state)
286 if (err) 300 if (err)
287 return err; 301 return err;
288 302
289 if (sht->suspend) 303 /* call HLD suspend first */
304 if (drv && drv->suspend) {
305 err = drv->suspend(dev, state);
306 if (err)
307 return err;
308 }
309
310 /* then, call host suspend */
311 if (sht->suspend) {
290 err = sht->suspend(sdev, state); 312 err = sht->suspend(sdev, state);
313 if (err) {
314 if (drv && drv->resume)
315 drv->resume(dev);
316 return err;
317 }
318 }
291 319
292 return err; 320 return 0;
293} 321}
294 322
295static int scsi_bus_resume(struct device * dev) 323static int scsi_bus_resume(struct device * dev)
296{ 324{
325 struct device_driver *drv = dev->driver;
297 struct scsi_device *sdev = to_scsi_device(dev); 326 struct scsi_device *sdev = to_scsi_device(dev);
298 struct scsi_host_template *sht = sdev->host->hostt; 327 struct scsi_host_template *sht = sdev->host->hostt;
299 int err = 0; 328 int err = 0, err2 = 0;
300 329
330 /* call host resume first */
301 if (sht->resume) 331 if (sht->resume)
302 err = sht->resume(sdev); 332 err = sht->resume(sdev);
303 333
334 /* then, call HLD resume */
335 if (drv && drv->resume)
336 err2 = drv->resume(dev);
337
304 scsi_device_resume(sdev); 338 scsi_device_resume(sdev);
305 return err; 339
340 /* favor LLD failure */
341 return err ? err : err2;;
306} 342}
307 343
308struct bus_type scsi_bus_type = { 344struct bus_type scsi_bus_type = {
309 .name = "scsi", 345 .name = "scsi",
310 .match = scsi_bus_match, 346 .match = scsi_bus_match,
347 .uevent = scsi_bus_uevent,
311 .suspend = scsi_bus_suspend, 348 .suspend = scsi_bus_suspend,
312 .resume = scsi_bus_resume, 349 .resume = scsi_bus_resume,
313}; 350};
@@ -547,6 +584,14 @@ show_sdev_iostat(iorequest_cnt);
547show_sdev_iostat(iodone_cnt); 584show_sdev_iostat(iodone_cnt);
548show_sdev_iostat(ioerr_cnt); 585show_sdev_iostat(ioerr_cnt);
549 586
587static ssize_t
588sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
589{
590 struct scsi_device *sdev;
591 sdev = to_scsi_device(dev);
592 return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type);
593}
594static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL);
550 595
551/* Default template for device attributes. May NOT be modified */ 596/* Default template for device attributes. May NOT be modified */
552static struct device_attribute *scsi_sysfs_sdev_attrs[] = { 597static struct device_attribute *scsi_sysfs_sdev_attrs[] = {
@@ -566,6 +611,7 @@ static struct device_attribute *scsi_sysfs_sdev_attrs[] = {
566 &dev_attr_iorequest_cnt, 611 &dev_attr_iorequest_cnt,
567 &dev_attr_iodone_cnt, 612 &dev_attr_iodone_cnt,
568 &dev_attr_ioerr_cnt, 613 &dev_attr_ioerr_cnt,
614 &dev_attr_modalias,
569 NULL 615 NULL
570}; 616};
571 617
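The scsi_sysfs.c additions export a modalias both as a uevent variable and as a sysfs attribute, so udev can autoload the matching upper-level driver for a device type (the osst hunk earlier adds the corresponding MODULE_ALIAS_SCSI_DEVICE(TYPE_TAPE) on the driver side). The sketch below assumes SCSI_DEVICE_MODALIAS_FMT expands to a "scsi:t-0x%02x"-style format and uses an illustrative helper name.

#include <linux/kernel.h>

/* e.g. type 0x01 (tape) -> "scsi:t-0x01", matched by osst's module alias */
static int build_modalias_sketch(char *buf, size_t len, unsigned int type)
{
        return snprintf(buf, len, "scsi:t-0x%02x", type);
}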
diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c
index 0e08817fdecf..ca22ddf81746 100644
--- a/drivers/scsi/scsi_tgt_if.c
+++ b/drivers/scsi/scsi_tgt_if.c
@@ -179,10 +179,12 @@ static int event_recv_msg(struct tgt_event *ev)
179 switch (ev->hdr.type) { 179 switch (ev->hdr.type) {
180 case TGT_UEVENT_CMD_RSP: 180 case TGT_UEVENT_CMD_RSP:
181 err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no, 181 err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no,
182 ev->p.cmd_rsp.tag,
183 ev->p.cmd_rsp.result, 182 ev->p.cmd_rsp.result,
184 ev->p.cmd_rsp.len, 183 ev->p.cmd_rsp.tag,
185 ev->p.cmd_rsp.uaddr, 184 ev->p.cmd_rsp.uaddr,
185 ev->p.cmd_rsp.len,
186 ev->p.cmd_rsp.sense_uaddr,
187 ev->p.cmd_rsp.sense_len,
186 ev->p.cmd_rsp.rw); 188 ev->p.cmd_rsp.rw);
187 break; 189 break;
188 case TGT_UEVENT_TSK_MGMT_RSP: 190 case TGT_UEVENT_TSK_MGMT_RSP:
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index d402aff5f314..2570f48a69c7 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -28,7 +28,6 @@
28#include <scsi/scsi_device.h> 28#include <scsi/scsi_device.h>
29#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
30#include <scsi/scsi_tgt.h> 30#include <scsi/scsi_tgt.h>
31#include <../drivers/md/dm-bio-list.h>
32 31
33#include "scsi_tgt_priv.h" 32#include "scsi_tgt_priv.h"
34 33
@@ -42,16 +41,12 @@ static struct kmem_cache *scsi_tgt_cmd_cache;
42struct scsi_tgt_cmd { 41struct scsi_tgt_cmd {
43 /* TODO replace work with James b's code */ 42 /* TODO replace work with James b's code */
44 struct work_struct work; 43 struct work_struct work;
45 /* TODO replace the lists with a large bio */ 44 /* TODO fix limits of some drivers */
46 struct bio_list xfer_done_list; 45 struct bio *bio;
47 struct bio_list xfer_list;
48 46
49 struct list_head hash_list; 47 struct list_head hash_list;
50 struct request *rq; 48 struct request *rq;
51 u64 tag; 49 u64 tag;
52
53 void *buffer;
54 unsigned bufflen;
55}; 50};
56 51
57#define TGT_HASH_ORDER 4 52#define TGT_HASH_ORDER 4
@@ -93,7 +88,12 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
93 if (!tcmd) 88 if (!tcmd)
94 goto put_dev; 89 goto put_dev;
95 90
96 rq = blk_get_request(shost->uspace_req_q, write, gfp_mask); 91 /*
 92 * The blk helpers are used for READ/WRITE requests
 93 * transferring data from an initiator's point of view. Since
 94 * we are in target mode we want the opposite.

95 */
96 rq = blk_get_request(shost->uspace_req_q, !write, gfp_mask);
97 if (!rq) 97 if (!rq)
98 goto free_tcmd; 98 goto free_tcmd;
99 99
@@ -111,8 +111,6 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
111 rq->cmd_flags |= REQ_TYPE_BLOCK_PC; 111 rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
112 rq->end_io_data = tcmd; 112 rq->end_io_data = tcmd;
113 113
114 bio_list_init(&tcmd->xfer_list);
115 bio_list_init(&tcmd->xfer_done_list);
116 tcmd->rq = rq; 114 tcmd->rq = rq;
117 115
118 return cmd; 116 return cmd;
@@ -157,22 +155,6 @@ void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
157} 155}
158EXPORT_SYMBOL_GPL(scsi_host_put_command); 156EXPORT_SYMBOL_GPL(scsi_host_put_command);
159 157
160static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
161{
162 struct bio *bio;
163
164 /* must call bio_endio in case bio was bounced */
165 while ((bio = bio_list_pop(&tcmd->xfer_done_list))) {
166 bio_endio(bio, bio->bi_size, 0);
167 bio_unmap_user(bio);
168 }
169
170 while ((bio = bio_list_pop(&tcmd->xfer_list))) {
171 bio_endio(bio, bio->bi_size, 0);
172 bio_unmap_user(bio);
173 }
174}
175
176static void cmd_hashlist_del(struct scsi_cmnd *cmd) 158static void cmd_hashlist_del(struct scsi_cmnd *cmd)
177{ 159{
178 struct request_queue *q = cmd->request->q; 160 struct request_queue *q = cmd->request->q;
@@ -185,6 +167,11 @@ static void cmd_hashlist_del(struct scsi_cmnd *cmd)
185 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); 167 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
186} 168}
187 169
170static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
171{
172 blk_rq_unmap_user(tcmd->bio);
173}
174
188static void scsi_tgt_cmd_destroy(struct work_struct *work) 175static void scsi_tgt_cmd_destroy(struct work_struct *work)
189{ 176{
190 struct scsi_tgt_cmd *tcmd = 177 struct scsi_tgt_cmd *tcmd =
@@ -193,16 +180,6 @@ static void scsi_tgt_cmd_destroy(struct work_struct *work)
193 180
194 dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction, 181 dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
195 rq_data_dir(cmd->request)); 182 rq_data_dir(cmd->request));
196 /*
197 * We fix rq->cmd_flags here since when we told bio_map_user
198 * to write vm for WRITE commands, blk_rq_bio_prep set
199 * rq_data_dir the flags to READ.
200 */
201 if (cmd->sc_data_direction == DMA_TO_DEVICE)
202 cmd->request->cmd_flags |= REQ_RW;
203 else
204 cmd->request->cmd_flags &= ~REQ_RW;
205
206 scsi_unmap_user_pages(tcmd); 183 scsi_unmap_user_pages(tcmd);
207 scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd); 184 scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
208} 185}
@@ -215,6 +192,7 @@ static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd,
215 struct list_head *head; 192 struct list_head *head;
216 193
217 tcmd->tag = tag; 194 tcmd->tag = tag;
195 tcmd->bio = NULL;
218 INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy); 196 INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy);
219 spin_lock_irqsave(&qdata->cmd_hash_lock, flags); 197 spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
220 head = &qdata->cmd_hash[cmd_hashfn(tag)]; 198 head = &qdata->cmd_hash[cmd_hashfn(tag)];
@@ -349,10 +327,14 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
349 dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request)); 327 dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
350 328
351 scsi_tgt_uspace_send_status(cmd, tcmd->tag); 329 scsi_tgt_uspace_send_status(cmd, tcmd->tag);
330
331 if (cmd->request_buffer)
332 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
333
352 queue_work(scsi_tgtd, &tcmd->work); 334 queue_work(scsi_tgtd, &tcmd->work);
353} 335}
354 336
355static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd) 337static int scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
356{ 338{
357 struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd); 339 struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
358 int err; 340 int err;
@@ -365,30 +347,12 @@ static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
365 case SCSI_MLQUEUE_DEVICE_BUSY: 347 case SCSI_MLQUEUE_DEVICE_BUSY:
366 return -EAGAIN; 348 return -EAGAIN;
367 } 349 }
368
369 return 0; 350 return 0;
370} 351}
371 352
372static void scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
373{
374 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
375 int err;
376
377 err = __scsi_tgt_transfer_response(cmd);
378 if (!err)
379 return;
380
381 cmd->result = DID_BUS_BUSY << 16;
382 err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
383 if (err <= 0)
384 /* the eh will have to pick this up */
385 printk(KERN_ERR "Could not send cmd %p status\n", cmd);
386}
387
388static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask) 353static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
389{ 354{
390 struct request *rq = cmd->request; 355 struct request *rq = cmd->request;
391 struct scsi_tgt_cmd *tcmd = rq->end_io_data;
392 int count; 356 int count;
393 357
394 cmd->use_sg = rq->nr_phys_segments; 358 cmd->use_sg = rq->nr_phys_segments;
@@ -398,143 +362,54 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
398 362
399 cmd->request_bufflen = rq->data_len; 363 cmd->request_bufflen = rq->data_len;
400 364
401 dprintk("cmd %p addr %p cnt %d %lu\n", cmd, tcmd->buffer, cmd->use_sg, 365 dprintk("cmd %p cnt %d %lu\n", cmd, cmd->use_sg, rq_data_dir(rq));
402 rq_data_dir(rq));
403 count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer); 366 count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer);
404 if (likely(count <= cmd->use_sg)) { 367 if (likely(count <= cmd->use_sg)) {
405 cmd->use_sg = count; 368 cmd->use_sg = count;
406 return 0; 369 return 0;
407 } 370 }
408 371
409 eprintk("cmd %p addr %p cnt %d\n", cmd, tcmd->buffer, cmd->use_sg); 372 eprintk("cmd %p cnt %d\n", cmd, cmd->use_sg);
410 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); 373 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
411 return -EINVAL; 374 return -EINVAL;
412} 375}
413 376
414/* TODO: test this crap and replace bio_map_user with new interface maybe */ 377/* TODO: test this crap and replace bio_map_user with new interface maybe */
415static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd, 378static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
416 int rw) 379 unsigned long uaddr, unsigned int len, int rw)
417{ 380{
418 struct request_queue *q = cmd->request->q; 381 struct request_queue *q = cmd->request->q;
419 struct request *rq = cmd->request; 382 struct request *rq = cmd->request;
420 void *uaddr = tcmd->buffer;
421 unsigned int len = tcmd->bufflen;
422 struct bio *bio;
423 int err; 383 int err;
424 384
425 while (len > 0) { 385 dprintk("%lx %u\n", uaddr, len);
426 dprintk("%lx %u\n", (unsigned long) uaddr, len); 386 err = blk_rq_map_user(q, rq, (void *)uaddr, len);
427 bio = bio_map_user(q, NULL, (unsigned long) uaddr, len, rw); 387 if (err) {
428 if (IS_ERR(bio)) {
429 err = PTR_ERR(bio);
430 dprintk("fail to map %lx %u %d %x\n",
431 (unsigned long) uaddr, len, err, cmd->cmnd[0]);
432 goto unmap_bios;
433 }
434
435 uaddr += bio->bi_size;
436 len -= bio->bi_size;
437
438 /* 388 /*
439 * The first bio is added and merged. We could probably 389 * TODO: need to fixup sg_tablesize, max_segment_size,
440 * try to add others using scsi_merge_bio() but for now 390 * max_sectors, etc for modern HW and software drivers
441 * we keep it simple. The first bio should be pretty large 391 * where this value is bogus.
442 * (either hitting the 1 MB bio pages limit or a queue limit) 392 *
443 * already but for really large IO we may want to try and 393 * TODO2: we can alloc a reserve buffer of max size
444 * merge these. 394 * we can handle and do the slow copy path for really large
395 * IO.
445 */ 396 */
446 if (!rq->bio) { 397 eprintk("Could not handle request of size %u.\n", len);
447 blk_rq_bio_prep(q, rq, bio); 398 return err;
448 rq->data_len = bio->bi_size;
449 } else
450 /* put list of bios to transfer in next go around */
451 bio_list_add(&tcmd->xfer_list, bio);
452 } 399 }
453 400
454 cmd->offset = 0; 401 tcmd->bio = rq->bio;
455 err = scsi_tgt_init_cmd(cmd, GFP_KERNEL); 402 err = scsi_tgt_init_cmd(cmd, GFP_KERNEL);
456 if (err) 403 if (err)
457 goto unmap_bios; 404 goto unmap_rq;
458 405
459 return 0; 406 return 0;
460 407
461unmap_bios: 408unmap_rq:
462 if (rq->bio) { 409 scsi_unmap_user_pages(tcmd);
463 bio_unmap_user(rq->bio);
464 while ((bio = bio_list_pop(&tcmd->xfer_list)))
465 bio_unmap_user(bio);
466 }
467
468 return err; 410 return err;
469} 411}
470 412
471static int scsi_tgt_transfer_data(struct scsi_cmnd *);
472
473static void scsi_tgt_data_transfer_done(struct scsi_cmnd *cmd)
474{
475 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
476 struct bio *bio;
477 int err;
478
479 /* should we free resources here on error ? */
480 if (cmd->result) {
481send_uspace_err:
482 err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
483 if (err <= 0)
484 /* the tgt uspace eh will have to pick this up */
485 printk(KERN_ERR "Could not send cmd %p status\n", cmd);
486 return;
487 }
488
489 dprintk("cmd %p request_bufflen %u bufflen %u\n",
490 cmd, cmd->request_bufflen, tcmd->bufflen);
491
492 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
493 bio_list_add(&tcmd->xfer_done_list, cmd->request->bio);
494
495 tcmd->buffer += cmd->request_bufflen;
496 cmd->offset += cmd->request_bufflen;
497
498 if (!tcmd->xfer_list.head) {
499 scsi_tgt_transfer_response(cmd);
500 return;
501 }
502
503 dprintk("cmd2 %p request_bufflen %u bufflen %u\n",
504 cmd, cmd->request_bufflen, tcmd->bufflen);
505
506 bio = bio_list_pop(&tcmd->xfer_list);
507 BUG_ON(!bio);
508
509 blk_rq_bio_prep(cmd->request->q, cmd->request, bio);
510 cmd->request->data_len = bio->bi_size;
511 err = scsi_tgt_init_cmd(cmd, GFP_ATOMIC);
512 if (err) {
513 cmd->result = DID_ERROR << 16;
514 goto send_uspace_err;
515 }
516
517 if (scsi_tgt_transfer_data(cmd)) {
518 cmd->result = DID_NO_CONNECT << 16;
519 goto send_uspace_err;
520 }
521}
522
523static int scsi_tgt_transfer_data(struct scsi_cmnd *cmd)
524{
525 int err;
526 struct Scsi_Host *host = scsi_tgt_cmd_to_host(cmd);
527
528 err = host->hostt->transfer_data(cmd, scsi_tgt_data_transfer_done);
529 switch (err) {
530 case SCSI_MLQUEUE_HOST_BUSY:
531 case SCSI_MLQUEUE_DEVICE_BUSY:
532 return -EAGAIN;
533 default:
534 return 0;
535 }
536}
537
538static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr, 413static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr,
539 unsigned len) 414 unsigned len)
540{ 415{
@@ -584,8 +459,9 @@ static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u64 tag)
584 return rq; 459 return rq;
585} 460}
586 461
587int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len, 462int scsi_tgt_kspace_exec(int host_no, int result, u64 tag,
588 unsigned long uaddr, u8 rw) 463 unsigned long uaddr, u32 len, unsigned long sense_uaddr,
464 u32 sense_len, u8 rw)
589{ 465{
590 struct Scsi_Host *shost; 466 struct Scsi_Host *shost;
591 struct scsi_cmnd *cmd; 467 struct scsi_cmnd *cmd;
@@ -617,8 +493,9 @@ int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
617 } 493 }
618 cmd = rq->special; 494 cmd = rq->special;
619 495
620 dprintk("cmd %p result %d len %d bufflen %u %lu %x\n", cmd, 496 dprintk("cmd %p scb %x result %d len %d bufflen %u %lu %x\n",
621 result, len, cmd->request_bufflen, rq_data_dir(rq), cmd->cmnd[0]); 497 cmd, cmd->cmnd[0], result, len, cmd->request_bufflen,
498 rq_data_dir(rq), cmd->cmnd[0]);
622 499
623 if (result == TASK_ABORTED) { 500 if (result == TASK_ABORTED) {
624 scsi_tgt_abort_cmd(shost, cmd); 501 scsi_tgt_abort_cmd(shost, cmd);
@@ -629,36 +506,36 @@ int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
629 * in the request_* values 506 * in the request_* values
630 */ 507 */
631 tcmd = cmd->request->end_io_data; 508 tcmd = cmd->request->end_io_data;
632 tcmd->buffer = (void *)uaddr;
633 tcmd->bufflen = len;
634 cmd->result = result; 509 cmd->result = result;
635 510
636 if (!tcmd->bufflen || cmd->request_buffer) { 511 if (cmd->result == SAM_STAT_CHECK_CONDITION)
637 err = __scsi_tgt_transfer_response(cmd); 512 scsi_tgt_copy_sense(cmd, sense_uaddr, sense_len);
638 goto done;
639 }
640 513
641 /* 514 if (len) {
642 * TODO: Do we need to handle case where request does not 515 err = scsi_map_user_pages(rq->end_io_data, cmd, uaddr, len, rw);
643 * align with LLD. 516 if (err) {
644 */ 517 /*
645 err = scsi_map_user_pages(rq->end_io_data, cmd, rw); 518 * user-space daemon bugs or OOM
646 if (err) { 519 * TODO: we can do better for OOM.
647 eprintk("%p %d\n", cmd, err); 520 */
648 err = -EAGAIN; 521 struct scsi_tgt_queuedata *qdata;
649 goto done; 522 struct list_head *head;
650 } 523 unsigned long flags;
651 524
652 /* userspace failure */ 525 eprintk("cmd %p ret %d uaddr %lx len %d rw %d\n",
653 if (cmd->result) { 526 cmd, err, uaddr, len, rw);
654 if (status_byte(cmd->result) == CHECK_CONDITION) 527
655 scsi_tgt_copy_sense(cmd, uaddr, len); 528 qdata = shost->uspace_req_q->queuedata;
656 err = __scsi_tgt_transfer_response(cmd); 529 head = &qdata->cmd_hash[cmd_hashfn(tcmd->tag)];
657 goto done; 530
658 } 531 spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
659 /* ask the target LLD to transfer the data to the buffer */ 532 list_add(&tcmd->hash_list, head);
660 err = scsi_tgt_transfer_data(cmd); 533 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
661 534
535 goto done;
536 }
537 }
538 err = scsi_tgt_transfer_response(cmd);
662done: 539done:
663 scsi_host_put(shost); 540 scsi_host_put(shost);
664 return err; 541 return err;
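
Note: the reworked scsi_tgt_kspace_exec() now receives the data buffer (uaddr/len) and the sense buffer (sense_uaddr/sense_len) as separate userspace pointers. On SAM_STAT_CHECK_CONDITION the sense is copied back into the midlayer's buffer before the response goes out, and a failed scsi_map_user_pages() re-hashes the command so the userspace daemon can retry it. A minimal sketch of what a sense-copy helper of this kind does (illustrative names, not the verbatim kernel function; assumes <scsi/scsi_cmnd.h> and <linux/uaccess.h>):

static int example_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr,
			      unsigned int len)
{
	void __user *p = (void __user *)uaddr;
	unsigned int copy = min_t(unsigned int, len, SCSI_SENSE_BUFFERSIZE);

	/* pull the daemon's sense data into the command's sense buffer */
	if (copy_from_user(cmd->sense_buffer, p, copy))
		return -EFAULT;
	return 0;
}
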
diff --git a/drivers/scsi/scsi_tgt_priv.h b/drivers/scsi/scsi_tgt_priv.h
index 84488c51ff62..e9e6db1c417f 100644
--- a/drivers/scsi/scsi_tgt_priv.h
+++ b/drivers/scsi/scsi_tgt_priv.h
@@ -18,8 +18,9 @@ extern int scsi_tgt_if_init(void);
18extern int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun, 18extern int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun,
19 u64 tag); 19 u64 tag);
20extern int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag); 20extern int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag);
21extern int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len, 21extern int scsi_tgt_kspace_exec(int host_no, int result, u64 tag,
22 unsigned long uaddr, u8 rw); 22 unsigned long uaddr, u32 len, unsigned long sense_uaddr,
23 u32 sense_len, u8 rw);
23extern int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag, 24extern int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
24 struct scsi_lun *scsilun, void *data); 25 struct scsi_lun *scsilun, void *data);
25extern int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result); 26extern int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 58afdb401703..14c4f065b2b8 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -200,6 +200,8 @@ static const struct {
200 { FC_PORTSPEED_2GBIT, "2 Gbit" }, 200 { FC_PORTSPEED_2GBIT, "2 Gbit" },
201 { FC_PORTSPEED_4GBIT, "4 Gbit" }, 201 { FC_PORTSPEED_4GBIT, "4 Gbit" },
202 { FC_PORTSPEED_10GBIT, "10 Gbit" }, 202 { FC_PORTSPEED_10GBIT, "10 Gbit" },
203 { FC_PORTSPEED_8GBIT, "8 Gbit" },
204 { FC_PORTSPEED_16GBIT, "16 Gbit" },
203 { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" }, 205 { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
204}; 206};
205fc_bitfield_name_search(port_speed, fc_port_speed_names) 207fc_bitfield_name_search(port_speed, fc_port_speed_names)
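
Note: the two new table entries let the 8 Gbit and 16 Gbit bits in a port's supported/negotiated speed mask resolve to readable names; fc_bitfield_name_search() generates the lookup that walks this table. Roughly, that generated helper behaves like the sketch below (generic names, illustrative only):

struct speed_name { unsigned int value; const char *name; };

/* append the name of every bit set in 'mask', comma-separated */
static int speed_mask_to_names(const struct speed_name *tbl, int entries,
			       unsigned int mask, char *buf, int buflen)
{
	int i, len = 0;

	for (i = 0; i < entries; i++) {
		if (!(mask & tbl[i].value))
			continue;
		len += snprintf(buf + len, buflen - len, "%s%s",
				len ? ", " : "", tbl[i].name);
	}
	return len;
}
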
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index aabaa0576ab4..caf1836bbeca 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -49,7 +49,7 @@ struct iscsi_internal {
49 struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1]; 49 struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
50}; 50};
51 51
52static int iscsi_session_nr; /* sysfs session id for next new session */ 52static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
53 53
54/* 54/*
55 * list of registered transports and lock that must 55 * list of registered transports and lock that must
@@ -300,7 +300,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
300 int err; 300 int err;
301 301
302 ihost = shost->shost_data; 302 ihost = shost->shost_data;
303 session->sid = iscsi_session_nr++; 303 session->sid = atomic_add_return(1, &iscsi_session_nr);
304 session->target_id = target_id; 304 session->target_id = target_id;
305 305
306 snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u", 306 snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
@@ -1419,6 +1419,8 @@ static __init int iscsi_transport_init(void)
1419 printk(KERN_INFO "Loading iSCSI transport class v%s.\n", 1419 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
1420 ISCSI_TRANSPORT_VERSION); 1420 ISCSI_TRANSPORT_VERSION);
1421 1421
1422 atomic_set(&iscsi_session_nr, 0);
1423
1422 err = class_register(&iscsi_transport_class); 1424 err = class_register(&iscsi_transport_class);
1423 if (err) 1425 if (err)
1424 return err; 1426 return err;
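
Note: switching iscsi_session_nr from a plain int to atomic_t closes the race when two sessions are added concurrently; the new id now comes from a single atomic operation instead of an unlocked read-modify-write. The pattern in isolation (illustrative names):

#include <asm/atomic.h>	/* <linux/atomic.h> on later kernels */

static atomic_t example_session_nr = ATOMIC_INIT(0);

static unsigned int example_next_sid(void)
{
	/* atomic_add_return() adds and returns the new value in one step,
	 * so concurrent callers can never observe the same id */
	return atomic_add_return(1, &example_session_nr);
}
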
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5a8f55fea5ff..00e46662296f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -58,16 +58,10 @@
58#include <scsi/scsi_host.h> 58#include <scsi/scsi_host.h>
59#include <scsi/scsi_ioctl.h> 59#include <scsi/scsi_ioctl.h>
60#include <scsi/scsicam.h> 60#include <scsi/scsicam.h>
61#include <scsi/sd.h>
61 62
62#include "scsi_logging.h" 63#include "scsi_logging.h"
63 64
64/*
65 * More than enough for everybody ;) The huge number of majors
66 * is a leftover from 16bit dev_t days, we don't really need that
67 * much numberspace.
68 */
69#define SD_MAJORS 16
70
71MODULE_AUTHOR("Eric Youngdale"); 65MODULE_AUTHOR("Eric Youngdale");
72MODULE_DESCRIPTION("SCSI disk (sd) driver"); 66MODULE_DESCRIPTION("SCSI disk (sd) driver");
73MODULE_LICENSE("GPL"); 67MODULE_LICENSE("GPL");
@@ -88,45 +82,9 @@ MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
88MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR); 82MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
89MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR); 83MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
90MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR); 84MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
91 85MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
92/* 86MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
93 * This is limited by the naming scheme enforced in sd_probe, 87MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
94 * add another character to it if you really need more disks.
95 */
96#define SD_MAX_DISKS (((26 * 26) + 26 + 1) * 26)
97
98/*
99 * Time out in seconds for disks and Magneto-opticals (which are slower).
100 */
101#define SD_TIMEOUT (30 * HZ)
102#define SD_MOD_TIMEOUT (75 * HZ)
103
104/*
105 * Number of allowed retries
106 */
107#define SD_MAX_RETRIES 5
108#define SD_PASSTHROUGH_RETRIES 1
109
110/*
111 * Size of the initial data buffer for mode and read capacity data
112 */
113#define SD_BUF_SIZE 512
114
115struct scsi_disk {
116 struct scsi_driver *driver; /* always &sd_template */
117 struct scsi_device *device;
118 struct class_device cdev;
119 struct gendisk *disk;
120 unsigned int openers; /* protected by BKL for now, yuck */
121 sector_t capacity; /* size in 512-byte sectors */
122 u32 index;
123 u8 media_present;
124 u8 write_prot;
125 unsigned WCE : 1; /* state of disk WCE bit */
126 unsigned RCD : 1; /* state of disk RCD bit, unused */
127 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
128};
129#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,cdev)
130 88
131static DEFINE_IDR(sd_index_idr); 89static DEFINE_IDR(sd_index_idr);
132static DEFINE_SPINLOCK(sd_index_lock); 90static DEFINE_SPINLOCK(sd_index_lock);
@@ -136,20 +94,6 @@ static DEFINE_SPINLOCK(sd_index_lock);
136 * object after last put) */ 94 * object after last put) */
137static DEFINE_MUTEX(sd_ref_mutex); 95static DEFINE_MUTEX(sd_ref_mutex);
138 96
139static int sd_revalidate_disk(struct gendisk *disk);
140static void sd_rw_intr(struct scsi_cmnd * SCpnt);
141
142static int sd_probe(struct device *);
143static int sd_remove(struct device *);
144static void sd_shutdown(struct device *dev);
145static void sd_rescan(struct device *);
146static int sd_init_command(struct scsi_cmnd *);
147static int sd_issue_flush(struct device *, sector_t *);
148static void sd_prepare_flush(request_queue_t *, struct request *);
149static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
150 unsigned char *buffer);
151static void scsi_disk_release(struct class_device *cdev);
152
153static const char *sd_cache_types[] = { 97static const char *sd_cache_types[] = {
154 "write through", "none", "write back", 98 "write through", "none", "write back",
155 "write back, no read (daft)" 99 "write back, no read (daft)"
@@ -199,13 +143,27 @@ static ssize_t sd_store_cache_type(struct class_device *cdev, const char *buf,
199 if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, 143 if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
200 SD_MAX_RETRIES, &data, &sshdr)) { 144 SD_MAX_RETRIES, &data, &sshdr)) {
201 if (scsi_sense_valid(&sshdr)) 145 if (scsi_sense_valid(&sshdr))
202 scsi_print_sense_hdr(sdkp->disk->disk_name, &sshdr); 146 sd_print_sense_hdr(sdkp, &sshdr);
203 return -EINVAL; 147 return -EINVAL;
204 } 148 }
205 sd_revalidate_disk(sdkp->disk); 149 sd_revalidate_disk(sdkp->disk);
206 return count; 150 return count;
207} 151}
208 152
153static ssize_t sd_store_manage_start_stop(struct class_device *cdev,
154 const char *buf, size_t count)
155{
156 struct scsi_disk *sdkp = to_scsi_disk(cdev);
157 struct scsi_device *sdp = sdkp->device;
158
159 if (!capable(CAP_SYS_ADMIN))
160 return -EACCES;
161
162 sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);
163
164 return count;
165}
166
209static ssize_t sd_store_allow_restart(struct class_device *cdev, const char *buf, 167static ssize_t sd_store_allow_restart(struct class_device *cdev, const char *buf,
210 size_t count) 168 size_t count)
211{ 169{
@@ -238,6 +196,14 @@ static ssize_t sd_show_fua(struct class_device *cdev, char *buf)
238 return snprintf(buf, 20, "%u\n", sdkp->DPOFUA); 196 return snprintf(buf, 20, "%u\n", sdkp->DPOFUA);
239} 197}
240 198
199static ssize_t sd_show_manage_start_stop(struct class_device *cdev, char *buf)
200{
201 struct scsi_disk *sdkp = to_scsi_disk(cdev);
202 struct scsi_device *sdp = sdkp->device;
203
204 return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);
205}
206
241static ssize_t sd_show_allow_restart(struct class_device *cdev, char *buf) 207static ssize_t sd_show_allow_restart(struct class_device *cdev, char *buf)
242{ 208{
243 struct scsi_disk *sdkp = to_scsi_disk(cdev); 209 struct scsi_disk *sdkp = to_scsi_disk(cdev);
@@ -251,6 +217,8 @@ static struct class_device_attribute sd_disk_attrs[] = {
251 __ATTR(FUA, S_IRUGO, sd_show_fua, NULL), 217 __ATTR(FUA, S_IRUGO, sd_show_fua, NULL),
252 __ATTR(allow_restart, S_IRUGO|S_IWUSR, sd_show_allow_restart, 218 __ATTR(allow_restart, S_IRUGO|S_IWUSR, sd_show_allow_restart,
253 sd_store_allow_restart), 219 sd_store_allow_restart),
220 __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
221 sd_store_manage_start_stop),
254 __ATTR_NULL, 222 __ATTR_NULL,
255}; 223};
256 224
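
Note: the new manage_start_stop attribute (stored into sdp->manage_start_stop above) is what gates the START STOP UNIT handling added further down for shutdown, suspend and resume. From userspace it is just a sysfs boolean; a hedged example, assuming the attribute is exposed under the scsi_disk class device for this kernel generation:

#include <fcntl.h>
#include <unistd.h>

/* write "1" to a manage_start_stop attribute file, e.g.
 * /sys/class/scsi_disk/0:0:0:0/manage_start_stop (path is an assumption) */
static int enable_manage_start_stop(const char *attr_path)
{
	int fd = open(attr_path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}
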
@@ -267,6 +235,8 @@ static struct scsi_driver sd_template = {
267 .name = "sd", 235 .name = "sd",
268 .probe = sd_probe, 236 .probe = sd_probe,
269 .remove = sd_remove, 237 .remove = sd_remove,
238 .suspend = sd_suspend,
239 .resume = sd_resume,
270 .shutdown = sd_shutdown, 240 .shutdown = sd_shutdown,
271 }, 241 },
272 .rescan = sd_rescan, 242 .rescan = sd_rescan,
@@ -371,15 +341,19 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
371 unsigned int this_count = SCpnt->request_bufflen >> 9; 341 unsigned int this_count = SCpnt->request_bufflen >> 9;
372 unsigned int timeout = sdp->timeout; 342 unsigned int timeout = sdp->timeout;
373 343
374 SCSI_LOG_HLQUEUE(1, printk("sd_init_command: disk=%s, block=%llu, " 344 SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
375 "count=%d\n", disk->disk_name, 345 "sd_init_command: block=%llu, "
376 (unsigned long long)block, this_count)); 346 "count=%d\n",
347 (unsigned long long)block,
348 this_count));
377 349
378 if (!sdp || !scsi_device_online(sdp) || 350 if (!sdp || !scsi_device_online(sdp) ||
379 block + rq->nr_sectors > get_capacity(disk)) { 351 block + rq->nr_sectors > get_capacity(disk)) {
380 SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", 352 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
381 rq->nr_sectors)); 353 "Finishing %ld sectors\n",
382 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); 354 rq->nr_sectors));
355 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
356 "Retry with 0x%p\n", SCpnt));
383 return 0; 357 return 0;
384 } 358 }
385 359
@@ -391,8 +365,8 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
391 /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */ 365 /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
392 return 0; 366 return 0;
393 } 367 }
394 SCSI_LOG_HLQUEUE(2, printk("%s : block=%llu\n", 368 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
395 disk->disk_name, (unsigned long long)block)); 369 (unsigned long long)block));
396 370
397 /* 371 /*
398 * If we have a 1K hardware sectorsize, prevent access to single 372 * If we have a 1K hardware sectorsize, prevent access to single
@@ -407,7 +381,8 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
407 */ 381 */
408 if (sdp->sector_size == 1024) { 382 if (sdp->sector_size == 1024) {
409 if ((block & 1) || (rq->nr_sectors & 1)) { 383 if ((block & 1) || (rq->nr_sectors & 1)) {
410 printk(KERN_ERR "sd: Bad block number requested"); 384 scmd_printk(KERN_ERR, SCpnt,
385 "Bad block number requested\n");
411 return 0; 386 return 0;
412 } else { 387 } else {
413 block = block >> 1; 388 block = block >> 1;
@@ -416,7 +391,8 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
416 } 391 }
417 if (sdp->sector_size == 2048) { 392 if (sdp->sector_size == 2048) {
418 if ((block & 3) || (rq->nr_sectors & 3)) { 393 if ((block & 3) || (rq->nr_sectors & 3)) {
419 printk(KERN_ERR "sd: Bad block number requested"); 394 scmd_printk(KERN_ERR, SCpnt,
395 "Bad block number requested\n");
420 return 0; 396 return 0;
421 } else { 397 } else {
422 block = block >> 2; 398 block = block >> 2;
@@ -425,7 +401,8 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
425 } 401 }
426 if (sdp->sector_size == 4096) { 402 if (sdp->sector_size == 4096) {
427 if ((block & 7) || (rq->nr_sectors & 7)) { 403 if ((block & 7) || (rq->nr_sectors & 7)) {
428 printk(KERN_ERR "sd: Bad block number requested"); 404 scmd_printk(KERN_ERR, SCpnt,
405 "Bad block number requested\n");
429 return 0; 406 return 0;
430 } else { 407 } else {
431 block = block >> 3; 408 block = block >> 3;
@@ -442,13 +419,15 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
442 SCpnt->cmnd[0] = READ_6; 419 SCpnt->cmnd[0] = READ_6;
443 SCpnt->sc_data_direction = DMA_FROM_DEVICE; 420 SCpnt->sc_data_direction = DMA_FROM_DEVICE;
444 } else { 421 } else {
445 printk(KERN_ERR "sd: Unknown command %x\n", rq->cmd_flags); 422 scmd_printk(KERN_ERR, SCpnt, "Unknown command %x\n", rq->cmd_flags);
446 return 0; 423 return 0;
447 } 424 }
448 425
449 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", 426 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
450 disk->disk_name, (rq_data_dir(rq) == WRITE) ? 427 "%s %d/%ld 512 byte blocks.\n",
451 "writing" : "reading", this_count, rq->nr_sectors)); 428 (rq_data_dir(rq) == WRITE) ?
429 "writing" : "reading", this_count,
430 rq->nr_sectors));
452 431
453 SCpnt->cmnd[1] = 0; 432 SCpnt->cmnd[1] = 0;
454 433
@@ -490,7 +469,8 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
490 * during operation and thus turned off 469 * during operation and thus turned off
491 * use_10_for_rw. 470 * use_10_for_rw.
492 */ 471 */
493 printk(KERN_ERR "sd: FUA write on READ/WRITE(6) drive\n"); 472 scmd_printk(KERN_ERR, SCpnt,
473 "FUA write on READ/WRITE(6) drive\n");
494 return 0; 474 return 0;
495 } 475 }
496 476
@@ -549,7 +529,7 @@ static int sd_open(struct inode *inode, struct file *filp)
549 return -ENXIO; 529 return -ENXIO;
550 530
551 531
552 SCSI_LOG_HLQUEUE(3, printk("sd_open: disk=%s\n", disk->disk_name)); 532 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
553 533
554 sdev = sdkp->device; 534 sdev = sdkp->device;
555 535
@@ -619,7 +599,7 @@ static int sd_release(struct inode *inode, struct file *filp)
619 struct scsi_disk *sdkp = scsi_disk(disk); 599 struct scsi_disk *sdkp = scsi_disk(disk);
620 struct scsi_device *sdev = sdkp->device; 600 struct scsi_device *sdev = sdkp->device;
621 601
622 SCSI_LOG_HLQUEUE(3, printk("sd_release: disk=%s\n", disk->disk_name)); 602 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
623 603
624 if (!--sdkp->openers && sdev->removable) { 604 if (!--sdkp->openers && sdev->removable) {
625 if (scsi_block_when_processing_errors(sdev)) 605 if (scsi_block_when_processing_errors(sdev))
@@ -732,8 +712,7 @@ static int sd_media_changed(struct gendisk *disk)
732 struct scsi_device *sdp = sdkp->device; 712 struct scsi_device *sdp = sdkp->device;
733 int retval; 713 int retval;
734 714
735 SCSI_LOG_HLQUEUE(3, printk("sd_media_changed: disk=%s\n", 715 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n"));
736 disk->disk_name));
737 716
738 if (!sdp->removable) 717 if (!sdp->removable)
739 return 0; 718 return 0;
@@ -786,9 +765,10 @@ not_present:
786 return 1; 765 return 1;
787} 766}
788 767
789static int sd_sync_cache(struct scsi_device *sdp) 768static int sd_sync_cache(struct scsi_disk *sdkp)
790{ 769{
791 int retries, res; 770 int retries, res;
771 struct scsi_device *sdp = sdkp->device;
792 struct scsi_sense_hdr sshdr; 772 struct scsi_sense_hdr sshdr;
793 773
794 if (!scsi_device_online(sdp)) 774 if (!scsi_device_online(sdp))
@@ -809,28 +789,27 @@ static int sd_sync_cache(struct scsi_device *sdp)
809 break; 789 break;
810 } 790 }
811 791
812 if (res) { printk(KERN_WARNING "FAILED\n status = %x, message = %02x, " 792 if (res) {
813 "host = %d, driver = %02x\n ", 793 sd_print_result(sdkp, res);
814 status_byte(res), msg_byte(res), 794 if (driver_byte(res) & DRIVER_SENSE)
815 host_byte(res), driver_byte(res)); 795 sd_print_sense_hdr(sdkp, &sshdr);
816 if (driver_byte(res) & DRIVER_SENSE)
817 scsi_print_sense_hdr("sd", &sshdr);
818 } 796 }
819 797
820 return res; 798 if (res)
799 return -EIO;
800 return 0;
821} 801}
822 802
823static int sd_issue_flush(struct device *dev, sector_t *error_sector) 803static int sd_issue_flush(struct device *dev, sector_t *error_sector)
824{ 804{
825 int ret = 0; 805 int ret = 0;
826 struct scsi_device *sdp = to_scsi_device(dev);
827 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); 806 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
828 807
829 if (!sdkp) 808 if (!sdkp)
830 return -ENODEV; 809 return -ENODEV;
831 810
832 if (sdkp->WCE) 811 if (sdkp->WCE)
833 ret = sd_sync_cache(sdp); 812 ret = sd_sync_cache(sdkp);
834 scsi_disk_put(sdkp); 813 scsi_disk_put(sdkp);
835 return ret; 814 return ret;
836} 815}
@@ -928,12 +907,14 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
928 sense_deferred = scsi_sense_is_deferred(&sshdr); 907 sense_deferred = scsi_sense_is_deferred(&sshdr);
929 } 908 }
930#ifdef CONFIG_SCSI_LOGGING 909#ifdef CONFIG_SCSI_LOGGING
931 SCSI_LOG_HLCOMPLETE(1, printk("sd_rw_intr: %s: res=0x%x\n", 910 SCSI_LOG_HLCOMPLETE(1, scsi_print_result(SCpnt));
932 SCpnt->request->rq_disk->disk_name, result));
933 if (sense_valid) { 911 if (sense_valid) {
934 SCSI_LOG_HLCOMPLETE(1, printk("sd_rw_intr: sb[respc,sk,asc," 912 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
935 "ascq]=%x,%x,%x,%x\n", sshdr.response_code, 913 "sd_rw_intr: sb[respc,sk,asc,"
936 sshdr.sense_key, sshdr.asc, sshdr.ascq)); 914 "ascq]=%x,%x,%x,%x\n",
915 sshdr.response_code,
916 sshdr.sense_key, sshdr.asc,
917 sshdr.ascq));
937 } 918 }
938#endif 919#endif
939 if (driver_byte(result) != DRIVER_SENSE && 920 if (driver_byte(result) != DRIVER_SENSE &&
@@ -1025,7 +1006,7 @@ static int media_not_present(struct scsi_disk *sdkp,
1025 * spinup disk - called only in sd_revalidate_disk() 1006 * spinup disk - called only in sd_revalidate_disk()
1026 */ 1007 */
1027static void 1008static void
1028sd_spinup_disk(struct scsi_disk *sdkp, char *diskname) 1009sd_spinup_disk(struct scsi_disk *sdkp)
1029{ 1010{
1030 unsigned char cmd[10]; 1011 unsigned char cmd[10];
1031 unsigned long spintime_expire = 0; 1012 unsigned long spintime_expire = 0;
@@ -1069,9 +1050,10 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
1069 if ((driver_byte(the_result) & DRIVER_SENSE) == 0) { 1050 if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
1070 /* no sense, TUR either succeeded or failed 1051 /* no sense, TUR either succeeded or failed
1071 * with a status error */ 1052 * with a status error */
1072 if(!spintime && !scsi_status_is_good(the_result)) 1053 if(!spintime && !scsi_status_is_good(the_result)) {
1073 printk(KERN_NOTICE "%s: Unit Not Ready, " 1054 sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
1074 "error = 0x%x\n", diskname, the_result); 1055 sd_print_result(sdkp, the_result);
1056 }
1075 break; 1057 break;
1076 } 1058 }
1077 1059
@@ -1096,8 +1078,7 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
1096 */ 1078 */
1097 } else if (sense_valid && sshdr.sense_key == NOT_READY) { 1079 } else if (sense_valid && sshdr.sense_key == NOT_READY) {
1098 if (!spintime) { 1080 if (!spintime) {
1099 printk(KERN_NOTICE "%s: Spinning up disk...", 1081 sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
1100 diskname);
1101 cmd[0] = START_STOP; 1082 cmd[0] = START_STOP;
1102 cmd[1] = 1; /* Return immediately */ 1083 cmd[1] = 1; /* Return immediately */
1103 memset((void *) &cmd[2], 0, 8); 1084 memset((void *) &cmd[2], 0, 8);
@@ -1130,9 +1111,8 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
1130 /* we don't understand the sense code, so it's 1111 /* we don't understand the sense code, so it's
1131 * probably pointless to loop */ 1112 * probably pointless to loop */
1132 if(!spintime) { 1113 if(!spintime) {
1133 printk(KERN_NOTICE "%s: Unit Not Ready, " 1114 sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
1134 "sense:\n", diskname); 1115 sd_print_sense_hdr(sdkp, &sshdr);
1135 scsi_print_sense_hdr("", &sshdr);
1136 } 1116 }
1137 break; 1117 break;
1138 } 1118 }
@@ -1151,8 +1131,7 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
1151 * read disk capacity 1131 * read disk capacity
1152 */ 1132 */
1153static void 1133static void
1154sd_read_capacity(struct scsi_disk *sdkp, char *diskname, 1134sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
1155 unsigned char *buffer)
1156{ 1135{
1157 unsigned char cmd[16]; 1136 unsigned char cmd[16];
1158 int the_result, retries; 1137 int the_result, retries;
@@ -1191,18 +1170,12 @@ repeat:
1191 } while (the_result && retries); 1170 } while (the_result && retries);
1192 1171
1193 if (the_result && !longrc) { 1172 if (the_result && !longrc) {
1194 printk(KERN_NOTICE "%s : READ CAPACITY failed.\n" 1173 sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY failed\n");
1195 "%s : status=%x, message=%02x, host=%d, driver=%02x \n", 1174 sd_print_result(sdkp, the_result);
1196 diskname, diskname,
1197 status_byte(the_result),
1198 msg_byte(the_result),
1199 host_byte(the_result),
1200 driver_byte(the_result));
1201
1202 if (driver_byte(the_result) & DRIVER_SENSE) 1175 if (driver_byte(the_result) & DRIVER_SENSE)
1203 scsi_print_sense_hdr("sd", &sshdr); 1176 sd_print_sense_hdr(sdkp, &sshdr);
1204 else 1177 else
1205 printk("%s : sense not available. \n", diskname); 1178 sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
1206 1179
1207 /* Set dirty bit for removable devices if not ready - 1180 /* Set dirty bit for removable devices if not ready -
1208 * sometimes drives will not report this properly. */ 1181 * sometimes drives will not report this properly. */
@@ -1218,16 +1191,10 @@ repeat:
1218 return; 1191 return;
1219 } else if (the_result && longrc) { 1192 } else if (the_result && longrc) {
1220 /* READ CAPACITY(16) has been failed */ 1193 /* READ CAPACITY(16) has been failed */
1221 printk(KERN_NOTICE "%s : READ CAPACITY(16) failed.\n" 1194 sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY(16) failed\n");
1222 "%s : status=%x, message=%02x, host=%d, driver=%02x \n", 1195 sd_print_result(sdkp, the_result);
1223 diskname, diskname, 1196 sd_printk(KERN_NOTICE, sdkp, "Use 0xffffffff as device size\n");
1224 status_byte(the_result), 1197
1225 msg_byte(the_result),
1226 host_byte(the_result),
1227 driver_byte(the_result));
1228 printk(KERN_NOTICE "%s : use 0xffffffff as device size\n",
1229 diskname);
1230
1231 sdkp->capacity = 1 + (sector_t) 0xffffffff; 1198 sdkp->capacity = 1 + (sector_t) 0xffffffff;
1232 goto got_data; 1199 goto got_data;
1233 } 1200 }
@@ -1238,14 +1205,14 @@ repeat:
1238 if (buffer[0] == 0xff && buffer[1] == 0xff && 1205 if (buffer[0] == 0xff && buffer[1] == 0xff &&
1239 buffer[2] == 0xff && buffer[3] == 0xff) { 1206 buffer[2] == 0xff && buffer[3] == 0xff) {
1240 if(sizeof(sdkp->capacity) > 4) { 1207 if(sizeof(sdkp->capacity) > 4) {
1241 printk(KERN_NOTICE "%s : very big device. try to use" 1208 sd_printk(KERN_NOTICE, sdkp, "Very big device. "
1242 " READ CAPACITY(16).\n", diskname); 1209 "Trying to use READ CAPACITY(16).\n");
1243 longrc = 1; 1210 longrc = 1;
1244 goto repeat; 1211 goto repeat;
1245 } 1212 }
1246 printk(KERN_ERR "%s: too big for this kernel. Use a " 1213 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use "
1247 "kernel compiled with support for large block " 1214 "a kernel compiled with support for large "
1248 "devices.\n", diskname); 1215 "block devices.\n");
1249 sdkp->capacity = 0; 1216 sdkp->capacity = 0;
1250 goto got_data; 1217 goto got_data;
1251 } 1218 }
@@ -1284,8 +1251,8 @@ repeat:
1284got_data: 1251got_data:
1285 if (sector_size == 0) { 1252 if (sector_size == 0) {
1286 sector_size = 512; 1253 sector_size = 512;
1287 printk(KERN_NOTICE "%s : sector size 0 reported, " 1254 sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
1288 "assuming 512.\n", diskname); 1255 "assuming 512.\n");
1289 } 1256 }
1290 1257
1291 if (sector_size != 512 && 1258 if (sector_size != 512 &&
@@ -1293,8 +1260,8 @@ got_data:
1293 sector_size != 2048 && 1260 sector_size != 2048 &&
1294 sector_size != 4096 && 1261 sector_size != 4096 &&
1295 sector_size != 256) { 1262 sector_size != 256) {
1296 printk(KERN_NOTICE "%s : unsupported sector size " 1263 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
1297 "%d.\n", diskname, sector_size); 1264 sector_size);
1298 /* 1265 /*
1299 * The user might want to re-format the drive with 1266 * The user might want to re-format the drive with
1300 * a supported sectorsize. Once this happens, it 1267 * a supported sectorsize. Once this happens, it
@@ -1327,10 +1294,10 @@ got_data:
1327 mb -= sz - 974; 1294 mb -= sz - 974;
1328 sector_div(mb, 1950); 1295 sector_div(mb, 1950);
1329 1296
1330 printk(KERN_NOTICE "SCSI device %s: " 1297 sd_printk(KERN_NOTICE, sdkp,
1331 "%llu %d-byte hdwr sectors (%llu MB)\n", 1298 "%llu %d-byte hardware sectors (%llu MB)\n",
1332 diskname, (unsigned long long)sdkp->capacity, 1299 (unsigned long long)sdkp->capacity,
1333 hard_sector, (unsigned long long)mb); 1300 hard_sector, (unsigned long long)mb);
1334 } 1301 }
1335 1302
1336 /* Rescale capacity to 512-byte units */ 1303 /* Rescale capacity to 512-byte units */
@@ -1362,8 +1329,7 @@ sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
1362 * called with buffer of length SD_BUF_SIZE 1329 * called with buffer of length SD_BUF_SIZE
1363 */ 1330 */
1364static void 1331static void
1365sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname, 1332sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
1366 unsigned char *buffer)
1367{ 1333{
1368 int res; 1334 int res;
1369 struct scsi_device *sdp = sdkp->device; 1335 struct scsi_device *sdp = sdkp->device;
@@ -1371,7 +1337,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
1371 1337
1372 set_disk_ro(sdkp->disk, 0); 1338 set_disk_ro(sdkp->disk, 0);
1373 if (sdp->skip_ms_page_3f) { 1339 if (sdp->skip_ms_page_3f) {
1374 printk(KERN_NOTICE "%s: assuming Write Enabled\n", diskname); 1340 sd_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
1375 return; 1341 return;
1376 } 1342 }
1377 1343
@@ -1403,15 +1369,16 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
1403 } 1369 }
1404 1370
1405 if (!scsi_status_is_good(res)) { 1371 if (!scsi_status_is_good(res)) {
1406 printk(KERN_WARNING 1372 sd_printk(KERN_WARNING, sdkp,
1407 "%s: test WP failed, assume Write Enabled\n", diskname); 1373 "Test WP failed, assume Write Enabled\n");
1408 } else { 1374 } else {
1409 sdkp->write_prot = ((data.device_specific & 0x80) != 0); 1375 sdkp->write_prot = ((data.device_specific & 0x80) != 0);
1410 set_disk_ro(sdkp->disk, sdkp->write_prot); 1376 set_disk_ro(sdkp->disk, sdkp->write_prot);
1411 printk(KERN_NOTICE "%s: Write Protect is %s\n", diskname, 1377 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
1412 sdkp->write_prot ? "on" : "off"); 1378 sdkp->write_prot ? "on" : "off");
1413 printk(KERN_DEBUG "%s: Mode Sense: %02x %02x %02x %02x\n", 1379 sd_printk(KERN_DEBUG, sdkp,
1414 diskname, buffer[0], buffer[1], buffer[2], buffer[3]); 1380 "Mode Sense: %02x %02x %02x %02x\n",
1381 buffer[0], buffer[1], buffer[2], buffer[3]);
1415 } 1382 }
1416} 1383}
1417 1384
@@ -1420,8 +1387,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
1420 * called with buffer of length SD_BUF_SIZE 1387 * called with buffer of length SD_BUF_SIZE
1421 */ 1388 */
1422static void 1389static void
1423sd_read_cache_type(struct scsi_disk *sdkp, char *diskname, 1390sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
1424 unsigned char *buffer)
1425{ 1391{
1426 int len = 0, res; 1392 int len = 0, res;
1427 struct scsi_device *sdp = sdkp->device; 1393 struct scsi_device *sdp = sdkp->device;
@@ -1450,8 +1416,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1450 1416
1451 if (!data.header_length) { 1417 if (!data.header_length) {
1452 modepage = 6; 1418 modepage = 6;
1453 printk(KERN_ERR "%s: missing header in MODE_SENSE response\n", 1419 sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
1454 diskname);
1455 } 1420 }
1456 1421
1457 /* that went OK, now ask for the proper length */ 1422 /* that went OK, now ask for the proper length */
@@ -1478,13 +1443,12 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1478 int offset = data.header_length + data.block_descriptor_length; 1443 int offset = data.header_length + data.block_descriptor_length;
1479 1444
1480 if (offset >= SD_BUF_SIZE - 2) { 1445 if (offset >= SD_BUF_SIZE - 2) {
1481 printk(KERN_ERR "%s: malformed MODE SENSE response", 1446 sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n");
1482 diskname);
1483 goto defaults; 1447 goto defaults;
1484 } 1448 }
1485 1449
1486 if ((buffer[offset] & 0x3f) != modepage) { 1450 if ((buffer[offset] & 0x3f) != modepage) {
1487 printk(KERN_ERR "%s: got wrong page\n", diskname); 1451 sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
1488 goto defaults; 1452 goto defaults;
1489 } 1453 }
1490 1454
@@ -1498,14 +1462,13 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1498 1462
1499 sdkp->DPOFUA = (data.device_specific & 0x10) != 0; 1463 sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
1500 if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { 1464 if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
1501 printk(KERN_NOTICE "SCSI device %s: uses " 1465 sd_printk(KERN_NOTICE, sdkp,
1502 "READ/WRITE(6), disabling FUA\n", diskname); 1466 "Uses READ/WRITE(6), disabling FUA\n");
1503 sdkp->DPOFUA = 0; 1467 sdkp->DPOFUA = 0;
1504 } 1468 }
1505 1469
1506 printk(KERN_NOTICE "SCSI device %s: " 1470 sd_printk(KERN_NOTICE, sdkp,
1507 "write cache: %s, read cache: %s, %s\n", 1471 "Write cache: %s, read cache: %s, %s\n",
1508 diskname,
1509 sdkp->WCE ? "enabled" : "disabled", 1472 sdkp->WCE ? "enabled" : "disabled",
1510 sdkp->RCD ? "disabled" : "enabled", 1473 sdkp->RCD ? "disabled" : "enabled",
1511 sdkp->DPOFUA ? "supports DPO and FUA" 1474 sdkp->DPOFUA ? "supports DPO and FUA"
@@ -1518,15 +1481,13 @@ bad_sense:
1518 if (scsi_sense_valid(&sshdr) && 1481 if (scsi_sense_valid(&sshdr) &&
1519 sshdr.sense_key == ILLEGAL_REQUEST && 1482 sshdr.sense_key == ILLEGAL_REQUEST &&
1520 sshdr.asc == 0x24 && sshdr.ascq == 0x0) 1483 sshdr.asc == 0x24 && sshdr.ascq == 0x0)
1521 printk(KERN_NOTICE "%s: cache data unavailable\n", 1484 /* Invalid field in CDB */
1522 diskname); /* Invalid field in CDB */ 1485 sd_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
1523 else 1486 else
1524 printk(KERN_ERR "%s: asking for cache data failed\n", 1487 sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n");
1525 diskname);
1526 1488
1527defaults: 1489defaults:
1528 printk(KERN_ERR "%s: assuming drive cache: write through\n", 1490 sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n");
1529 diskname);
1530 sdkp->WCE = 0; 1491 sdkp->WCE = 0;
1531 sdkp->RCD = 0; 1492 sdkp->RCD = 0;
1532 sdkp->DPOFUA = 0; 1493 sdkp->DPOFUA = 0;
@@ -1544,7 +1505,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
1544 unsigned char *buffer; 1505 unsigned char *buffer;
1545 unsigned ordered; 1506 unsigned ordered;
1546 1507
1547 SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name)); 1508 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
1509 "sd_revalidate_disk\n"));
1548 1510
1549 /* 1511 /*
1550 * If the device is offline, don't try and read capacity or any 1512 * If the device is offline, don't try and read capacity or any
@@ -1555,8 +1517,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
1555 1517
1556 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL | __GFP_DMA); 1518 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL | __GFP_DMA);
1557 if (!buffer) { 1519 if (!buffer) {
1558 printk(KERN_WARNING "(sd_revalidate_disk:) Memory allocation " 1520 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
1559 "failure.\n"); 1521 "allocation failure.\n");
1560 goto out; 1522 goto out;
1561 } 1523 }
1562 1524
@@ -1568,16 +1530,16 @@ static int sd_revalidate_disk(struct gendisk *disk)
1568 sdkp->WCE = 0; 1530 sdkp->WCE = 0;
1569 sdkp->RCD = 0; 1531 sdkp->RCD = 0;
1570 1532
1571 sd_spinup_disk(sdkp, disk->disk_name); 1533 sd_spinup_disk(sdkp);
1572 1534
1573 /* 1535 /*
1574 * Without media there is no reason to ask; moreover, some devices 1536 * Without media there is no reason to ask; moreover, some devices
1575 * react badly if we do. 1537 * react badly if we do.
1576 */ 1538 */
1577 if (sdkp->media_present) { 1539 if (sdkp->media_present) {
1578 sd_read_capacity(sdkp, disk->disk_name, buffer); 1540 sd_read_capacity(sdkp, buffer);
1579 sd_read_write_protect_flag(sdkp, disk->disk_name, buffer); 1541 sd_read_write_protect_flag(sdkp, buffer);
1580 sd_read_cache_type(sdkp, disk->disk_name, buffer); 1542 sd_read_cache_type(sdkp, buffer);
1581 } 1543 }
1582 1544
1583 /* 1545 /*
@@ -1709,8 +1671,8 @@ static int sd_probe(struct device *dev)
1709 dev_set_drvdata(dev, sdkp); 1671 dev_set_drvdata(dev, sdkp);
1710 add_disk(gd); 1672 add_disk(gd);
1711 1673
1712 sdev_printk(KERN_NOTICE, sdp, "Attached scsi %sdisk %s\n", 1674 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
1713 sdp->removable ? "removable " : "", gd->disk_name); 1675 sdp->removable ? "removable " : "");
1714 1676
1715 return 0; 1677 return 0;
1716 1678
@@ -1774,6 +1736,31 @@ static void scsi_disk_release(struct class_device *cdev)
1774 kfree(sdkp); 1736 kfree(sdkp);
1775} 1737}
1776 1738
1739static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
1740{
1741 unsigned char cmd[6] = { START_STOP }; /* START_VALID */
1742 struct scsi_sense_hdr sshdr;
1743 struct scsi_device *sdp = sdkp->device;
1744 int res;
1745
1746 if (start)
1747 cmd[4] |= 1; /* START */
1748
1749 if (!scsi_device_online(sdp))
1750 return -ENODEV;
1751
1752 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
1753 SD_TIMEOUT, SD_MAX_RETRIES);
1754 if (res) {
1755 sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
1756 sd_print_result(sdkp, res);
1757 if (driver_byte(res) & DRIVER_SENSE)
1758 sd_print_sense_hdr(sdkp, &sshdr);
1759 }
1760
1761 return res;
1762}
1763
1777/* 1764/*
1778 * Send a SYNCHRONIZE CACHE instruction down to the device through 1765 * Send a SYNCHRONIZE CACHE instruction down to the device through
1779 * the normal SCSI command structure. Wait for the command to 1766 * the normal SCSI command structure. Wait for the command to
@@ -1781,20 +1768,62 @@ static void scsi_disk_release(struct class_device *cdev)
1781 */ 1768 */
1782static void sd_shutdown(struct device *dev) 1769static void sd_shutdown(struct device *dev)
1783{ 1770{
1784 struct scsi_device *sdp = to_scsi_device(dev);
1785 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); 1771 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
1786 1772
1787 if (!sdkp) 1773 if (!sdkp)
1788 return; /* this can happen */ 1774 return; /* this can happen */
1789 1775
1790 if (sdkp->WCE) { 1776 if (sdkp->WCE) {
1791 printk(KERN_NOTICE "Synchronizing SCSI cache for disk %s: \n", 1777 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
1792 sdkp->disk->disk_name); 1778 sd_sync_cache(sdkp);
1793 sd_sync_cache(sdp); 1779 }
1780
1781 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
1782 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
1783 sd_start_stop_device(sdkp, 0);
1794 } 1784 }
1785
1795 scsi_disk_put(sdkp); 1786 scsi_disk_put(sdkp);
1796} 1787}
1797 1788
1789static int sd_suspend(struct device *dev, pm_message_t mesg)
1790{
1791 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
1792 int ret;
1793
1794 if (!sdkp)
1795 return 0; /* this can happen */
1796
1797 if (sdkp->WCE) {
1798 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
1799 ret = sd_sync_cache(sdkp);
1800 if (ret)
1801 return ret;
1802 }
1803
1804 if (mesg.event == PM_EVENT_SUSPEND &&
1805 sdkp->device->manage_start_stop) {
1806 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
1807 ret = sd_start_stop_device(sdkp, 0);
1808 if (ret)
1809 return ret;
1810 }
1811
1812 return 0;
1813}
1814
1815static int sd_resume(struct device *dev)
1816{
1817 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
1818
1819 if (!sdkp->device->manage_start_stop)
1820 return 0;
1821
1822 sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
1823
1824 return sd_start_stop_device(sdkp, 1);
1825}
1826
1798/** 1827/**
1799 * init_sd - entry point for this driver (both when built in or when 1828 * init_sd - entry point for this driver (both when built in or when
1800 * a module). 1829 * a module).
@@ -1852,3 +1881,19 @@ static void __exit exit_sd(void)
1852 1881
1853module_init(init_sd); 1882module_init(init_sd);
1854module_exit(exit_sd); 1883module_exit(exit_sd);
1884
1885static void sd_print_sense_hdr(struct scsi_disk *sdkp,
1886 struct scsi_sense_hdr *sshdr)
1887{
1888 sd_printk(KERN_INFO, sdkp, "");
1889 scsi_show_sense_hdr(sshdr);
1890 sd_printk(KERN_INFO, sdkp, "");
1891 scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
1892}
1893
1894static void sd_print_result(struct scsi_disk *sdkp, int result)
1895{
1896 sd_printk(KERN_INFO, sdkp, "");
1897 scsi_show_result(result);
1898}
1899
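
Note: the printk conversions throughout this file rely on sd_printk(), which comes from the new <scsi/sd.h> include near the top of the diff. Its exact definition is not shown here; as an assumption, it is a thin wrapper of roughly this shape, prefixing each message with the disk's name:

/* assumed shape only, the real macro lives in <scsi/sd.h> */
#define example_sd_printk(prefix, sdkp, fmt, a...)			\
	sdev_printk(prefix, (sdkp)->device, "[%s] " fmt,		\
		    (sdkp)->disk->disk_name, ##a)
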
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 81e3bc7b02a1..570977cf9efb 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -917,6 +917,8 @@ sg_ioctl(struct inode *inode, struct file *filp,
917 return result; 917 return result;
918 if (val < 0) 918 if (val < 0)
919 return -EINVAL; 919 return -EINVAL;
920 val = min_t(int, val,
921 sdp->device->request_queue->max_sectors * 512);
920 if (val != sfp->reserve.bufflen) { 922 if (val != sfp->reserve.bufflen) {
921 if (sg_res_in_use(sfp) || sfp->mmap_called) 923 if (sg_res_in_use(sfp) || sfp->mmap_called)
922 return -EBUSY; 924 return -EBUSY;
@@ -925,7 +927,8 @@ sg_ioctl(struct inode *inode, struct file *filp,
925 } 927 }
926 return 0; 928 return 0;
927 case SG_GET_RESERVED_SIZE: 929 case SG_GET_RESERVED_SIZE:
928 val = (int) sfp->reserve.bufflen; 930 val = min_t(int, sfp->reserve.bufflen,
931 sdp->device->request_queue->max_sectors * 512);
929 return put_user(val, ip); 932 return put_user(val, ip);
930 case SG_SET_COMMAND_Q: 933 case SG_SET_COMMAND_Q:
931 result = get_user(val, ip); 934 result = get_user(val, ip);
@@ -1061,6 +1064,9 @@ sg_ioctl(struct inode *inode, struct file *filp,
1061 if (sdp->detached) 1064 if (sdp->detached)
1062 return -ENODEV; 1065 return -ENODEV;
1063 return scsi_ioctl(sdp->device, cmd_in, p); 1066 return scsi_ioctl(sdp->device, cmd_in, p);
1067 case BLKSECTGET:
1068 return put_user(sdp->device->request_queue->max_sectors * 512,
1069 ip);
1064 default: 1070 default:
1065 if (read_only) 1071 if (read_only)
1066 return -EPERM; /* don't know so take safe approach */ 1072 return -EPERM; /* don't know so take safe approach */
@@ -2339,6 +2345,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
2339{ 2345{
2340 Sg_fd *sfp; 2346 Sg_fd *sfp;
2341 unsigned long iflags; 2347 unsigned long iflags;
2348 int bufflen;
2342 2349
2343 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); 2350 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2344 if (!sfp) 2351 if (!sfp)
@@ -2369,7 +2376,9 @@ sg_add_sfp(Sg_device * sdp, int dev)
2369 if (unlikely(sg_big_buff != def_reserved_size)) 2376 if (unlikely(sg_big_buff != def_reserved_size))
2370 sg_big_buff = def_reserved_size; 2377 sg_big_buff = def_reserved_size;
2371 2378
2372 sg_build_reserve(sfp, sg_big_buff); 2379 bufflen = min_t(int, sg_big_buff,
2380 sdp->device->request_queue->max_sectors * 512);
2381 sg_build_reserve(sfp, bufflen);
2373 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n", 2382 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2374 sfp->reserve.bufflen, sfp->reserve.k_use_sg)); 2383 sfp->reserve.bufflen, sfp->reserve.k_use_sg));
2375 return sfp; 2384 return sfp;
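
Note: the sg changes clamp both the default and the user-requested reserved buffer to request_queue->max_sectors * 512, and expose the same limit through a new BLKSECTGET ioctl so userspace can size its transfers instead of guessing. Illustrative userspace query (device path is an example):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKSECTGET */

int main(void)
{
	int fd = open("/dev/sg0", O_RDONLY);
	int limit = 0;

	if (fd < 0 || ioctl(fd, BLKSECTGET, &limit) < 0) {
		perror("BLKSECTGET");
		return 1;
	}
	/* per the hunk above, sg reports max_sectors * 512 here */
	printf("sg transfer limit: %d\n", limit);
	close(fd);
	return 0;
}
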
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 1857d68e7195..f9a52af7f5b4 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -62,6 +62,8 @@
62MODULE_DESCRIPTION("SCSI cdrom (sr) driver"); 62MODULE_DESCRIPTION("SCSI cdrom (sr) driver");
63MODULE_LICENSE("GPL"); 63MODULE_LICENSE("GPL");
64MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_CDROM_MAJOR); 64MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_CDROM_MAJOR);
65MODULE_ALIAS_SCSI_DEVICE(TYPE_ROM);
66MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
65 67
66#define SR_DISKS 256 68#define SR_DISKS 256
67 69
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 98d8411bbccc..55bfeccf68a2 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -89,6 +89,7 @@ MODULE_AUTHOR("Kai Makisara");
89MODULE_DESCRIPTION("SCSI tape (st) driver"); 89MODULE_DESCRIPTION("SCSI tape (st) driver");
90MODULE_LICENSE("GPL"); 90MODULE_LICENSE("GPL");
91MODULE_ALIAS_CHARDEV_MAJOR(SCSI_TAPE_MAJOR); 91MODULE_ALIAS_CHARDEV_MAJOR(SCSI_TAPE_MAJOR);
92MODULE_ALIAS_SCSI_DEVICE(TYPE_TAPE);
92 93
93/* Set 'perm' (4th argument) to 0 to disable module_param's definition 94/* Set 'perm' (4th argument) to 0 to disable module_param's definition
94 * of sysfs parameters (which module_param doesn't yet support). 95 * of sysfs parameters (which module_param doesn't yet support).
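
Note: the MODULE_ALIAS_SCSI_DEVICE() lines added to sr and st (and to sd earlier in this diff) let module autoloading match on the SCSI peripheral device type carried in the device's modalias/uevent. As an assumption about how the macro expands, each line boils down to a wildcard alias such as:

#include <linux/module.h>

/* assumed expansion of MODULE_ALIAS_SCSI_DEVICE(TYPE_TAPE); TYPE_TAPE is 0x01 */
MODULE_ALIAS("scsi:t-0x01*");
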
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 8c766bcd1095..bbeb2451d32f 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -5,6 +5,7 @@
5 5
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/delay.h>
8#include <linux/module.h> 9#include <linux/module.h>
9#include <linux/init.h> 10#include <linux/init.h>
10 11
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index a583e89238fc..3158949ffa62 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -2680,7 +2680,7 @@ static int __init dc390_module_init(void)
2680 printk (KERN_INFO "DC390: Using safe settings.\n"); 2680 printk (KERN_INFO "DC390: Using safe settings.\n");
2681 } 2681 }
2682 2682
2683 return pci_module_init(&dc390_driver); 2683 return pci_register_driver(&dc390_driver);
2684} 2684}
2685 2685
2686static void __exit dc390_module_exit(void) 2686static void __exit dc390_module_exit(void)
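
Note: pci_module_init() was only ever a compatibility wrapper around pci_register_driver(), and this converts tmscsim to the direct call ahead of the wrapper's removal. The resulting registration pattern, in isolation (illustrative driver name, probe/remove omitted):

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver example_pci_driver = {
	.name	= "example",
	/* .id_table, .probe and .remove would be filled in by a real driver */
};

static int __init example_init(void)
{
	return pci_register_driver(&example_pci_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);
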