author    Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-05 16:30:44 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-05 16:30:44 -0400
commit    4f7a307dc6e4d8bfeb56f7cf7231b08cb845687c (patch)
tree      3bf90522c87fcb32373cb2a5ff25b1ead33405f5 /drivers/scsi
parent    fabb5c4e4a474ff0f7d6c1d3466a1b79bbce5f49 (diff)
parent    7297824581755593535fc97d2c8b6c47e2dc2db6 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (87 commits)
  [SCSI] fusion: fix domain validation loops
  [SCSI] qla2xxx: fix regression on sparc64
  [SCSI] modalias for scsi devices
  [SCSI] sg: cap reserved_size values at max_sectors
  [SCSI] BusLogic: stop using check_region
  [SCSI] tgt: fix rdma transfer bugs
  [SCSI] aacraid: fix aacraid not finding device
  [SCSI] aacraid: Correct SMC products in aacraid.txt
  [SCSI] scsi_error.c: Add EH Start Unit retry
  [SCSI] aacraid: [Fastboot] Panics for AACRAID driver during 'insmod' for kexec test.
  [SCSI] ipr: Driver version to 2.3.2
  [SCSI] ipr: Faster sg list fetch
  [SCSI] ipr: Return better qc_issue errors
  [SCSI] ipr: Disrupt device error
  [SCSI] ipr: Improve async error logging level control
  [SCSI] ipr: PCI unblock config access fix
  [SCSI] ipr: Fix for oops following SATA request sense
  [SCSI] ipr: Log error for SAS dual path switch
  [SCSI] ipr: Enable logging of debug error data for all devices
  [SCSI] ipr: Add new PCI-E IDs to device table
  ...
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/BusLogic.c | 73
-rw-r--r--  drivers/scsi/Kconfig | 22
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 402
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 76
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 286
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 4
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 112
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c | 35
-rw-r--r--  drivers/scsi/aacraid/linit.c | 65
-rw-r--r--  drivers/scsi/aacraid/nark.c | 3
-rw-r--r--  drivers/scsi/aacraid/rkt.c | 3
-rw-r--r--  drivers/scsi/aacraid/rx.c | 115
-rw-r--r--  drivers/scsi/aic7xxx/Kconfig.aic79xx | 12
-rw-r--r--  drivers/scsi/aic7xxx/Kconfig.aic7xxx | 10
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.h | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx.h | 5
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 2
-rw-r--r--  drivers/scsi/constants.c | 274
-rw-r--r--  drivers/scsi/dpt/dpti_i2o.h | 48
-rw-r--r--  drivers/scsi/dpt/dpti_ioctl.h | 2
-rw-r--r--  drivers/scsi/dpt/dptsig.h | 4
-rw-r--r--  drivers/scsi/dpt_i2o.c | 2
-rw-r--r--  drivers/scsi/eata_generic.h | 7
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 80
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c | 37
-rw-r--r--  drivers/scsi/ipr.c | 290
-rw-r--r--  drivers/scsi/ipr.h | 45
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 21
-rw-r--r--  drivers/scsi/libiscsi.c | 29
-rw-r--r--  drivers/scsi/libsrp.c | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 2
-rw-r--r--  drivers/scsi/osst.c | 1
-rw-r--r--  drivers/scsi/pci2000.h | 197
-rw-r--r--  drivers/scsi/pcmcia/Kconfig | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 177
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/scsi.c | 47
-rw-r--r--  drivers/scsi/scsi_error.c | 19
-rw-r--r--  drivers/scsi/scsi_lib.c | 8
-rw-r--r--  drivers/scsi/scsi_scan.c | 2
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 54
-rw-r--r--  drivers/scsi/scsi_tgt_if.c | 6
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c | 261
-rw-r--r--  drivers/scsi/scsi_tgt_priv.h | 5
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 6
-rw-r--r--  drivers/scsi/sd.c | 405
-rw-r--r--  drivers/scsi/sg.c | 13
-rw-r--r--  drivers/scsi/sr.c | 2
-rw-r--r--  drivers/scsi/st.c | 1
57 files changed, 1787 insertions, 1567 deletions
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index e874b8944875..96f4cab07614 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -579,17 +579,17 @@ static void __init BusLogic_InitializeProbeInfoListISA(struct BusLogic_HostAdapt
 /*
   Append the list of standard BusLogic MultiMaster ISA I/O Addresses.
 */
-    if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe330 : check_region(0x330, BusLogic_MultiMasterAddressCount) == 0)
+    if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe330)
         BusLogic_AppendProbeAddressISA(0x330);
-    if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe334 : check_region(0x334, BusLogic_MultiMasterAddressCount) == 0)
+    if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe334)
         BusLogic_AppendProbeAddressISA(0x334);
-    if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe230 : check_region(0x230, BusLogic_MultiMasterAddressCount) == 0)
+    if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe230)
         BusLogic_AppendProbeAddressISA(0x230);
-    if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe234 : check_region(0x234, BusLogic_MultiMasterAddressCount) == 0)
+    if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe234)
         BusLogic_AppendProbeAddressISA(0x234);
-    if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe130 : check_region(0x130, BusLogic_MultiMasterAddressCount) == 0)
+    if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe130)
         BusLogic_AppendProbeAddressISA(0x130);
-    if (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe134 : check_region(0x134, BusLogic_MultiMasterAddressCount) == 0)
+    if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe134)
         BusLogic_AppendProbeAddressISA(0x134);
 }
 
@@ -795,7 +795,9 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
    host adapters are probed.
 */
     if (!BusLogic_ProbeOptions.NoProbeISA)
-        if (PrimaryProbeInfo->IO_Address == 0 && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe330 : check_region(0x330, BusLogic_MultiMasterAddressCount) == 0)) {
+        if (PrimaryProbeInfo->IO_Address == 0 &&
+            (!BusLogic_ProbeOptions.LimitedProbeISA ||
+             BusLogic_ProbeOptions.Probe330)) {
             PrimaryProbeInfo->HostAdapterType = BusLogic_MultiMaster;
             PrimaryProbeInfo->HostAdapterBusType = BusLogic_ISA_Bus;
             PrimaryProbeInfo->IO_Address = 0x330;
@@ -805,15 +807,25 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
    omitting the Primary I/O Address which has already been handled.
 */
     if (!BusLogic_ProbeOptions.NoProbeISA) {
-        if (!StandardAddressSeen[1] && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe334 : check_region(0x334, BusLogic_MultiMasterAddressCount) == 0))
+        if (!StandardAddressSeen[1] &&
+            (!BusLogic_ProbeOptions.LimitedProbeISA ||
+             BusLogic_ProbeOptions.Probe334))
             BusLogic_AppendProbeAddressISA(0x334);
-        if (!StandardAddressSeen[2] && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe230 : check_region(0x230, BusLogic_MultiMasterAddressCount) == 0))
+        if (!StandardAddressSeen[2] &&
+            (!BusLogic_ProbeOptions.LimitedProbeISA ||
+             BusLogic_ProbeOptions.Probe230))
             BusLogic_AppendProbeAddressISA(0x230);
-        if (!StandardAddressSeen[3] && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe234 : check_region(0x234, BusLogic_MultiMasterAddressCount) == 0))
+        if (!StandardAddressSeen[3] &&
+            (!BusLogic_ProbeOptions.LimitedProbeISA ||
+             BusLogic_ProbeOptions.Probe234))
             BusLogic_AppendProbeAddressISA(0x234);
-        if (!StandardAddressSeen[4] && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe130 : check_region(0x130, BusLogic_MultiMasterAddressCount) == 0))
+        if (!StandardAddressSeen[4] &&
+            (!BusLogic_ProbeOptions.LimitedProbeISA ||
+             BusLogic_ProbeOptions.Probe130))
             BusLogic_AppendProbeAddressISA(0x130);
-        if (!StandardAddressSeen[5] && (BusLogic_ProbeOptions.LimitedProbeISA ? BusLogic_ProbeOptions.Probe134 : check_region(0x134, BusLogic_MultiMasterAddressCount) == 0))
+        if (!StandardAddressSeen[5] &&
+            (!BusLogic_ProbeOptions.LimitedProbeISA ||
+             BusLogic_ProbeOptions.Probe134))
             BusLogic_AppendProbeAddressISA(0x134);
     }
     /*
@@ -2220,22 +2232,35 @@ static int __init BusLogic_init(void)
         HostAdapter->PCI_Device = ProbeInfo->PCI_Device;
         HostAdapter->IRQ_Channel = ProbeInfo->IRQ_Channel;
         HostAdapter->AddressCount = BusLogic_HostAdapterAddressCount[HostAdapter->HostAdapterType];
+
+        /*
+          Make sure region is free prior to probing.
+        */
+        if (!request_region(HostAdapter->IO_Address, HostAdapter->AddressCount,
+                    "BusLogic"))
+            continue;
         /*
           Probe the Host Adapter.  If unsuccessful, abort further initialization.
         */
-        if (!BusLogic_ProbeHostAdapter(HostAdapter))
+        if (!BusLogic_ProbeHostAdapter(HostAdapter)) {
+            release_region(HostAdapter->IO_Address, HostAdapter->AddressCount);
             continue;
+        }
         /*
           Hard Reset the Host Adapter.  If unsuccessful, abort further
           initialization.
         */
-        if (!BusLogic_HardwareResetHostAdapter(HostAdapter, true))
+        if (!BusLogic_HardwareResetHostAdapter(HostAdapter, true)) {
+            release_region(HostAdapter->IO_Address, HostAdapter->AddressCount);
             continue;
+        }
         /*
           Check the Host Adapter.  If unsuccessful, abort further initialization.
         */
-        if (!BusLogic_CheckHostAdapter(HostAdapter))
+        if (!BusLogic_CheckHostAdapter(HostAdapter)) {
+            release_region(HostAdapter->IO_Address, HostAdapter->AddressCount);
             continue;
+        }
         /*
           Initialize the Driver Options field if provided.
         */
@@ -2247,16 +2272,6 @@ static int __init BusLogic_init(void)
         */
         BusLogic_AnnounceDriver(HostAdapter);
         /*
-          Register usage of the I/O Address range.  From this point onward, any
-          failure will be assumed to be due to a problem with the Host Adapter,
-          rather than due to having mistakenly identified this port as belonging
-          to a BusLogic Host Adapter.  The I/O Address range will not be
-          released, thereby preventing it from being incorrectly identified as
-          any other type of Host Adapter.
-        */
-        if (!request_region(HostAdapter->IO_Address, HostAdapter->AddressCount, "BusLogic"))
-            continue;
-        /*
           Register the SCSI Host structure.
         */
 
@@ -2280,6 +2295,12 @@ static int __init BusLogic_init(void)
           Acquire the System Resources necessary to use the Host Adapter, then
           Create the Initial CCBs, Initialize the Host Adapter, and finally
           perform Target Device Inquiry.
+
+          From this point onward, any failure will be assumed to be due to a
+          problem with the Host Adapter, rather than due to having mistakenly
+          identified this port as belonging to a BusLogic Host Adapter.  The
+          I/O Address range will not be released, thereby preventing it from
+          being incorrectly identified as any other type of Host Adapter.
         */
         if (BusLogic_ReadHostAdapterConfiguration(HostAdapter) &&
             BusLogic_ReportHostAdapterConfiguration(HostAdapter) &&
@@ -3598,6 +3619,7 @@ static void __exit BusLogic_exit(void)
 
 __setup("BusLogic=", BusLogic_Setup);
 
+#ifdef MODULE
 static struct pci_device_id BusLogic_pci_tbl[] __devinitdata = {
     { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
       PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -3607,6 +3629,7 @@ static struct pci_device_id BusLogic_pci_tbl[] __devinitdata = {
       PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
     { }
 };
+#endif
 MODULE_DEVICE_TABLE(pci, BusLogic_pci_tbl);
 
 module_init(BusLogic_init);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index e1ebed0f0755..58c811d20eb2 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -241,6 +241,12 @@ config SCSI_SCAN_ASYNC
      You can override this choice by specifying "scsi_mod.scan=sync"
      or async on the kernel's command line.
 
+config SCSI_WAIT_SCAN
+    tristate
+    default m
+    depends on SCSI
+    depends on MODULES
+
 menu "SCSI Transports"
     depends on SCSI
 
@@ -1194,17 +1200,6 @@ config SCSI_NCR53C8XX_SYNC
      There is no safe option other than using good cabling, right
      terminations and SCSI conformant devices.
 
-config SCSI_NCR53C8XX_PROFILE
-    bool "enable profiling"
-    depends on SCSI_ZALON || SCSI_NCR_Q720
-    help
-      This option allows you to enable profiling information gathering.
-      These statistics are not very accurate due to the low frequency
-      of the kernel clock (100 Hz on i386) and have performance impact
-      on systems that use very fast devices.
-
-      The normal answer therefore is N.
-
 config SCSI_NCR53C8XX_NO_DISCONNECT
     bool "not allow targets to disconnect"
     depends on (SCSI_ZALON || SCSI_NCR_Q720) && SCSI_NCR53C8XX_DEFAULT_TAGS=0
@@ -1334,11 +1329,6 @@ config SCSI_SIM710
 
      It currently supports Compaq EISA cards and NCR MCA cards
 
-config 53C700_IO_MAPPED
-    bool
-    depends on SCSI_SIM710
-    default y
-
 config SCSI_SYM53C416
     tristate "Symbios 53c416 SCSI support"
     depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 70cff4c599d7..51e884fa10b0 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -146,7 +146,7 @@ obj-$(CONFIG_CHR_DEV_SCH) += ch.o
 # This goes last, so that "real" scsi devices probe earlier
 obj-$(CONFIG_SCSI_DEBUG)    += scsi_debug.o
 
-obj-$(CONFIG_SCSI)          += scsi_wait_scan.o
+obj-$(CONFIG_SCSI_WAIT_SCAN) += scsi_wait_scan.o
 
 scsi_mod-y                  += scsi.o hosts.o scsi_ioctl.o constants.o \
                                scsicam.o scsi_error.o scsi_lib.o \
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index d789e61bdc49..1e82c69b36b0 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -5,7 +5,7 @@
  * based on the old aacraid driver that is..
  * Adaptec aacraid device driver for Linux.
  *
- * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -172,6 +172,30 @@ MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size.
 int expose_physicals = -1;
 module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on");
+
+
+static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
+        struct fib *fibptr) {
+    struct scsi_device *device;
+
+    if (unlikely(!scsicmd || !scsicmd->scsi_done )) {
+        dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"))
+;
+        aac_fib_complete(fibptr);
+        aac_fib_free(fibptr);
+        return 0;
+    }
+    scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
+    device = scsicmd->device;
+    if (unlikely(!device || !scsi_device_online(device))) {
+        dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
+        aac_fib_complete(fibptr);
+        aac_fib_free(fibptr);
+        return 0;
+    }
+    return 1;
+}
+
 /**
  *	aac_get_config_status	-	check the adapter configuration
  *	@common: adapter to query
@@ -258,13 +282,10 @@ int aac_get_containers(struct aac_dev *dev)
     u32 index;
     int status = 0;
     struct fib * fibptr;
-    unsigned instance;
     struct aac_get_container_count *dinfo;
     struct aac_get_container_count_resp *dresp;
     int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
 
-    instance = dev->scsi_host_ptr->unique_id;
-
     if (!(fibptr = aac_fib_alloc(dev)))
         return -ENOMEM;
 
@@ -284,88 +305,35 @@ int aac_get_containers(struct aac_dev *dev)
         maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
         aac_fib_complete(fibptr);
     }
+    aac_fib_free(fibptr);
 
     if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
         maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
-    fsa_dev_ptr = kmalloc(
-      sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL);
-    if (!fsa_dev_ptr) {
-        aac_fib_free(fibptr);
+    fsa_dev_ptr = kmalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
+                  GFP_KERNEL);
+    if (!fsa_dev_ptr)
         return -ENOMEM;
-    }
     memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);
 
     dev->fsa_dev = fsa_dev_ptr;
     dev->maximum_num_containers = maximum_num_containers;
 
-    for (index = 0; index < dev->maximum_num_containers; index++) {
-        struct aac_query_mount *dinfo;
-        struct aac_mount *dresp;
-
+    for (index = 0; index < dev->maximum_num_containers; ) {
         fsa_dev_ptr[index].devname[0] = '\0';
 
-        aac_fib_init(fibptr);
-        dinfo = (struct aac_query_mount *) fib_data(fibptr);
-
-        dinfo->command = cpu_to_le32(VM_NameServe);
-        dinfo->count = cpu_to_le32(index);
-        dinfo->type = cpu_to_le32(FT_FILESYS);
+        status = aac_probe_container(dev, index);
 
-        status = aac_fib_send(ContainerCommand,
-                 fibptr,
-                 sizeof (struct aac_query_mount),
-                 FsaNormal,
-                 1, 1,
-                 NULL, NULL);
-        if (status < 0 ) {
+        if (status < 0) {
             printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
             break;
         }
-        dresp = (struct aac_mount *)fib_data(fibptr);
 
-        if ((le32_to_cpu(dresp->status) == ST_OK) &&
-            (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
-            dinfo->command = cpu_to_le32(VM_NameServe64);
-            dinfo->count = cpu_to_le32(index);
-            dinfo->type = cpu_to_le32(FT_FILESYS);
-
-            if (aac_fib_send(ContainerCommand,
-                fibptr,
-                sizeof(struct aac_query_mount),
-                FsaNormal,
-                1, 1,
-                NULL, NULL) < 0)
-                continue;
-        } else
-            dresp->mnt[0].capacityhigh = 0;
-
-        dprintk ((KERN_DEBUG
-          "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%llu\n",
-          (int)index, (int)le32_to_cpu(dresp->status),
-          (int)le32_to_cpu(dresp->mnt[0].vol),
-          (int)le32_to_cpu(dresp->mnt[0].state),
-          ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
-            (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32)));
-        if ((le32_to_cpu(dresp->status) == ST_OK) &&
-            (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
-            (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
-            fsa_dev_ptr[index].valid = 1;
-            fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
-            fsa_dev_ptr[index].size
-              = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
-                (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
-            if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
-                fsa_dev_ptr[index].ro = 1;
-        }
-        aac_fib_complete(fibptr);
         /*
          *	If there are no more containers, then stop asking.
          */
-        if ((index + 1) >= le32_to_cpu(dresp->count)){
+        if (++index >= status)
             break;
-        }
     }
-    aac_fib_free(fibptr);
     return status;
 }
 
@@ -382,8 +350,9 @@ static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigne
         buf = scsicmd->request_buffer;
         transfer_len = min(scsicmd->request_bufflen, len + offset);
     }
-
-    memcpy(buf + offset, data, transfer_len - offset);
+    transfer_len -= offset;
+    if (buf && transfer_len)
+        memcpy(buf + offset, data, transfer_len);
 
     if (scsicmd->use_sg)
         kunmap_atomic(buf - sg->offset, KM_IRQ0);
@@ -396,7 +365,9 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
     struct scsi_cmnd * scsicmd;
 
     scsicmd = (struct scsi_cmnd *) context;
-    scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
+
+    if (!aac_valid_context(scsicmd, fibptr))
+        return;
 
     dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
     BUG_ON(fibptr == NULL);
@@ -431,7 +402,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
 /**
  *	aac_get_container_name	-	get container name, none blocking.
  */
-static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
+static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 {
     int status;
     struct aac_get_name *dinfo;
@@ -448,7 +419,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
 
     dinfo->command = cpu_to_le32(VM_ContainerConfig);
     dinfo->type = cpu_to_le32(CT_READ_NAME);
-    dinfo->cid = cpu_to_le32(cid);
+    dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
     dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
 
     status = aac_fib_send(ContainerCommand,
@@ -473,85 +444,192 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
473 return -1; 444 return -1;
474} 445}
475 446
476/** 447static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
477 * aac_probe_container - query a logical volume 448{
478 * @dev: device to query 449 struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
479 * @cid: container identifier 450
480 * 451 if (fsa_dev_ptr[scmd_id(scsicmd)].valid)
481 * Queries the controller about the given volume. The volume information 452 return aac_scsi_cmd(scsicmd);
482 * is updated in the struct fsa_dev_info structure rather than returned. 453
483 */ 454 scsicmd->result = DID_NO_CONNECT << 16;
484 455 scsicmd->scsi_done(scsicmd);
485int aac_probe_container(struct aac_dev *dev, int cid) 456 return 0;
457}
458
459static int _aac_probe_container2(void * context, struct fib * fibptr)
486{ 460{
487 struct fsa_dev_info *fsa_dev_ptr; 461 struct fsa_dev_info *fsa_dev_ptr;
488 int status; 462 int (*callback)(struct scsi_cmnd *);
463 struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
464
465 if (!aac_valid_context(scsicmd, fibptr))
466 return 0;
467
468 fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
469
470 scsicmd->SCp.Status = 0;
471 if (fsa_dev_ptr) {
472 struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
473 fsa_dev_ptr += scmd_id(scsicmd);
474
475 if ((le32_to_cpu(dresp->status) == ST_OK) &&
476 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
477 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
478 fsa_dev_ptr->valid = 1;
479 fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
480 fsa_dev_ptr->size
481 = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
482 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
483 fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
484 }
485 if ((fsa_dev_ptr->valid & 1) == 0)
486 fsa_dev_ptr->valid = 0;
487 scsicmd->SCp.Status = le32_to_cpu(dresp->count);
488 }
489 aac_fib_complete(fibptr);
490 aac_fib_free(fibptr);
491 callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
492 scsicmd->SCp.ptr = NULL;
493 return (*callback)(scsicmd);
494}
495
496static int _aac_probe_container1(void * context, struct fib * fibptr)
497{
498 struct scsi_cmnd * scsicmd;
499 struct aac_mount * dresp;
489 struct aac_query_mount *dinfo; 500 struct aac_query_mount *dinfo;
490 struct aac_mount *dresp; 501 int status;
491 struct fib * fibptr;
492 unsigned instance;
493 502
494 fsa_dev_ptr = dev->fsa_dev; 503 dresp = (struct aac_mount *) fib_data(fibptr);
495 if (!fsa_dev_ptr) 504 dresp->mnt[0].capacityhigh = 0;
496 return -ENOMEM; 505 if ((le32_to_cpu(dresp->status) != ST_OK) ||
497 instance = dev->scsi_host_ptr->unique_id; 506 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE))
507 return _aac_probe_container2(context, fibptr);
508 scsicmd = (struct scsi_cmnd *) context;
509 scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
498 510
499 if (!(fibptr = aac_fib_alloc(dev))) 511 if (!aac_valid_context(scsicmd, fibptr))
500 return -ENOMEM; 512 return 0;
501 513
502 aac_fib_init(fibptr); 514 aac_fib_init(fibptr);
503 515
504 dinfo = (struct aac_query_mount *)fib_data(fibptr); 516 dinfo = (struct aac_query_mount *)fib_data(fibptr);
505 517
506 dinfo->command = cpu_to_le32(VM_NameServe); 518 dinfo->command = cpu_to_le32(VM_NameServe64);
507 dinfo->count = cpu_to_le32(cid); 519 dinfo->count = cpu_to_le32(scmd_id(scsicmd));
508 dinfo->type = cpu_to_le32(FT_FILESYS); 520 dinfo->type = cpu_to_le32(FT_FILESYS);
509 521
510 status = aac_fib_send(ContainerCommand, 522 status = aac_fib_send(ContainerCommand,
511 fibptr, 523 fibptr,
512 sizeof(struct aac_query_mount), 524 sizeof(struct aac_query_mount),
513 FsaNormal, 525 FsaNormal,
514 1, 1, 526 0, 1,
515 NULL, NULL); 527 (fib_callback) _aac_probe_container2,
528 (void *) scsicmd);
529 /*
530 * Check that the command queued to the controller
531 */
532 if (status == -EINPROGRESS) {
533 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
534 return 0;
535 }
516 if (status < 0) { 536 if (status < 0) {
517 printk(KERN_WARNING "aacraid: aac_probe_container query failed.\n"); 537 /* Inherit results from VM_NameServe, if any */
518 goto error; 538 dresp->status = cpu_to_le32(ST_OK);
539 return _aac_probe_container2(context, fibptr);
519 } 540 }
541 return 0;
542}
520 543
521 dresp = (struct aac_mount *) fib_data(fibptr); 544static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
545{
546 struct fib * fibptr;
547 int status = -ENOMEM;
522 548
523 if ((le32_to_cpu(dresp->status) == ST_OK) && 549 if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) {
524 (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) { 550 struct aac_query_mount *dinfo;
525 dinfo->command = cpu_to_le32(VM_NameServe64);
526 dinfo->count = cpu_to_le32(cid);
527 dinfo->type = cpu_to_le32(FT_FILESYS);
528 551
529 if (aac_fib_send(ContainerCommand, 552 aac_fib_init(fibptr);
530 fibptr, 553
531 sizeof(struct aac_query_mount), 554 dinfo = (struct aac_query_mount *)fib_data(fibptr);
532 FsaNormal, 555
533 1, 1, 556 dinfo->command = cpu_to_le32(VM_NameServe);
534 NULL, NULL) < 0) 557 dinfo->count = cpu_to_le32(scmd_id(scsicmd));
535 goto error; 558 dinfo->type = cpu_to_le32(FT_FILESYS);
536 } else 559 scsicmd->SCp.ptr = (char *)callback;
537 dresp->mnt[0].capacityhigh = 0;
538 560
539 if ((le32_to_cpu(dresp->status) == ST_OK) && 561 status = aac_fib_send(ContainerCommand,
540 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && 562 fibptr,
541 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { 563 sizeof(struct aac_query_mount),
542 fsa_dev_ptr[cid].valid = 1; 564 FsaNormal,
543 fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol); 565 0, 1,
544 fsa_dev_ptr[cid].size 566 (fib_callback) _aac_probe_container1,
545 = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) + 567 (void *) scsicmd);
546 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32); 568 /*
547 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) 569 * Check that the command queued to the controller
548 fsa_dev_ptr[cid].ro = 1; 570 */
571 if (status == -EINPROGRESS) {
572 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
573 return 0;
574 }
575 if (status < 0) {
576 scsicmd->SCp.ptr = NULL;
577 aac_fib_complete(fibptr);
578 aac_fib_free(fibptr);
579 }
549 } 580 }
581 if (status < 0) {
582 struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
583 if (fsa_dev_ptr) {
584 fsa_dev_ptr += scmd_id(scsicmd);
585 if ((fsa_dev_ptr->valid & 1) == 0) {
586 fsa_dev_ptr->valid = 0;
587 return (*callback)(scsicmd);
588 }
589 }
590 }
591 return status;
592}
550 593
551error: 594/**
552 aac_fib_complete(fibptr); 595 * aac_probe_container - query a logical volume
553 aac_fib_free(fibptr); 596 * @dev: device to query
597 * @cid: container identifier
598 *
599 * Queries the controller about the given volume. The volume information
600 * is updated in the struct fsa_dev_info structure rather than returned.
601 */
602static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
603{
604 scsicmd->device = NULL;
605 return 0;
606}
607
608int aac_probe_container(struct aac_dev *dev, int cid)
609{
610 struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
611 struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
612 int status;
554 613
614 if (!scsicmd || !scsidev) {
615 kfree(scsicmd);
616 kfree(scsidev);
617 return -ENOMEM;
618 }
619 scsicmd->list.next = NULL;
620 scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))_aac_probe_container1;
621
622 scsicmd->device = scsidev;
623 scsidev->sdev_state = 0;
624 scsidev->id = cid;
625 scsidev->host = dev->scsi_host_ptr;
626
627 if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0)
628 while (scsicmd->device == scsidev)
629 schedule();
630 kfree(scsidev);
631 status = scsicmd->SCp.Status;
632 kfree(scsicmd);
555 return status; 633 return status;
556} 634}
557 635
@@ -1115,6 +1193,12 @@ int aac_get_adapter_info(struct aac_dev* dev)
1115 printk(KERN_INFO "%s%d: serial %x\n", 1193 printk(KERN_INFO "%s%d: serial %x\n",
1116 dev->name, dev->id, 1194 dev->name, dev->id,
1117 le32_to_cpu(dev->adapter_info.serial[0])); 1195 le32_to_cpu(dev->adapter_info.serial[0]));
1196 if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) {
1197 printk(KERN_INFO "%s%d: TSID %.*s\n",
1198 dev->name, dev->id,
1199 (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
1200 dev->supplement_adapter_info.VpdInfo.Tsid);
1201 }
1118 } 1202 }
1119 1203
1120 dev->nondasd_support = 0; 1204 dev->nondasd_support = 0;
@@ -1241,7 +1325,9 @@ static void io_callback(void *context, struct fib * fibptr)
1241 u32 cid; 1325 u32 cid;
1242 1326
1243 scsicmd = (struct scsi_cmnd *) context; 1327 scsicmd = (struct scsi_cmnd *) context;
1244 scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL; 1328
1329 if (!aac_valid_context(scsicmd, fibptr))
1330 return;
1245 1331
1246 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 1332 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1247 cid = scmd_id(scsicmd); 1333 cid = scmd_id(scsicmd);
@@ -1317,7 +1403,7 @@ static void io_callback(void *context, struct fib * fibptr)
1317 scsicmd->scsi_done(scsicmd); 1403 scsicmd->scsi_done(scsicmd);
1318} 1404}
1319 1405
1320static int aac_read(struct scsi_cmnd * scsicmd, int cid) 1406static int aac_read(struct scsi_cmnd * scsicmd)
1321{ 1407{
1322 u64 lba; 1408 u64 lba;
1323 u32 count; 1409 u32 count;
@@ -1331,7 +1417,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1331 */ 1417 */
1332 switch (scsicmd->cmnd[0]) { 1418 switch (scsicmd->cmnd[0]) {
1333 case READ_6: 1419 case READ_6:
1334 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid)); 1420 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd)));
1335 1421
1336 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | 1422 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
1337 (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; 1423 (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
@@ -1341,7 +1427,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1341 count = 256; 1427 count = 256;
1342 break; 1428 break;
1343 case READ_16: 1429 case READ_16:
1344 dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", cid)); 1430 dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd)));
1345 1431
1346 lba = ((u64)scsicmd->cmnd[2] << 56) | 1432 lba = ((u64)scsicmd->cmnd[2] << 56) |
1347 ((u64)scsicmd->cmnd[3] << 48) | 1433 ((u64)scsicmd->cmnd[3] << 48) |
@@ -1355,7 +1441,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1355 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13]; 1441 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
1356 break; 1442 break;
1357 case READ_12: 1443 case READ_12:
1358 dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", cid)); 1444 dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd)));
1359 1445
1360 lba = ((u64)scsicmd->cmnd[2] << 24) | 1446 lba = ((u64)scsicmd->cmnd[2] << 24) |
1361 (scsicmd->cmnd[3] << 16) | 1447 (scsicmd->cmnd[3] << 16) |
@@ -1365,7 +1451,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1365 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; 1451 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1366 break; 1452 break;
1367 default: 1453 default:
1368 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid)); 1454 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd)));
1369 1455
1370 lba = ((u64)scsicmd->cmnd[2] << 24) | 1456 lba = ((u64)scsicmd->cmnd[2] << 24) |
1371 (scsicmd->cmnd[3] << 16) | 1457 (scsicmd->cmnd[3] << 16) |
@@ -1405,7 +1491,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1405 return 0; 1491 return 0;
1406} 1492}
1407 1493
1408static int aac_write(struct scsi_cmnd * scsicmd, int cid) 1494static int aac_write(struct scsi_cmnd * scsicmd)
1409{ 1495{
1410 u64 lba; 1496 u64 lba;
1411 u32 count; 1497 u32 count;
@@ -1424,7 +1510,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1424 if (count == 0) 1510 if (count == 0)
1425 count = 256; 1511 count = 256;
1426 } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */ 1512 } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
1427 dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", cid)); 1513 dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
1428 1514
1429 lba = ((u64)scsicmd->cmnd[2] << 56) | 1515 lba = ((u64)scsicmd->cmnd[2] << 56) |
1430 ((u64)scsicmd->cmnd[3] << 48) | 1516 ((u64)scsicmd->cmnd[3] << 48) |
@@ -1436,14 +1522,14 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1436 count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) | 1522 count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
1437 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13]; 1523 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
1438 } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */ 1524 } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
1439 dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", cid)); 1525 dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));
1440 1526
1441 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) 1527 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
1442 | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 1528 | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1443 count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16) 1529 count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
1444 | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; 1530 | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1445 } else { 1531 } else {
1446 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid)); 1532 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
1447 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 1533 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1448 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; 1534 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
1449 } 1535 }
@@ -1488,7 +1574,9 @@ static void synchronize_callback(void *context, struct fib *fibptr)
1488 struct scsi_cmnd *cmd; 1574 struct scsi_cmnd *cmd;
1489 1575
1490 cmd = context; 1576 cmd = context;
1491 cmd->SCp.phase = AAC_OWNER_MIDLEVEL; 1577
1578 if (!aac_valid_context(cmd, fibptr))
1579 return;
1492 1580
1493 dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n", 1581 dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
1494 smp_processor_id(), jiffies)); 1582 smp_processor_id(), jiffies));
@@ -1523,7 +1611,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
1523 cmd->scsi_done(cmd); 1611 cmd->scsi_done(cmd);
1524} 1612}
1525 1613
1526static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid) 1614static int aac_synchronize(struct scsi_cmnd *scsicmd)
1527{ 1615{
1528 int status; 1616 int status;
1529 struct fib *cmd_fibcontext; 1617 struct fib *cmd_fibcontext;
@@ -1568,7 +1656,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1568 synchronizecmd = fib_data(cmd_fibcontext); 1656 synchronizecmd = fib_data(cmd_fibcontext);
1569 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig); 1657 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
1570 synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE); 1658 synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
1571 synchronizecmd->cid = cpu_to_le32(cid); 1659 synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
1572 synchronizecmd->count = 1660 synchronizecmd->count =
1573 cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data)); 1661 cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
1574 1662
@@ -1646,29 +1734,12 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1646 case TEST_UNIT_READY: 1734 case TEST_UNIT_READY:
1647 if (dev->in_reset) 1735 if (dev->in_reset)
1648 return -1; 1736 return -1;
1649 spin_unlock_irq(host->host_lock); 1737 return _aac_probe_container(scsicmd,
1650 aac_probe_container(dev, cid); 1738 aac_probe_container_callback2);
1651 if ((fsa_dev_ptr[cid].valid & 1) == 0)
1652 fsa_dev_ptr[cid].valid = 0;
1653 spin_lock_irq(host->host_lock);
1654 if (fsa_dev_ptr[cid].valid == 0) {
1655 scsicmd->result = DID_NO_CONNECT << 16;
1656 scsicmd->scsi_done(scsicmd);
1657 return 0;
1658 }
1659 default: 1739 default:
1660 break; 1740 break;
1661 } 1741 }
1662 } 1742 }
1663 /*
1664 * If the target container still doesn't exist,
1665 * return failure
1666 */
1667 if (fsa_dev_ptr[cid].valid == 0) {
1668 scsicmd->result = DID_BAD_TARGET << 16;
1669 scsicmd->scsi_done(scsicmd);
1670 return 0;
1671 }
1672 } else { /* check for physical non-dasd devices */ 1743 } else { /* check for physical non-dasd devices */
1673 if ((dev->nondasd_support == 1) || expose_physicals) { 1744 if ((dev->nondasd_support == 1) || expose_physicals) {
1674 if (dev->in_reset) 1745 if (dev->in_reset)
@@ -1733,7 +1804,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1733 setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type); 1804 setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
1734 inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */ 1805 inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
1735 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data)); 1806 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
1736 return aac_get_container_name(scsicmd, cid); 1807 return aac_get_container_name(scsicmd);
1737 } 1808 }
1738 case SERVICE_ACTION_IN: 1809 case SERVICE_ACTION_IN:
1739 if (!(dev->raw_io_interface) || 1810 if (!(dev->raw_io_interface) ||
@@ -1899,7 +1970,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1899 min(sizeof(fsa_dev_ptr[cid].devname), 1970 min(sizeof(fsa_dev_ptr[cid].devname),
1900 sizeof(scsicmd->request->rq_disk->disk_name) + 1)); 1971 sizeof(scsicmd->request->rq_disk->disk_name) + 1));
1901 1972
1902 return aac_read(scsicmd, cid); 1973 return aac_read(scsicmd);
1903 1974
1904 case WRITE_6: 1975 case WRITE_6:
1905 case WRITE_10: 1976 case WRITE_10:
@@ -1907,11 +1978,11 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1907 case WRITE_16: 1978 case WRITE_16:
1908 if (dev->in_reset) 1979 if (dev->in_reset)
1909 return -1; 1980 return -1;
1910 return aac_write(scsicmd, cid); 1981 return aac_write(scsicmd);
1911 1982
1912 case SYNCHRONIZE_CACHE: 1983 case SYNCHRONIZE_CACHE:
1913 /* Issue FIB to tell Firmware to flush it's cache */ 1984 /* Issue FIB to tell Firmware to flush it's cache */
1914 return aac_synchronize(scsicmd, cid); 1985 return aac_synchronize(scsicmd);
1915 1986
1916 default: 1987 default:
1917 /* 1988 /*
@@ -2058,7 +2129,10 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
2058 struct scsi_cmnd *scsicmd; 2129 struct scsi_cmnd *scsicmd;
2059 2130
2060 scsicmd = (struct scsi_cmnd *) context; 2131 scsicmd = (struct scsi_cmnd *) context;
2061 scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL; 2132
2133 if (!aac_valid_context(scsicmd, fibptr))
2134 return;
2135
2062 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 2136 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2063 2137
2064 BUG_ON(fibptr == NULL); 2138 BUG_ON(fibptr == NULL);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 39ecd0d22eb0..45ca3e801619 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,8 +12,8 @@
12 *----------------------------------------------------------------------------*/ 12 *----------------------------------------------------------------------------*/
13 13
14#ifndef AAC_DRIVER_BUILD 14#ifndef AAC_DRIVER_BUILD
15# define AAC_DRIVER_BUILD 2423 15# define AAC_DRIVER_BUILD 2437
16# define AAC_DRIVER_BRANCH "-mh3" 16# define AAC_DRIVER_BRANCH "-mh4"
17#endif 17#endif
18#define MAXIMUM_NUM_CONTAINERS 32 18#define MAXIMUM_NUM_CONTAINERS 32
19 19
@@ -48,49 +48,13 @@ struct diskparm
48 48
49 49
50/* 50/*
51 * DON'T CHANGE THE ORDER, this is set by the firmware 51 * Firmware constants
52 */ 52 */
53 53
54#define CT_NONE 0 54#define CT_NONE 0
55#define CT_VOLUME 1
56#define CT_MIRROR 2
57#define CT_STRIPE 3
58#define CT_RAID5 4
59#define CT_SSRW 5
60#define CT_SSRO 6
61#define CT_MORPH 7
62#define CT_PASSTHRU 8
63#define CT_RAID4 9
64#define CT_RAID10 10 /* stripe of mirror */
65#define CT_RAID00 11 /* stripe of stripe */
66#define CT_VOLUME_OF_MIRRORS 12 /* volume of mirror */
67#define CT_PSEUDO_RAID 13 /* really raid4 */
68#define CT_LAST_VOLUME_TYPE 14
69#define CT_OK 218 55#define CT_OK 218
70
71/*
72 * Types of objects addressable in some fashion by the client.
73 * This is a superset of those objects handled just by the filesystem
74 * and includes "raw" objects that an administrator would use to
75 * configure containers and filesystems.
76 */
77
78#define FT_REG 1 /* regular file */
79#define FT_DIR 2 /* directory */
80#define FT_BLK 3 /* "block" device - reserved */
81#define FT_CHR 4 /* "character special" device - reserved */
82#define FT_LNK 5 /* symbolic link */
83#define FT_SOCK 6 /* socket */
84#define FT_FIFO 7 /* fifo */
85#define FT_FILESYS 8 /* ADAPTEC's "FSA"(tm) filesystem */ 56#define FT_FILESYS 8 /* ADAPTEC's "FSA"(tm) filesystem */
86#define FT_DRIVE 9 /* physical disk - addressable in scsi by bus/id/lun */ 57#define FT_DRIVE 9 /* physical disk - addressable in scsi by bus/id/lun */
87#define FT_SLICE 10 /* virtual disk - raw volume - slice */
88#define FT_PARTITION 11 /* FSA partition - carved out of a slice - building block for containers */
89#define FT_VOLUME 12 /* Container - Volume Set */
90#define FT_STRIPE 13 /* Container - Stripe Set */
91#define FT_MIRROR 14 /* Container - Mirror Set */
92#define FT_RAID5 15 /* Container - Raid 5 Set */
93#define FT_DATABASE 16 /* Storage object with "foreign" content manager */
94 58
95/* 59/*
96 * Host side memory scatter gather list 60 * Host side memory scatter gather list
@@ -497,6 +461,7 @@ struct adapter_ops
497 void (*adapter_enable_int)(struct aac_dev *dev); 461 void (*adapter_enable_int)(struct aac_dev *dev);
498 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4); 462 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
499 int (*adapter_check_health)(struct aac_dev *dev); 463 int (*adapter_check_health)(struct aac_dev *dev);
464 int (*adapter_restart)(struct aac_dev *dev, int bled);
500 /* Transport operations */ 465 /* Transport operations */
501 int (*adapter_ioremap)(struct aac_dev * dev, u32 size); 466 int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
502 irqreturn_t (*adapter_intr)(int irq, void *dev_id); 467 irqreturn_t (*adapter_intr)(int irq, void *dev_id);
@@ -833,7 +798,7 @@ struct fib {
833 */ 798 */
834 struct list_head fiblink; 799 struct list_head fiblink;
835 void *data; 800 void *data;
836 struct hw_fib *hw_fib; /* Actual shared object */ 801 struct hw_fib *hw_fib_va; /* Actual shared object */
837 dma_addr_t hw_fib_pa; /* physical address of hw_fib*/ 802 dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
838}; 803};
839 804
@@ -878,10 +843,25 @@ struct aac_supplement_adapter_info
878 __le32 Version; 843 __le32 Version;
879 __le32 FeatureBits; 844 __le32 FeatureBits;
880 u8 SlotNumber; 845 u8 SlotNumber;
881 u8 ReservedPad0[0]; 846 u8 ReservedPad0[3];
882 u8 BuildDate[12]; 847 u8 BuildDate[12];
883 __le32 CurrentNumberPorts; 848 __le32 CurrentNumberPorts;
884 __le32 ReservedGrowth[24]; 849 struct {
850 u8 AssemblyPn[8];
851 u8 FruPn[8];
852 u8 BatteryFruPn[8];
853 u8 EcVersionString[8];
854 u8 Tsid[12];
855 } VpdInfo;
856 __le32 FlashFirmwareRevision;
857 __le32 FlashFirmwareBuild;
858 __le32 RaidTypeMorphOptions;
859 __le32 FlashFirmwareBootRevision;
860 __le32 FlashFirmwareBootBuild;
861 u8 MfgPcbaSerialNo[12];
862 u8 MfgWWNName[8];
863 __le32 MoreFeatureBits;
864 __le32 ReservedGrowth[1];
885}; 865};
886#define AAC_FEATURE_FALCON 0x00000010 866#define AAC_FEATURE_FALCON 0x00000010
887#define AAC_SIS_VERSION_V3 3 867#define AAC_SIS_VERSION_V3 3
@@ -970,7 +950,6 @@ struct aac_dev
970 struct fib *fibs; 950 struct fib *fibs;
971 951
972 struct fib *free_fib; 952 struct fib *free_fib;
973 struct fib *timeout_fib;
974 spinlock_t fib_lock; 953 spinlock_t fib_lock;
975 954
976 struct aac_queue_block *queues; 955 struct aac_queue_block *queues;
@@ -1060,6 +1039,9 @@ struct aac_dev
1060#define aac_adapter_check_health(dev) \ 1039#define aac_adapter_check_health(dev) \
1061 (dev)->a_ops.adapter_check_health(dev) 1040 (dev)->a_ops.adapter_check_health(dev)
1062 1041
1042#define aac_adapter_restart(dev,bled) \
1043 (dev)->a_ops.adapter_restart(dev,bled)
1044
1063#define aac_adapter_ioremap(dev, size) \ 1045#define aac_adapter_ioremap(dev, size) \
1064 (dev)->a_ops.adapter_ioremap(dev, size) 1046 (dev)->a_ops.adapter_ioremap(dev, size)
1065 1047
@@ -1516,8 +1498,7 @@ struct aac_mntent {
1516 struct creation_info create_info; /* if applicable */ 1498 struct creation_info create_info; /* if applicable */
1517 __le32 capacity; 1499 __le32 capacity;
1518 __le32 vol; /* substrate structure */ 1500 __le32 vol; /* substrate structure */
1519 __le32 obj; /* FT_FILESYS, 1501 __le32 obj; /* FT_FILESYS, etc. */
1520 FT_DATABASE, etc. */
1521 __le32 state; /* unready for mounting, 1502 __le32 state; /* unready for mounting,
1522 readonly, etc. */ 1503 readonly, etc. */
1523 union aac_contentinfo fileinfo; /* Info specific to content 1504 union aac_contentinfo fileinfo; /* Info specific to content
@@ -1817,7 +1798,7 @@ int aac_fib_send(u16 command, struct fib * context, unsigned long size, int prio
1817int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry); 1798int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
1818void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); 1799void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
1819int aac_fib_complete(struct fib * context); 1800int aac_fib_complete(struct fib * context);
1820#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data) 1801#define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data)
1821struct aac_dev *aac_init_adapter(struct aac_dev *dev); 1802struct aac_dev *aac_init_adapter(struct aac_dev *dev);
1822int aac_get_config_status(struct aac_dev *dev, int commit_flag); 1803int aac_get_config_status(struct aac_dev *dev, int commit_flag);
1823int aac_get_containers(struct aac_dev *dev); 1804int aac_get_containers(struct aac_dev *dev);
@@ -1840,8 +1821,11 @@ struct aac_driver_ident* aac_get_driver_ident(int devtype);
1840int aac_get_adapter_info(struct aac_dev* dev); 1821int aac_get_adapter_info(struct aac_dev* dev);
1841int aac_send_shutdown(struct aac_dev *dev); 1822int aac_send_shutdown(struct aac_dev *dev);
1842int aac_probe_container(struct aac_dev *dev, int cid); 1823int aac_probe_container(struct aac_dev *dev, int cid);
1824int _aac_rx_init(struct aac_dev *dev);
1825int aac_rx_select_comm(struct aac_dev *dev, int comm);
1843extern int numacb; 1826extern int numacb;
1844extern int acbsize; 1827extern int acbsize;
1845extern char aac_driver_version[]; 1828extern char aac_driver_version[];
1846extern int startup_timeout; 1829extern int startup_timeout;
1847extern int aif_timeout; 1830extern int aif_timeout;
1831extern int expose_physicals;
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index e21070f4eac1..72b0393b4596 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -5,7 +5,7 @@
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com) 8 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -64,12 +64,15 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
64 unsigned size; 64 unsigned size;
65 int retval; 65 int retval;
66 66
67 if (dev->in_reset) {
68 return -EBUSY;
69 }
67 fibptr = aac_fib_alloc(dev); 70 fibptr = aac_fib_alloc(dev);
68 if(fibptr == NULL) { 71 if(fibptr == NULL) {
69 return -ENOMEM; 72 return -ENOMEM;
70 } 73 }
71 74
72 kfib = fibptr->hw_fib; 75 kfib = fibptr->hw_fib_va;
73 /* 76 /*
74 * First copy in the header so that we can check the size field. 77 * First copy in the header so that we can check the size field.
75 */ 78 */
@@ -91,9 +94,9 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
91 goto cleanup; 94 goto cleanup;
92 } 95 }
93 /* Highjack the hw_fib */ 96 /* Highjack the hw_fib */
94 hw_fib = fibptr->hw_fib; 97 hw_fib = fibptr->hw_fib_va;
95 hw_fib_pa = fibptr->hw_fib_pa; 98 hw_fib_pa = fibptr->hw_fib_pa;
96 fibptr->hw_fib = kfib = pci_alloc_consistent(dev->pdev, size, &fibptr->hw_fib_pa); 99 fibptr->hw_fib_va = kfib = pci_alloc_consistent(dev->pdev, size, &fibptr->hw_fib_pa);
97 memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size); 100 memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
98 memcpy(kfib, hw_fib, dev->max_fib_size); 101 memcpy(kfib, hw_fib, dev->max_fib_size);
99 } 102 }
@@ -137,7 +140,7 @@ cleanup:
137 if (hw_fib) { 140 if (hw_fib) {
138 pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa); 141 pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa);
139 fibptr->hw_fib_pa = hw_fib_pa; 142 fibptr->hw_fib_pa = hw_fib_pa;
140 fibptr->hw_fib = hw_fib; 143 fibptr->hw_fib_va = hw_fib;
141 } 144 }
142 if (retval != -EINTR) 145 if (retval != -EINTR)
143 aac_fib_free(fibptr); 146 aac_fib_free(fibptr);
@@ -282,15 +285,15 @@ return_fib:
282 fib = list_entry(entry, struct fib, fiblink); 285 fib = list_entry(entry, struct fib, fiblink);
283 fibctx->count--; 286 fibctx->count--;
284 spin_unlock_irqrestore(&dev->fib_lock, flags); 287 spin_unlock_irqrestore(&dev->fib_lock, flags);
285 if (copy_to_user(f.fib, fib->hw_fib, sizeof(struct hw_fib))) { 288 if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
286 kfree(fib->hw_fib); 289 kfree(fib->hw_fib_va);
287 kfree(fib); 290 kfree(fib);
288 return -EFAULT; 291 return -EFAULT;
289 } 292 }
290 /* 293 /*
291 * Free the space occupied by this copy of the fib. 294 * Free the space occupied by this copy of the fib.
292 */ 295 */
293 kfree(fib->hw_fib); 296 kfree(fib->hw_fib_va);
294 kfree(fib); 297 kfree(fib);
295 status = 0; 298 status = 0;
296 } else { 299 } else {
@@ -340,7 +343,7 @@ int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
340 /* 343 /*
341 * Free the space occupied by this copy of the fib. 344 * Free the space occupied by this copy of the fib.
342 */ 345 */
343 kfree(fib->hw_fib); 346 kfree(fib->hw_fib_va);
344 kfree(fib); 347 kfree(fib);
345 } 348 }
346 /* 349 /*
@@ -388,10 +391,8 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
388 /* 391 /*
389 * Extract the fibctx from the input parameters 392 * Extract the fibctx from the input parameters
390 */ 393 */
391 if (fibctx->unique == (u32)(unsigned long)arg) { 394 if (fibctx->unique == (u32)(ptrdiff_t)arg) /* We found a winner */
392 /* We found a winner */
393 break; 395 break;
394 }
395 entry = entry->next; 396 entry = entry->next;
396 fibctx = NULL; 397 fibctx = NULL;
397 } 398 }
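
Both user-facing ioctl paths in commctrl.c — ioctl_send_fib() above and aac_send_raw_srb() below — gain the same early guard so management requests are refused while the controller is being reset. Condensed from the hunks (the raw-SRB variant also logs a dprintk before returning):

	if (dev->in_reset)
		return -EBUSY;
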
@@ -465,16 +466,20 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
465 void *sg_list[32]; 466 void *sg_list[32];
466 u32 sg_indx = 0; 467 u32 sg_indx = 0;
467 u32 byte_count = 0; 468 u32 byte_count = 0;
468 u32 actual_fibsize = 0; 469 u32 actual_fibsize64, actual_fibsize = 0;
469 int i; 470 int i;
470 471
471 472
473 if (dev->in_reset) {
474 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
475 return -EBUSY;
476 }
472 if (!capable(CAP_SYS_ADMIN)){ 477 if (!capable(CAP_SYS_ADMIN)){
473 dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n")); 478 dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
474 return -EPERM; 479 return -EPERM;
475 } 480 }
476 /* 481 /*
477 * Allocate and initialize a Fib then setup a BlockWrite command 482 * Allocate and initialize a Fib then setup a SRB command
478 */ 483 */
479 if (!(srbfib = aac_fib_alloc(dev))) { 484 if (!(srbfib = aac_fib_alloc(dev))) {
480 return -ENOMEM; 485 return -ENOMEM;
@@ -541,129 +546,183 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
541 rcode = -EINVAL; 546 rcode = -EINVAL;
542 goto cleanup; 547 goto cleanup;
543 } 548 }
544 if (dev->dac_support == 1) { 549 actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
550 ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
551 actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
552 (sizeof(struct sgentry64) - sizeof(struct sgentry));
553 /* User made a mistake - should not continue */
554 if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
555 dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
556 "Raw SRB command calculated fibsize=%lu;%lu "
557 "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
558 "issued fibsize=%d\n",
559 actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
560 sizeof(struct aac_srb), sizeof(struct sgentry),
561 sizeof(struct sgentry64), fibsize));
562 rcode = -EINVAL;
563 goto cleanup;
564 }
565 if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
566 dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
567 rcode = -EINVAL;
568 goto cleanup;
569 }
570 byte_count = 0;
571 if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
545 struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg; 572 struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
546 struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg; 573 struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
547 struct user_sgmap* usg;
548 byte_count = 0;
549 574
550 /* 575 /*
551 * This should also catch if user used the 32 bit sgmap 576 * This should also catch if user used the 32 bit sgmap
552 */ 577 */
553 actual_fibsize = sizeof(struct aac_srb) - 578 if (actual_fibsize64 == fibsize) {
554 sizeof(struct sgentry) + 579 actual_fibsize = actual_fibsize64;
555 ((upsg->count & 0xff) * 580 for (i = 0; i < upsg->count; i++) {
556 sizeof(struct sgentry)); 581 u64 addr;
557 if(actual_fibsize != fibsize){ // User made a mistake - should not continue 582 void* p;
558 dprintk((KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n")); 583 /* Does this really need to be GFP_DMA? */
559 rcode = -EINVAL; 584 p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
560 goto cleanup; 585 if(p == 0) {
561 } 586 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
562 usg = kmalloc(actual_fibsize - sizeof(struct aac_srb) 587 upsg->sg[i].count,i,upsg->count));
563 + sizeof(struct sgmap), GFP_KERNEL); 588 rcode = -ENOMEM;
564 if (!usg) { 589 goto cleanup;
565 dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n")); 590 }
566 rcode = -ENOMEM; 591 addr = (u64)upsg->sg[i].addr[0];
567 goto cleanup; 592 addr += ((u64)upsg->sg[i].addr[1]) << 32;
568 } 593 sg_user[i] = (void __user *)(ptrdiff_t)addr;
569 memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb) 594 sg_list[i] = p; // save so we can clean up later
570 + sizeof(struct sgmap)); 595 sg_indx = i;
571 actual_fibsize = sizeof(struct aac_srb) - 596
572 sizeof(struct sgentry) + ((usg->count & 0xff) * 597 if( flags & SRB_DataOut ){
573 sizeof(struct sgentry64)); 598 if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
574 if ((data_dir == DMA_NONE) && upsg->count) { 599 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
575 kfree (usg); 600 rcode = -EFAULT;
576 dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n")); 601 goto cleanup;
577 rcode = -EINVAL; 602 }
578 goto cleanup; 603 }
579 } 604 addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir);
580 605
581 for (i = 0; i < usg->count; i++) { 606 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
582 u64 addr; 607 psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
583 void* p; 608 byte_count += upsg->sg[i].count;
584 /* Does this really need to be GFP_DMA? */ 609 psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
585 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); 610 }
586 if(p == 0) { 611 } else {
587 kfree (usg); 612 struct user_sgmap* usg;
588 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", 613 usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
589 usg->sg[i].count,i,usg->count)); 614 + sizeof(struct sgmap), GFP_KERNEL);
615 if (!usg) {
616 dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
590 rcode = -ENOMEM; 617 rcode = -ENOMEM;
591 goto cleanup; 618 goto cleanup;
592 } 619 }
593 sg_user[i] = (void __user *)(long)usg->sg[i].addr; 620 memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb)
594 sg_list[i] = p; // save so we can clean up later 621 + sizeof(struct sgmap));
595 sg_indx = i; 622 actual_fibsize = actual_fibsize64;
596 623
597 if( flags & SRB_DataOut ){ 624 for (i = 0; i < usg->count; i++) {
598 if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){ 625 u64 addr;
626 void* p;
627 /* Does this really need to be GFP_DMA? */
628 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
629 if(p == 0) {
599 kfree (usg); 630 kfree (usg);
600 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); 631 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
601 rcode = -EFAULT; 632 usg->sg[i].count,i,usg->count));
633 rcode = -ENOMEM;
602 goto cleanup; 634 goto cleanup;
603 } 635 }
604 } 636 sg_user[i] = (void __user *)(ptrdiff_t)usg->sg[i].addr;
605 addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir); 637 sg_list[i] = p; // save so we can clean up later
638 sg_indx = i;
639
640 if( flags & SRB_DataOut ){
641 if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
642 kfree (usg);
643 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
644 rcode = -EFAULT;
645 goto cleanup;
646 }
647 }
648 addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
606 649
607 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); 650 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
608 psg->sg[i].addr[1] = cpu_to_le32(addr>>32); 651 psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
609 psg->sg[i].count = cpu_to_le32(usg->sg[i].count); 652 byte_count += usg->sg[i].count;
610 byte_count += usg->sg[i].count; 653 psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
654 }
655 kfree (usg);
611 } 656 }
612 kfree (usg);
613
614 srbcmd->count = cpu_to_le32(byte_count); 657 srbcmd->count = cpu_to_le32(byte_count);
615 psg->count = cpu_to_le32(sg_indx+1); 658 psg->count = cpu_to_le32(sg_indx+1);
616 status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); 659 status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
617 } else { 660 } else {
618 struct user_sgmap* upsg = &user_srbcmd->sg; 661 struct user_sgmap* upsg = &user_srbcmd->sg;
619 struct sgmap* psg = &srbcmd->sg; 662 struct sgmap* psg = &srbcmd->sg;
620 byte_count = 0; 663
621 664 if (actual_fibsize64 == fibsize) {
622 actual_fibsize = sizeof (struct aac_srb) + (((user_srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry)); 665 struct user_sgmap64* usg = (struct user_sgmap64 *)upsg;
623 if(actual_fibsize != fibsize){ // User made a mistake - should not continue 666 for (i = 0; i < upsg->count; i++) {
624 dprintk((KERN_DEBUG"aacraid: Bad Size specified in " 667 u64 addr;
625 "Raw SRB command calculated fibsize=%d " 668 void* p;
626 "user_srbcmd->sg.count=%d aac_srb=%d sgentry=%d " 669 /* Does this really need to be GFP_DMA? */
627 "issued fibsize=%d\n", 670 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
628 actual_fibsize, user_srbcmd->sg.count, 671 if(p == 0) {
629 sizeof(struct aac_srb), sizeof(struct sgentry), 672 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
630 fibsize)); 673 usg->sg[i].count,i,usg->count));
631 rcode = -EINVAL; 674 rcode = -ENOMEM;
632 goto cleanup;
633 }
634 if ((data_dir == DMA_NONE) && upsg->count) {
635 dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
636 rcode = -EINVAL;
637 goto cleanup;
638 }
639 for (i = 0; i < upsg->count; i++) {
640 dma_addr_t addr;
641 void* p;
642 p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
643 if(p == 0) {
644 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
645 upsg->sg[i].count, i, upsg->count));
646 rcode = -ENOMEM;
647 goto cleanup;
648 }
649 sg_user[i] = (void __user *)(long)upsg->sg[i].addr;
650 sg_list[i] = p; // save so we can clean up later
651 sg_indx = i;
652
653 if( flags & SRB_DataOut ){
654 if(copy_from_user(p, sg_user[i],
655 upsg->sg[i].count)) {
656 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
657 rcode = -EFAULT;
658 goto cleanup; 675 goto cleanup;
659 } 676 }
677 addr = (u64)usg->sg[i].addr[0];
678 addr += ((u64)usg->sg[i].addr[1]) << 32;
679 sg_user[i] = (void __user *)(ptrdiff_t)addr;
680 sg_list[i] = p; // save so we can clean up later
681 sg_indx = i;
682
683 if( flags & SRB_DataOut ){
684 if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
685 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
686 rcode = -EFAULT;
687 goto cleanup;
688 }
689 }
690 addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
691
692 psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
693 byte_count += usg->sg[i].count;
694 psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
660 } 695 }
661 addr = pci_map_single(dev->pdev, p, 696 } else {
662 upsg->sg[i].count, data_dir); 697 for (i = 0; i < upsg->count; i++) {
698 dma_addr_t addr;
699 void* p;
700 p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
701 if(p == 0) {
702 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
703 upsg->sg[i].count, i, upsg->count));
704 rcode = -ENOMEM;
705 goto cleanup;
706 }
707 sg_user[i] = (void __user *)(ptrdiff_t)upsg->sg[i].addr;
708 sg_list[i] = p; // save so we can clean up later
709 sg_indx = i;
710
711 if( flags & SRB_DataOut ){
712 if(copy_from_user(p, sg_user[i],
713 upsg->sg[i].count)) {
714 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
715 rcode = -EFAULT;
716 goto cleanup;
717 }
718 }
719 addr = pci_map_single(dev->pdev, p,
720 upsg->sg[i].count, data_dir);
663 721
664 psg->sg[i].addr = cpu_to_le32(addr); 722 psg->sg[i].addr = cpu_to_le32(addr);
665 psg->sg[i].count = cpu_to_le32(upsg->sg[i].count); 723 byte_count += upsg->sg[i].count;
666 byte_count += upsg->sg[i].count; 724 psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
725 }
667 } 726 }
668 srbcmd->count = cpu_to_le32(byte_count); 727 srbcmd->count = cpu_to_le32(byte_count);
669 psg->count = cpu_to_le32(sg_indx+1); 728 psg->count = cpu_to_le32(sg_indx+1);
@@ -682,7 +741,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
682 741
683 if( flags & SRB_DataIn ) { 742 if( flags & SRB_DataIn ) {
684 for(i = 0 ; i <= sg_indx; i++){ 743 for(i = 0 ; i <= sg_indx; i++){
685 byte_count = le32_to_cpu((dev->dac_support == 1) 744 byte_count = le32_to_cpu(
745 (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
686 ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count 746 ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
687 : srbcmd->sg.sg[i].count); 747 : srbcmd->sg.sg[i].count);
688 if(copy_to_user(sg_user[i], sg_list[i], byte_count)){ 748 if(copy_to_user(sg_user[i], sg_list[i], byte_count)){
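
The rewritten scatter/gather setup in aac_send_raw_srb() hinges on validating the caller-supplied fibsize once, up front, against both the 32-bit and 64-bit sgentry layouts instead of recomputing it separately in each branch. A condensed sketch of that check — fibsize, user_srbcmd and the struct names follow the patch, the local names size32/size64 are illustrative:

	u32 count  = user_srbcmd->sg.count & 0xff;
	u32 size32 = sizeof(struct aac_srb) - sizeof(struct sgentry)
			+ count * sizeof(struct sgentry);
	u32 size64 = size32
			+ count * (sizeof(struct sgentry64) - sizeof(struct sgentry));

	if (fibsize != size32 && fibsize != size64)
		return -EINVAL;	/* user made a mistake - do not continue */
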
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index ae34768987a4..33682ce96a5d 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -5,7 +5,7 @@
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com) 8 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -110,7 +110,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
110 /* 110 /*
111 * Align the beginning of Headers to commalign 111 * Align the beginning of Headers to commalign
112 */ 112 */
113 align = (commalign - ((unsigned long)(base) & (commalign - 1))); 113 align = (commalign - ((ptrdiff_t)(base) & (commalign - 1)));
114 base = base + align; 114 base = base + align;
115 phys = phys + align; 115 phys = phys + align;
116 /* 116 /*
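
The functional change in the comminit.c diff is the cast in the alignment step: the virtual base is rounded up to the next commalign boundary and the matching physical address advanced by the same amount, with ptrdiff_t presumably chosen to keep the mask arithmetic pointer-sized on both 32- and 64-bit builds. In isolation:

	align = (commalign - ((ptrdiff_t)base & (commalign - 1)));
	base += align;
	phys += align;
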
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 1b97f60652ba..5824a757a753 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -5,7 +5,7 @@
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com) 8 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -94,7 +94,7 @@ void aac_fib_map_free(struct aac_dev *dev)
94int aac_fib_setup(struct aac_dev * dev) 94int aac_fib_setup(struct aac_dev * dev)
95{ 95{
96 struct fib *fibptr; 96 struct fib *fibptr;
97 struct hw_fib *hw_fib_va; 97 struct hw_fib *hw_fib;
98 dma_addr_t hw_fib_pa; 98 dma_addr_t hw_fib_pa;
99 int i; 99 int i;
100 100
@@ -106,24 +106,24 @@ int aac_fib_setup(struct aac_dev * dev)
106 if (i<0) 106 if (i<0)
107 return -ENOMEM; 107 return -ENOMEM;
108 108
109 hw_fib_va = dev->hw_fib_va; 109 hw_fib = dev->hw_fib_va;
110 hw_fib_pa = dev->hw_fib_pa; 110 hw_fib_pa = dev->hw_fib_pa;
111 memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); 111 memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
112 /* 112 /*
113 * Initialise the fibs 113 * Initialise the fibs
114 */ 114 */
115 for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++) 115 for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
116 { 116 {
117 fibptr->dev = dev; 117 fibptr->dev = dev;
118 fibptr->hw_fib = hw_fib_va; 118 fibptr->hw_fib_va = hw_fib;
119 fibptr->data = (void *) fibptr->hw_fib->data; 119 fibptr->data = (void *) fibptr->hw_fib_va->data;
120 fibptr->next = fibptr+1; /* Forward chain the fibs */ 120 fibptr->next = fibptr+1; /* Forward chain the fibs */
121 init_MUTEX_LOCKED(&fibptr->event_wait); 121 init_MUTEX_LOCKED(&fibptr->event_wait);
122 spin_lock_init(&fibptr->event_lock); 122 spin_lock_init(&fibptr->event_lock);
123 hw_fib_va->header.XferState = cpu_to_le32(0xffffffff); 123 hw_fib->header.XferState = cpu_to_le32(0xffffffff);
124 hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size); 124 hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
125 fibptr->hw_fib_pa = hw_fib_pa; 125 fibptr->hw_fib_pa = hw_fib_pa;
126 hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size); 126 hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size);
127 hw_fib_pa = hw_fib_pa + dev->max_fib_size; 127 hw_fib_pa = hw_fib_pa + dev->max_fib_size;
128 } 128 }
129 /* 129 /*
@@ -166,7 +166,7 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
166 * Null out fields that depend on being zero at the start of 166 * Null out fields that depend on being zero at the start of
167 * each I/O 167 * each I/O
168 */ 168 */
169 fibptr->hw_fib->header.XferState = 0; 169 fibptr->hw_fib_va->header.XferState = 0;
170 fibptr->callback = NULL; 170 fibptr->callback = NULL;
171 fibptr->callback_data = NULL; 171 fibptr->callback_data = NULL;
172 172
@@ -178,7 +178,6 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
178 * @fibptr: fib to free up 178 * @fibptr: fib to free up
179 * 179 *
180 * Frees up a fib and places it on the appropriate queue 180 * Frees up a fib and places it on the appropriate queue
181 * (either free or timed out)
182 */ 181 */
183 182
184void aac_fib_free(struct fib *fibptr) 183void aac_fib_free(struct fib *fibptr)
@@ -186,19 +185,15 @@ void aac_fib_free(struct fib *fibptr)
186 unsigned long flags; 185 unsigned long flags;
187 186
188 spin_lock_irqsave(&fibptr->dev->fib_lock, flags); 187 spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
189 if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) { 188 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
190 aac_config.fib_timeouts++; 189 aac_config.fib_timeouts++;
191 fibptr->next = fibptr->dev->timeout_fib; 190 if (fibptr->hw_fib_va->header.XferState != 0) {
192 fibptr->dev->timeout_fib = fibptr; 191 printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
193 } else { 192 (void*)fibptr,
194 if (fibptr->hw_fib->header.XferState != 0) { 193 le32_to_cpu(fibptr->hw_fib_va->header.XferState));
195 printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 194 }
196 (void*)fibptr, 195 fibptr->next = fibptr->dev->free_fib;
197 le32_to_cpu(fibptr->hw_fib->header.XferState)); 196 fibptr->dev->free_fib = fibptr;
198 }
199 fibptr->next = fibptr->dev->free_fib;
200 fibptr->dev->free_fib = fibptr;
201 }
202 spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags); 197 spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
203} 198}
204 199
@@ -211,7 +206,7 @@ void aac_fib_free(struct fib *fibptr)
211 206
212void aac_fib_init(struct fib *fibptr) 207void aac_fib_init(struct fib *fibptr)
213{ 208{
214 struct hw_fib *hw_fib = fibptr->hw_fib; 209 struct hw_fib *hw_fib = fibptr->hw_fib_va;
215 210
216 hw_fib->header.StructType = FIB_MAGIC; 211 hw_fib->header.StructType = FIB_MAGIC;
217 hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size); 212 hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
@@ -231,7 +226,7 @@ void aac_fib_init(struct fib *fibptr)
231 226
232static void fib_dealloc(struct fib * fibptr) 227static void fib_dealloc(struct fib * fibptr)
233{ 228{
234 struct hw_fib *hw_fib = fibptr->hw_fib; 229 struct hw_fib *hw_fib = fibptr->hw_fib_va;
235 BUG_ON(hw_fib->header.StructType != FIB_MAGIC); 230 BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
236 hw_fib->header.XferState = 0; 231 hw_fib->header.XferState = 0;
237} 232}
@@ -386,7 +381,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
386 void *callback_data) 381 void *callback_data)
387{ 382{
388 struct aac_dev * dev = fibptr->dev; 383 struct aac_dev * dev = fibptr->dev;
389 struct hw_fib * hw_fib = fibptr->hw_fib; 384 struct hw_fib * hw_fib = fibptr->hw_fib_va;
390 unsigned long flags = 0; 385 unsigned long flags = 0;
391 unsigned long qflags; 386 unsigned long qflags;
392 387
@@ -430,7 +425,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
430 */ 425 */
431 hw_fib->header.Command = cpu_to_le16(command); 426 hw_fib->header.Command = cpu_to_le16(command);
432 hw_fib->header.XferState |= cpu_to_le32(SentFromHost); 427 hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
433 fibptr->hw_fib->header.Flags = 0; /* 0 the flags field - internal only*/ 428 fibptr->hw_fib_va->header.Flags = 0; /* 0 the flags field - internal only*/
434 /* 429 /*
435 * Set the size of the Fib we want to send to the adapter 430 * Set the size of the Fib we want to send to the adapter
436 */ 431 */
@@ -462,7 +457,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
462 dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command))); 457 dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
463 dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command))); 458 dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
464 dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState))); 459 dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
465 dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib)); 460 dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib_va));
466 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa)); 461 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
467 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr)); 462 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
468 463
@@ -513,22 +508,20 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
513 } 508 }
514 udelay(5); 509 udelay(5);
515 } 510 }
516 } else if (down_interruptible(&fibptr->event_wait)) { 511 } else
517 spin_lock_irqsave(&fibptr->event_lock, flags); 512 (void)down_interruptible(&fibptr->event_wait);
518 if (fibptr->done == 0) { 513 spin_lock_irqsave(&fibptr->event_lock, flags);
519 fibptr->done = 2; /* Tell interrupt we aborted */ 514 if (fibptr->done == 0) {
520 spin_unlock_irqrestore(&fibptr->event_lock, flags); 515 fibptr->done = 2; /* Tell interrupt we aborted */
521 return -EINTR;
522 }
523 spin_unlock_irqrestore(&fibptr->event_lock, flags); 516 spin_unlock_irqrestore(&fibptr->event_lock, flags);
517 return -EINTR;
524 } 518 }
519 spin_unlock_irqrestore(&fibptr->event_lock, flags);
525 BUG_ON(fibptr->done == 0); 520 BUG_ON(fibptr->done == 0);
526 521
527 if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){ 522 if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
528 return -ETIMEDOUT; 523 return -ETIMEDOUT;
529 } else { 524 return 0;
530 return 0;
531 }
532 } 525 }
533 /* 526 /*
534 * If the user does not want a response than return success otherwise 527 * If the user does not want a response than return success otherwise
@@ -624,7 +617,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
624 617
625int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) 618int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
626{ 619{
627 struct hw_fib * hw_fib = fibptr->hw_fib; 620 struct hw_fib * hw_fib = fibptr->hw_fib_va;
628 struct aac_dev * dev = fibptr->dev; 621 struct aac_dev * dev = fibptr->dev;
629 struct aac_queue * q; 622 struct aac_queue * q;
630 unsigned long nointr = 0; 623 unsigned long nointr = 0;
@@ -688,7 +681,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
688 681
689int aac_fib_complete(struct fib *fibptr) 682int aac_fib_complete(struct fib *fibptr)
690{ 683{
691 struct hw_fib * hw_fib = fibptr->hw_fib; 684 struct hw_fib * hw_fib = fibptr->hw_fib_va;
692 685
693 /* 686 /*
694 * Check for a fib which has already been completed 687 * Check for a fib which has already been completed
@@ -774,9 +767,8 @@ void aac_printf(struct aac_dev *dev, u32 val)
774#define AIF_SNIFF_TIMEOUT (30*HZ) 767#define AIF_SNIFF_TIMEOUT (30*HZ)
775static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) 768static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
776{ 769{
777 struct hw_fib * hw_fib = fibptr->hw_fib; 770 struct hw_fib * hw_fib = fibptr->hw_fib_va;
778 struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data; 771 struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
779 int busy;
780 u32 container; 772 u32 container;
781 struct scsi_device *device; 773 struct scsi_device *device;
782 enum { 774 enum {
@@ -988,9 +980,6 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
988 * behind you. 980 * behind you.
989 */ 981 */
990 982
991 busy = 0;
992
993
994 /* 983 /*
995 * Find the scsi_device associated with the SCSI address, 984 * Find the scsi_device associated with the SCSI address,
996 * and mark it as changed, invalidating the cache. This deals 985 * and mark it as changed, invalidating the cache. This deals
@@ -1035,7 +1024,6 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
1035static int _aac_reset_adapter(struct aac_dev *aac) 1024static int _aac_reset_adapter(struct aac_dev *aac)
1036{ 1025{
1037 int index, quirks; 1026 int index, quirks;
1038 u32 ret;
1039 int retval; 1027 int retval;
1040 struct Scsi_Host *host; 1028 struct Scsi_Host *host;
1041 struct scsi_device *dev; 1029 struct scsi_device *dev;
@@ -1059,35 +1047,29 @@ static int _aac_reset_adapter(struct aac_dev *aac)
1059 * If a positive health, means in a known DEAD PANIC 1047 * If a positive health, means in a known DEAD PANIC
1060 * state and the adapter could be reset to `try again'. 1048 * state and the adapter could be reset to `try again'.
1061 */ 1049 */
1062 retval = aac_adapter_check_health(aac); 1050 retval = aac_adapter_restart(aac, aac_adapter_check_health(aac));
1063 if (retval == 0)
1064 retval = aac_adapter_sync_cmd(aac, IOP_RESET_ALWAYS,
1065 0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
1066 if (retval)
1067 retval = aac_adapter_sync_cmd(aac, IOP_RESET,
1068 0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
1069 1051
1070 if (retval) 1052 if (retval)
1071 goto out; 1053 goto out;
1072 if (ret != 0x00000001) {
1073 retval = -ENODEV;
1074 goto out;
1075 }
1076 1054
1077 /* 1055 /*
1078 * Loop through the fibs, close the synchronous FIBS 1056 * Loop through the fibs, close the synchronous FIBS
1079 */ 1057 */
1080 for (index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) { 1058 for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
1081 struct fib *fib = &aac->fibs[index]; 1059 struct fib *fib = &aac->fibs[index];
1082 if (!(fib->hw_fib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) && 1060 if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1083 (fib->hw_fib->header.XferState & cpu_to_le32(ResponseExpected))) { 1061 (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
1084 unsigned long flagv; 1062 unsigned long flagv;
1085 spin_lock_irqsave(&fib->event_lock, flagv); 1063 spin_lock_irqsave(&fib->event_lock, flagv);
1086 up(&fib->event_wait); 1064 up(&fib->event_wait);
1087 spin_unlock_irqrestore(&fib->event_lock, flagv); 1065 spin_unlock_irqrestore(&fib->event_lock, flagv);
1088 schedule(); 1066 schedule();
1067 retval = 0;
1089 } 1068 }
1090 } 1069 }
1070 /* Give some extra time for ioctls to complete. */
1071 if (retval == 0)
1072 ssleep(2);
1091 index = aac->cardtype; 1073 index = aac->cardtype;
1092 1074
1093 /* 1075 /*
@@ -1248,7 +1230,7 @@ int aac_check_health(struct aac_dev * aac)
1248 1230
1249 memset(hw_fib, 0, sizeof(struct hw_fib)); 1231 memset(hw_fib, 0, sizeof(struct hw_fib));
1250 memset(fib, 0, sizeof(struct fib)); 1232 memset(fib, 0, sizeof(struct fib));
1251 fib->hw_fib = hw_fib; 1233 fib->hw_fib_va = hw_fib;
1252 fib->dev = aac; 1234 fib->dev = aac;
1253 aac_fib_init(fib); 1235 aac_fib_init(fib);
1254 fib->type = FSAFS_NTC_FIB_CONTEXT; 1236 fib->type = FSAFS_NTC_FIB_CONTEXT;
@@ -1354,11 +1336,11 @@ int aac_command_thread(void *data)
1354 * do anything at this point since we don't have 1336 * do anything at this point since we don't have
1355 * anything defined for this thread to do. 1337 * anything defined for this thread to do.
1356 */ 1338 */
1357 hw_fib = fib->hw_fib; 1339 hw_fib = fib->hw_fib_va;
1358 memset(fib, 0, sizeof(struct fib)); 1340 memset(fib, 0, sizeof(struct fib));
1359 fib->type = FSAFS_NTC_FIB_CONTEXT; 1341 fib->type = FSAFS_NTC_FIB_CONTEXT;
1360 fib->size = sizeof( struct fib ); 1342 fib->size = sizeof( struct fib );
1361 fib->hw_fib = hw_fib; 1343 fib->hw_fib_va = hw_fib;
1362 fib->data = hw_fib->data; 1344 fib->data = hw_fib->data;
1363 fib->dev = dev; 1345 fib->dev = dev;
1364 /* 1346 /*
@@ -1485,7 +1467,7 @@ int aac_command_thread(void *data)
1485 */ 1467 */
1486 memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib)); 1468 memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
1487 memcpy(newfib, fib, sizeof(struct fib)); 1469 memcpy(newfib, fib, sizeof(struct fib));
1488 newfib->hw_fib = hw_newfib; 1470 newfib->hw_fib_va = hw_newfib;
1489 /* 1471 /*
1490 * Put the FIB onto the 1472 * Put the FIB onto the
1491 * fibctx's fibs 1473 * fibctx's fibs
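
Worth calling out from the commsup.c hunks: the blocking path of aac_fib_send() no longer treats an interrupted down_interruptible() specially. The wait result is discarded, event_lock is always taken, and if the response has not arrived yet (done == 0) the fib is marked done = 2 so the completion side knows the caller has given up, and -EINTR is returned. Condensed from the patch:

	(void)down_interruptible(&fibptr->event_wait);
	spin_lock_irqsave(&fibptr->event_lock, flags);
	if (fibptr->done == 0) {
		fibptr->done = 2;	/* tell the interrupt path we aborted */
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		return -EINTR;
	}
	spin_unlock_irqrestore(&fibptr->event_lock, flags);
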
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 66aeb57dcc2d..42c7dcda6d9b 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -5,7 +5,7 @@
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com) 8 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -72,7 +72,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
72 u32 index = le32_to_cpu(entry->addr); 72 u32 index = le32_to_cpu(entry->addr);
73 fast = index & 0x01; 73 fast = index & 0x01;
74 fib = &dev->fibs[index >> 2]; 74 fib = &dev->fibs[index >> 2];
75 hwfib = fib->hw_fib; 75 hwfib = fib->hw_fib_va;
76 76
77 aac_consumer_free(dev, q, HostNormRespQueue); 77 aac_consumer_free(dev, q, HostNormRespQueue);
78 /* 78 /*
@@ -83,11 +83,13 @@ unsigned int aac_response_normal(struct aac_queue * q)
83 * continue. The caller has already been notified that 83 * continue. The caller has already been notified that
84 * the fib timed out. 84 * the fib timed out.
85 */ 85 */
86 if (!(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) 86 dev->queues->queue[AdapNormCmdQueue].numpending--;
87 dev->queues->queue[AdapNormCmdQueue].numpending--; 87
88 else { 88 if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
89 printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags); 89 spin_unlock_irqrestore(q->lock, flags);
90 printk(KERN_DEBUG"aacraid: hwfib=%p fib index=%i fib=%p\n",hwfib, hwfib->header.SenderData,fib); 90 aac_fib_complete(fib);
91 aac_fib_free(fib);
92 spin_lock_irqsave(q->lock, flags);
91 continue; 93 continue;
92 } 94 }
93 spin_unlock_irqrestore(q->lock, flags); 95 spin_unlock_irqrestore(q->lock, flags);
@@ -192,7 +194,7 @@ unsigned int aac_command_normal(struct aac_queue *q)
192 INIT_LIST_HEAD(&fib->fiblink); 194 INIT_LIST_HEAD(&fib->fiblink);
193 fib->type = FSAFS_NTC_FIB_CONTEXT; 195 fib->type = FSAFS_NTC_FIB_CONTEXT;
194 fib->size = sizeof(struct fib); 196 fib->size = sizeof(struct fib);
195 fib->hw_fib = hw_fib; 197 fib->hw_fib_va = hw_fib;
196 fib->data = hw_fib->data; 198 fib->data = hw_fib->data;
197 fib->dev = dev; 199 fib->dev = dev;
198 200
@@ -253,12 +255,13 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
253 return 1; 255 return 1;
254 } 256 }
255 memset(hw_fib, 0, sizeof(struct hw_fib)); 257 memset(hw_fib, 0, sizeof(struct hw_fib));
256 memcpy(hw_fib, (struct hw_fib *)(((unsigned long)(dev->regs.sa)) + (index & ~0x00000002L)), sizeof(struct hw_fib)); 258 memcpy(hw_fib, (struct hw_fib *)(((ptrdiff_t)(dev->regs.sa)) +
259 (index & ~0x00000002L)), sizeof(struct hw_fib));
257 memset(fib, 0, sizeof(struct fib)); 260 memset(fib, 0, sizeof(struct fib));
258 INIT_LIST_HEAD(&fib->fiblink); 261 INIT_LIST_HEAD(&fib->fiblink);
259 fib->type = FSAFS_NTC_FIB_CONTEXT; 262 fib->type = FSAFS_NTC_FIB_CONTEXT;
260 fib->size = sizeof(struct fib); 263 fib->size = sizeof(struct fib);
261 fib->hw_fib = hw_fib; 264 fib->hw_fib_va = hw_fib;
262 fib->data = hw_fib->data; 265 fib->data = hw_fib->data;
263 fib->dev = dev; 266 fib->dev = dev;
264 267
@@ -270,7 +273,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
270 } else { 273 } else {
271 int fast = index & 0x01; 274 int fast = index & 0x01;
272 struct fib * fib = &dev->fibs[index >> 2]; 275 struct fib * fib = &dev->fibs[index >> 2];
273 struct hw_fib * hwfib = fib->hw_fib; 276 struct hw_fib * hwfib = fib->hw_fib_va;
274 277
275 /* 278 /*
276 * Remove this fib from the Outstanding I/O queue. 279 * Remove this fib from the Outstanding I/O queue.
@@ -280,14 +283,14 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
280 * continue. The caller has already been notified that 283 * continue. The caller has already been notified that
281 * the fib timed out. 284 * the fib timed out.
282 */ 285 */
283 if ((fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) { 286 dev->queues->queue[AdapNormCmdQueue].numpending--;
284 printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags); 287
285 printk(KERN_DEBUG"aacraid: hwfib=%p index=%i fib=%p\n",hwfib, hwfib->header.SenderData,fib); 288 if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
289 aac_fib_complete(fib);
290 aac_fib_free(fib);
286 return 0; 291 return 0;
287 } 292 }
288 293
289 dev->queues->queue[AdapNormCmdQueue].numpending--;
290
291 if (fast) { 294 if (fast) {
292 /* 295 /*
293 * Doctor the fib 296 * Doctor the fib
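
The two dpcsup.c response paths now handle a timed-out fib the same way: the outstanding-command count is decremented unconditionally, and a fib that the error handler already marked FIB_CONTEXT_FLAG_TIMED_OUT is completed and freed instead of being logged and leaked. Condensed from the aac_intr_normal() hunk (aac_response_normal() does the same around its queue lock and continues its loop rather than returning):

	dev->queues->queue[AdapNormCmdQueue].numpending--;

	if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
		aac_fib_complete(fib);
		aac_fib_free(fib);
		return 0;
	}
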
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 0f948c2fb609..350ea7feb61d 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -5,7 +5,7 @@
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com) 8 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -82,8 +82,6 @@ static LIST_HEAD(aac_devices);
82static int aac_cfg_major = -1; 82static int aac_cfg_major = -1;
83char aac_driver_version[] = AAC_DRIVER_FULL_VERSION; 83char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
84 84
85extern int expose_physicals;
86
87/* 85/*
88 * Because of the way Linux names scsi devices, the order in this table has 86 * Because of the way Linux names scsi devices, the order in this table has
89 * become important. Check for on-board Raid first, add-in cards second. 87 * become important. Check for on-board Raid first, add-in cards second.
@@ -247,7 +245,19 @@ static struct aac_driver_ident aac_drivers[] = {
247 245
248static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 246static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
249{ 247{
248 struct Scsi_Host *host = cmd->device->host;
249 struct aac_dev *dev = (struct aac_dev *)host->hostdata;
250 u32 count = 0;
250 cmd->scsi_done = done; 251 cmd->scsi_done = done;
252 for (; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
253 struct fib * fib = &dev->fibs[count];
254 struct scsi_cmnd * command;
255 if (fib->hw_fib_va->header.XferState &&
256 ((command = fib->callback_data)) &&
257 (command == cmd) &&
258 (cmd->SCp.phase == AAC_OWNER_FIRMWARE))
259 return 0; /* Already owned by Adapter */
260 }
251 cmd->SCp.phase = AAC_OWNER_LOWLEVEL; 261 cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
252 return (aac_scsi_cmd(cmd) ? FAILED : 0); 262 return (aac_scsi_cmd(cmd) ? FAILED : 0);
253} 263}
@@ -446,6 +456,40 @@ static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
446 return aac_do_ioctl(dev, cmd, arg); 456 return aac_do_ioctl(dev, cmd, arg);
447} 457}
448 458
459static int aac_eh_abort(struct scsi_cmnd* cmd)
460{
461 struct scsi_device * dev = cmd->device;
462 struct Scsi_Host * host = dev->host;
463 struct aac_dev * aac = (struct aac_dev *)host->hostdata;
464 int count;
465 int ret = FAILED;
466
467 printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%d)\n",
468 AAC_DRIVERNAME,
469 host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun);
470 switch (cmd->cmnd[0]) {
471 case SERVICE_ACTION_IN:
472 if (!(aac->raw_io_interface) ||
473 !(aac->raw_io_64) ||
474 ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
475 break;
476 case INQUIRY:
477 case READ_CAPACITY:
478 case TEST_UNIT_READY:
479 /* Mark associated FIB to not complete, eh handler does this */
480 for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
481 struct fib * fib = &aac->fibs[count];
482 if (fib->hw_fib_va->header.XferState &&
483 (fib->callback_data == cmd)) {
484 fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
485 cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
486 ret = SUCCESS;
487 }
488 }
489 }
490 return ret;
491}
492
449/* 493/*
450 * aac_eh_reset - Reset command handling 494 * aac_eh_reset - Reset command handling
451 * @scsi_cmd: SCSI command block causing the reset 495 * @scsi_cmd: SCSI command block causing the reset
@@ -457,12 +501,20 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
457 struct Scsi_Host * host = dev->host; 501 struct Scsi_Host * host = dev->host;
458 struct scsi_cmnd * command; 502 struct scsi_cmnd * command;
459 int count; 503 int count;
460 struct aac_dev * aac; 504 struct aac_dev * aac = (struct aac_dev *)host->hostdata;
461 unsigned long flags; 505 unsigned long flags;
462 506
507 /* Mark the associated FIB to not complete, eh handler does this */
508 for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
509 struct fib * fib = &aac->fibs[count];
510 if (fib->hw_fib_va->header.XferState &&
511 (fib->callback_data == cmd)) {
512 fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
513 cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
514 }
515 }
463 printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n", 516 printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
464 AAC_DRIVERNAME); 517 AAC_DRIVERNAME);
465 aac = (struct aac_dev *)host->hostdata;
466 518
467 if ((count = aac_check_health(aac))) 519 if ((count = aac_check_health(aac)))
468 return count; 520 return count;
@@ -496,7 +548,7 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
496 ssleep(1); 548 ssleep(1);
497 } 549 }
498 printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME); 550 printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
499 return -ETIMEDOUT; 551 return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
500} 552}
501 553
502/** 554/**
@@ -796,6 +848,7 @@ static struct scsi_host_template aac_driver_template = {
796 .bios_param = aac_biosparm, 848 .bios_param = aac_biosparm,
797 .shost_attrs = aac_attrs, 849 .shost_attrs = aac_attrs,
798 .slave_configure = aac_slave_configure, 850 .slave_configure = aac_slave_configure,
851 .eh_abort_handler = aac_eh_abort,
799 .eh_host_reset_handler = aac_eh_reset, 852 .eh_host_reset_handler = aac_eh_reset,
800 .can_queue = AAC_NUM_IO_FIB, 853 .can_queue = AAC_NUM_IO_FIB,
801 .this_id = MAXIMUM_NUM_CONTAINERS, 854 .this_id = MAXIMUM_NUM_CONTAINERS,
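
The new aac_eh_abort() and the reworked aac_eh_reset() share one idea: walk the adapter's fib table, and for every outstanding fib whose callback_data is the command being recovered, set FIB_CONTEXT_FLAG_TIMED_OUT and hand ownership of the command to the error handler (the abort handler additionally reports SUCCESS when it finds a match). The marking loop, condensed from the hunks above:

	for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
		struct fib *fib = &aac->fibs[count];

		if (fib->hw_fib_va->header.XferState &&
		    fib->callback_data == cmd) {
			fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
			cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
		}
	}
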
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
index c76b611b6afb..a8ace5677813 100644
--- a/drivers/scsi/aacraid/nark.c
+++ b/drivers/scsi/aacraid/nark.c
@@ -74,9 +74,6 @@ static int aac_nark_ioremap(struct aac_dev * dev, u32 size)
74 74
75int aac_nark_init(struct aac_dev * dev) 75int aac_nark_init(struct aac_dev * dev)
76{ 76{
77 extern int _aac_rx_init(struct aac_dev *dev);
78 extern int aac_rx_select_comm(struct aac_dev *dev, int comm);
79
80 /* 77 /*
81 * Fill in the function dispatch table. 78 * Fill in the function dispatch table.
82 */ 79 */
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index d953c3fe998a..9c5fcfb398c2 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -45,7 +45,6 @@
45static int aac_rkt_select_comm(struct aac_dev *dev, int comm) 45static int aac_rkt_select_comm(struct aac_dev *dev, int comm)
46{ 46{
47 int retval; 47 int retval;
48 extern int aac_rx_select_comm(struct aac_dev *dev, int comm);
49 retval = aac_rx_select_comm(dev, comm); 48 retval = aac_rx_select_comm(dev, comm);
50 if (comm == AAC_COMM_MESSAGE) { 49 if (comm == AAC_COMM_MESSAGE) {
51 /* 50 /*
@@ -97,8 +96,6 @@ static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
97 96
98int aac_rkt_init(struct aac_dev *dev) 97int aac_rkt_init(struct aac_dev *dev)
99{ 98{
100 extern int _aac_rx_init(struct aac_dev *dev);
101
102 /* 99 /*
103 * Fill in the function dispatch table. 100 * Fill in the function dispatch table.
104 */ 101 */
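
The function-local extern declarations dropped from nark.c and rkt.c are not lost; the same prototypes now live in aacraid.h, as the header hunk at the top of this section shows:

	int _aac_rx_init(struct aac_dev *dev);
	int aac_rx_select_comm(struct aac_dev *dev, int comm);
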
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index d242e2611d67..0c71315cbf1a 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -5,7 +5,7 @@
5 * based on the old aacraid driver that is.. 5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com) 8 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -57,25 +57,25 @@ static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
57 * been enabled. 57 * been enabled.
58 * Check to see if this is our interrupt. If it isn't just return 58 * Check to see if this is our interrupt. If it isn't just return
59 */ 59 */
60 if (intstat & ~(dev->OIMR)) { 60 if (likely(intstat & ~(dev->OIMR))) {
61 bellbits = rx_readl(dev, OutboundDoorbellReg); 61 bellbits = rx_readl(dev, OutboundDoorbellReg);
62 if (bellbits & DoorBellPrintfReady) { 62 if (unlikely(bellbits & DoorBellPrintfReady)) {
63 aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5])); 63 aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
64 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady); 64 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
65 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone); 65 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
66 } 66 }
67 else if (bellbits & DoorBellAdapterNormCmdReady) { 67 else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) {
68 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady); 68 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
69 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); 69 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
70 } 70 }
71 else if (bellbits & DoorBellAdapterNormRespReady) { 71 else if (likely(bellbits & DoorBellAdapterNormRespReady)) {
72 rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady); 72 rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
73 aac_response_normal(&dev->queues->queue[HostNormRespQueue]); 73 aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
74 } 74 }
75 else if (bellbits & DoorBellAdapterNormCmdNotFull) { 75 else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) {
76 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); 76 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
77 } 77 }
78 else if (bellbits & DoorBellAdapterNormRespNotFull) { 78 else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) {
79 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); 79 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
80 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull); 80 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
81 } 81 }
@@ -88,11 +88,11 @@ static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
88{ 88{
89 struct aac_dev *dev = dev_id; 89 struct aac_dev *dev = dev_id;
90 u32 Index = rx_readl(dev, MUnit.OutboundQueue); 90 u32 Index = rx_readl(dev, MUnit.OutboundQueue);
91 if (Index == 0xFFFFFFFFL) 91 if (unlikely(Index == 0xFFFFFFFFL))
92 Index = rx_readl(dev, MUnit.OutboundQueue); 92 Index = rx_readl(dev, MUnit.OutboundQueue);
93 if (Index != 0xFFFFFFFFL) { 93 if (likely(Index != 0xFFFFFFFFL)) {
94 do { 94 do {
95 if (aac_intr_normal(dev, Index)) { 95 if (unlikely(aac_intr_normal(dev, Index))) {
96 rx_writel(dev, MUnit.OutboundQueue, Index); 96 rx_writel(dev, MUnit.OutboundQueue, Index);
97 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady); 97 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
98 } 98 }
@@ -204,7 +204,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
204 */ 204 */
205 msleep(1); 205 msleep(1);
206 } 206 }
207 if (ok != 1) { 207 if (unlikely(ok != 1)) {
208 /* 208 /*
209 * Restore interrupt mask even though we timed out 209 * Restore interrupt mask even though we timed out
210 */ 210 */
@@ -294,7 +294,7 @@ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
294 * Start up processing on an i960 based AAC adapter 294 * Start up processing on an i960 based AAC adapter
295 */ 295 */
296 296
297void aac_rx_start_adapter(struct aac_dev *dev) 297static void aac_rx_start_adapter(struct aac_dev *dev)
298{ 298{
299 struct aac_init *init; 299 struct aac_init *init;
300 300
@@ -319,12 +319,12 @@ static int aac_rx_check_health(struct aac_dev *dev)
319 /* 319 /*
320 * Check to see if the board failed any self tests. 320 * Check to see if the board failed any self tests.
321 */ 321 */
322 if (status & SELF_TEST_FAILED) 322 if (unlikely(status & SELF_TEST_FAILED))
323 return -1; 323 return -1;
324 /* 324 /*
325 * Check to see if the board panic'd. 325 * Check to see if the board panic'd.
326 */ 326 */
327 if (status & KERNEL_PANIC) { 327 if (unlikely(status & KERNEL_PANIC)) {
328 char * buffer; 328 char * buffer;
329 struct POSTSTATUS { 329 struct POSTSTATUS {
330 __le32 Post_Command; 330 __le32 Post_Command;
@@ -333,15 +333,15 @@ static int aac_rx_check_health(struct aac_dev *dev)
333 dma_addr_t paddr, baddr; 333 dma_addr_t paddr, baddr;
334 int ret; 334 int ret;
335 335
336 if ((status & 0xFF000000L) == 0xBC000000L) 336 if (likely((status & 0xFF000000L) == 0xBC000000L))
337 return (status >> 16) & 0xFF; 337 return (status >> 16) & 0xFF;
338 buffer = pci_alloc_consistent(dev->pdev, 512, &baddr); 338 buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
339 ret = -2; 339 ret = -2;
340 if (buffer == NULL) 340 if (unlikely(buffer == NULL))
341 return ret; 341 return ret;
342 post = pci_alloc_consistent(dev->pdev, 342 post = pci_alloc_consistent(dev->pdev,
343 sizeof(struct POSTSTATUS), &paddr); 343 sizeof(struct POSTSTATUS), &paddr);
344 if (post == NULL) { 344 if (unlikely(post == NULL)) {
345 pci_free_consistent(dev->pdev, 512, buffer, baddr); 345 pci_free_consistent(dev->pdev, 512, buffer, baddr);
346 return ret; 346 return ret;
347 } 347 }
@@ -353,7 +353,7 @@ static int aac_rx_check_health(struct aac_dev *dev)
353 NULL, NULL, NULL, NULL, NULL); 353 NULL, NULL, NULL, NULL, NULL);
354 pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS), 354 pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
355 post, paddr); 355 post, paddr);
356 if ((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X'))) { 356 if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
357 ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10); 357 ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
358 ret <<= 4; 358 ret <<= 4;
359 ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10); 359 ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
@@ -364,7 +364,7 @@ static int aac_rx_check_health(struct aac_dev *dev)
364 /* 364 /*
365 * Wait for the adapter to be up and running. 365 * Wait for the adapter to be up and running.
366 */ 366 */
367 if (!(status & KERNEL_UP_AND_RUNNING)) 367 if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
368 return -3; 368 return -3;
369 /* 369 /*
370 * Everything is OK 370 * Everything is OK
@@ -387,7 +387,7 @@ static int aac_rx_deliver_producer(struct fib * fib)
387 unsigned long nointr = 0; 387 unsigned long nointr = 0;
388 388
389 spin_lock_irqsave(q->lock, qflags); 389 spin_lock_irqsave(q->lock, qflags);
390 aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib, 1, fib, &nointr); 390 aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);
391 391
392 q->numpending++; 392 q->numpending++;
393 *(q->headers.producer) = cpu_to_le32(Index + 1); 393 *(q->headers.producer) = cpu_to_le32(Index + 1);
@@ -419,9 +419,9 @@ static int aac_rx_deliver_message(struct fib * fib)
419 spin_unlock_irqrestore(q->lock, qflags); 419 spin_unlock_irqrestore(q->lock, qflags);
420 for(;;) { 420 for(;;) {
421 Index = rx_readl(dev, MUnit.InboundQueue); 421 Index = rx_readl(dev, MUnit.InboundQueue);
422 if (Index == 0xFFFFFFFFL) 422 if (unlikely(Index == 0xFFFFFFFFL))
423 Index = rx_readl(dev, MUnit.InboundQueue); 423 Index = rx_readl(dev, MUnit.InboundQueue);
424 if (Index != 0xFFFFFFFFL) 424 if (likely(Index != 0xFFFFFFFFL))
425 break; 425 break;
426 if (--count == 0) { 426 if (--count == 0) {
427 spin_lock_irqsave(q->lock, qflags); 427 spin_lock_irqsave(q->lock, qflags);
@@ -437,7 +437,7 @@ static int aac_rx_deliver_message(struct fib * fib)
437 device += sizeof(u32); 437 device += sizeof(u32);
438 writel((u32)(addr >> 32), device); 438 writel((u32)(addr >> 32), device);
439 device += sizeof(u32); 439 device += sizeof(u32);
440 writel(le16_to_cpu(fib->hw_fib->header.Size), device); 440 writel(le16_to_cpu(fib->hw_fib_va->header.Size), device);
441 rx_writel(dev, MUnit.InboundQueue, Index); 441 rx_writel(dev, MUnit.InboundQueue, Index);
442 return 0; 442 return 0;
443} 443}
@@ -460,22 +460,34 @@ static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
460 return 0; 460 return 0;
461} 461}
462 462
463static int aac_rx_restart_adapter(struct aac_dev *dev) 463static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
464{ 464{
465 u32 var; 465 u32 var;
466 466
467 printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", 467 if (bled)
468 dev->name, dev->id); 468 printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
469 469 dev->name, dev->id, bled);
470 if (aac_rx_check_health(dev) <= 0) 470 else {
471 return 1; 471 bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
472 if (rx_sync_cmd(dev, IOP_RESET, 0, 0, 0, 0, 0, 0, 472 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
473 &var, NULL, NULL, NULL, NULL)) 473 if (!bled && (var != 0x00000001))
474 return 1; 474 bled = -EINVAL;
475 }
476 if (bled && (bled != -ETIMEDOUT))
477 bled = aac_adapter_sync_cmd(dev, IOP_RESET,
478 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
479
480 if (bled && (bled != -ETIMEDOUT))
481 return -EINVAL;
482 if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */
483 rx_writel(dev, MUnit.reserved2, 3);
484 msleep(5000); /* Delay 5 seconds */
485 var = 0x00000001;
486 }
475 if (var != 0x00000001) 487 if (var != 0x00000001)
476 return 1; 488 return -EINVAL;
477 if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) 489 if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
478 return 1; 490 return -ENODEV;
479 return 0; 491 return 0;
480} 492}
481 493
@@ -517,24 +529,29 @@ int _aac_rx_init(struct aac_dev *dev)
517{ 529{
518 unsigned long start; 530 unsigned long start;
519 unsigned long status; 531 unsigned long status;
520 int instance; 532 int restart = 0;
521 const char * name; 533 int instance = dev->id;
522 534 const char * name = dev->name;
523 instance = dev->id;
524 name = dev->name;
525 535
526 if (aac_adapter_ioremap(dev, dev->base_size)) { 536 if (aac_adapter_ioremap(dev, dev->base_size)) {
527 printk(KERN_WARNING "%s: unable to map adapter.\n", name); 537 printk(KERN_WARNING "%s: unable to map adapter.\n", name);
528 goto error_iounmap; 538 goto error_iounmap;
529 } 539 }
530 540
541 /* Failure to reset here is an option ... */
542 dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
543 if ((((status & 0xff) != 0xff) || reset_devices) &&
544 !aac_rx_restart_adapter(dev, 0))
545 ++restart;
531 /* 546 /*
532 * Check to see if the board panic'd while booting. 547 * Check to see if the board panic'd while booting.
533 */ 548 */
534 status = rx_readl(dev, MUnit.OMRx[0]); 549 status = rx_readl(dev, MUnit.OMRx[0]);
535 if (status & KERNEL_PANIC) 550 if (status & KERNEL_PANIC) {
536 if (aac_rx_restart_adapter(dev)) 551 if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))
537 goto error_iounmap; 552 goto error_iounmap;
553 ++restart;
554 }
538 /* 555 /*
539 * Check to see if the board failed any self tests. 556 * Check to see if the board failed any self tests.
540 */ 557 */
@@ -556,12 +573,23 @@ int _aac_rx_init(struct aac_dev *dev)
556 */ 573 */
557 while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING)) 574 while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
558 { 575 {
559 if(time_after(jiffies, start+startup_timeout*HZ)) 576 if ((restart &&
560 { 577 (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
578 time_after(jiffies, start+HZ*startup_timeout)) {
561 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", 579 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
562 dev->name, instance, status); 580 dev->name, instance, status);
563 goto error_iounmap; 581 goto error_iounmap;
564 } 582 }
583 if (!restart &&
584 ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
585 time_after(jiffies, start + HZ *
586 ((startup_timeout > 60)
587 ? (startup_timeout - 60)
588 : (startup_timeout / 2))))) {
589 if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev))))
590 start = jiffies;
591 ++restart;
592 }
565 msleep(1); 593 msleep(1);
566 } 594 }
567 /* 595 /*
@@ -572,6 +600,7 @@ int _aac_rx_init(struct aac_dev *dev)
572 dev->a_ops.adapter_notify = aac_rx_notify_adapter; 600 dev->a_ops.adapter_notify = aac_rx_notify_adapter;
573 dev->a_ops.adapter_sync_cmd = rx_sync_cmd; 601 dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
574 dev->a_ops.adapter_check_health = aac_rx_check_health; 602 dev->a_ops.adapter_check_health = aac_rx_check_health;
603 dev->a_ops.adapter_restart = aac_rx_restart_adapter;
575 604
576 /* 605 /*
577 * First clear out all interrupts. Then enable the one's that we 606 * First clear out all interrupts. Then enable the one's that we
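
The rx.c changes tie the new two-argument aac_rx_restart_adapter() into bring-up: _aac_rx_init() now optionally restarts the controller before waiting for KERNEL_UP_AND_RUNNING, either when the interrupt-mask register does not read back as 0xff (presumably a sign the firmware is already live) or when the kernel was booted with reset_devices. Condensed from the patch:

	/* Failure to reset here is an option ... */
	dev->OIMR = status = rx_readb(dev, MUnit.OIMR);
	if ((((status & 0xff) != 0xff) || reset_devices) &&
	    !aac_rx_restart_adapter(dev, 0))
		++restart;
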
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx
index 911ea1756e55..5e6620f8dabc 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic79xx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx
@@ -57,18 +57,6 @@ config AIC79XX_BUILD_FIRMWARE
57 or modify the assembler Makefile or the files it includes if your 57 or modify the assembler Makefile or the files it includes if your
58 build environment is different than that of the author. 58 build environment is different than that of the author.
59 59
60config AIC79XX_ENABLE_RD_STRM
61 bool "Enable Read Streaming for All Targets"
62 depends on SCSI_AIC79XX
63 default n
64 help
65 Read Streaming is a U320 protocol option that should enhance
66 performance. Early U320 drive firmware actually performs slower
67 with read streaming enabled so it is disabled by default. Read
68 Streaming can be configured in much the same way as tagged queueing
69 using the "rd_strm" command line option. See
70 drivers/scsi/aic7xxx/README.aic79xx for details.
71
72config AIC79XX_DEBUG_ENABLE 60config AIC79XX_DEBUG_ENABLE
73 bool "Compile in Debugging Code" 61 bool "Compile in Debugging Code"
74 depends on SCSI_AIC79XX 62 depends on SCSI_AIC79XX
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
index cd93f9a8611f..88da670a7915 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
@@ -50,16 +50,6 @@ config AIC7XXX_RESET_DELAY_MS
50 50
51 Default: 5000 (5 seconds) 51 Default: 5000 (5 seconds)
52 52
53config AIC7XXX_PROBE_EISA_VL
54 bool "Probe for EISA and VL AIC7XXX Adapters"
55 depends on SCSI_AIC7XXX && EISA
56 help
57 Probe for EISA and VLB Aic7xxx controllers. In many newer systems,
58 the invasive probes necessary to detect these controllers can cause
59 other devices to fail. For this reason, the non-PCI probe code is
60 disabled by default. The current value of this option can be "toggled"
61 via the no_probe kernel command line option.
62
63config AIC7XXX_BUILD_FIRMWARE 53config AIC7XXX_BUILD_FIRMWARE
64 bool "Build Adapter Firmware with Kernel Build" 54 bool "Build Adapter Firmware with Kernel Build"
65 depends on SCSI_AIC7XXX && !PREVENT_FIRMWARE_BUILD 55 depends on SCSI_AIC7XXX && !PREVENT_FIRMWARE_BUILD
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 2be03e975d97..6054881f21f1 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -363,6 +363,8 @@ static int ahd_linux_run_command(struct ahd_softc*,
363 struct scsi_cmnd *); 363 struct scsi_cmnd *);
364static void ahd_linux_setup_tag_info_global(char *p); 364static void ahd_linux_setup_tag_info_global(char *p);
365static int aic79xx_setup(char *c); 365static int aic79xx_setup(char *c);
366static void ahd_freeze_simq(struct ahd_softc *ahd);
367static void ahd_release_simq(struct ahd_softc *ahd);
366 368
367static int ahd_linux_unit; 369static int ahd_linux_unit;
368 370
@@ -2016,13 +2018,13 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
2016 cmd->scsi_done(cmd); 2018 cmd->scsi_done(cmd);
2017} 2019}
2018 2020
2019void 2021static void
2020ahd_freeze_simq(struct ahd_softc *ahd) 2022ahd_freeze_simq(struct ahd_softc *ahd)
2021{ 2023{
2022 scsi_block_requests(ahd->platform_data->host); 2024 scsi_block_requests(ahd->platform_data->host);
2023} 2025}
2024 2026
2025void 2027static void
2026ahd_release_simq(struct ahd_softc *ahd) 2028ahd_release_simq(struct ahd_softc *ahd)
2027{ 2029{
2028 scsi_unblock_requests(ahd->platform_data->host); 2030 scsi_unblock_requests(ahd->platform_data->host);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 147c83c456a5..9218f29314fa 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -837,8 +837,6 @@ int ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg);
837void ahd_platform_free(struct ahd_softc *ahd); 837void ahd_platform_free(struct ahd_softc *ahd);
838void ahd_platform_init(struct ahd_softc *ahd); 838void ahd_platform_init(struct ahd_softc *ahd);
839void ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb); 839void ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb);
840void ahd_freeze_simq(struct ahd_softc *ahd);
841void ahd_release_simq(struct ahd_softc *ahd);
842 840
843static __inline void 841static __inline void
844ahd_freeze_scb(struct scb *scb) 842ahd_freeze_scb(struct scb *scb)
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index 954c7c24501d..e1bd57b9f23d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -1278,11 +1278,6 @@ typedef enum {
1278 AHC_QUEUE_TAGGED 1278 AHC_QUEUE_TAGGED
1279} ahc_queue_alg; 1279} ahc_queue_alg;
1280 1280
1281void ahc_set_tags(struct ahc_softc *ahc,
1282 struct scsi_cmnd *cmd,
1283 struct ahc_devinfo *devinfo,
1284 ahc_queue_alg alg);
1285
1286/**************************** Target Mode *************************************/ 1281/**************************** Target Mode *************************************/
1287#ifdef AHC_TARGET_MODE 1282#ifdef AHC_TARGET_MODE
1288void ahc_send_lstate_events(struct ahc_softc *, 1283void ahc_send_lstate_events(struct ahc_softc *,
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 50ef785224de..75733b09f27a 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -2073,7 +2073,7 @@ ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2073/* 2073/*
2074 * Update the current state of tagged queuing for a given target. 2074 * Update the current state of tagged queuing for a given target.
2075 */ 2075 */
2076void 2076static void
2077ahc_set_tags(struct ahc_softc *ahc, struct scsi_cmnd *cmd, 2077ahc_set_tags(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
2078 struct ahc_devinfo *devinfo, ahc_queue_alg alg) 2078 struct ahc_devinfo *devinfo, ahc_queue_alg alg)
2079{ 2079{
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 61f6024b61ba..2a458d66b6ff 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -202,31 +202,29 @@ static const char * get_sa_name(const struct value_name_pair * arr,
202} 202}
203 203
204/* attempt to guess cdb length if cdb_len==0 . No trailing linefeed. */ 204/* attempt to guess cdb length if cdb_len==0 . No trailing linefeed. */
205static void print_opcode_name(unsigned char * cdbp, int cdb_len, 205static void print_opcode_name(unsigned char * cdbp, int cdb_len)
206 int start_of_line)
207{ 206{
208 int sa, len, cdb0; 207 int sa, len, cdb0;
209 const char * name; 208 const char * name;
210 const char * leadin = start_of_line ? KERN_INFO : "";
211 209
212 cdb0 = cdbp[0]; 210 cdb0 = cdbp[0];
213 switch(cdb0) { 211 switch(cdb0) {
214 case VARIABLE_LENGTH_CMD: 212 case VARIABLE_LENGTH_CMD:
215 len = cdbp[7] + 8; 213 len = cdbp[7] + 8;
216 if (len < 10) { 214 if (len < 10) {
217 printk("%sshort variable length command, " 215 printk("short variable length command, "
218 "len=%d ext_len=%d", leadin, len, cdb_len); 216 "len=%d ext_len=%d", len, cdb_len);
219 break; 217 break;
220 } 218 }
221 sa = (cdbp[8] << 8) + cdbp[9]; 219 sa = (cdbp[8] << 8) + cdbp[9];
222 name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa); 220 name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa);
223 if (name) { 221 if (name) {
224 printk("%s%s", leadin, name); 222 printk("%s", name);
225 if ((cdb_len > 0) && (len != cdb_len)) 223 if ((cdb_len > 0) && (len != cdb_len))
226 printk(", in_cdb_len=%d, ext_len=%d", 224 printk(", in_cdb_len=%d, ext_len=%d",
227 len, cdb_len); 225 len, cdb_len);
228 } else { 226 } else {
229 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 227 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
230 if ((cdb_len > 0) && (len != cdb_len)) 228 if ((cdb_len > 0) && (len != cdb_len))
231 printk(", in_cdb_len=%d, ext_len=%d", 229 printk(", in_cdb_len=%d, ext_len=%d",
232 len, cdb_len); 230 len, cdb_len);
@@ -236,83 +234,80 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len,
236 sa = cdbp[1] & 0x1f; 234 sa = cdbp[1] & 0x1f;
237 name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa); 235 name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa);
238 if (name) 236 if (name)
239 printk("%s%s", leadin, name); 237 printk("%s", name);
240 else 238 else
241 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 239 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
242 break; 240 break;
243 case MAINTENANCE_OUT: 241 case MAINTENANCE_OUT:
244 sa = cdbp[1] & 0x1f; 242 sa = cdbp[1] & 0x1f;
245 name = get_sa_name(maint_out_arr, MAINT_OUT_SZ, sa); 243 name = get_sa_name(maint_out_arr, MAINT_OUT_SZ, sa);
246 if (name) 244 if (name)
247 printk("%s%s", leadin, name); 245 printk("%s", name);
248 else 246 else
249 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 247 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
250 break; 248 break;
251 case SERVICE_ACTION_IN_12: 249 case SERVICE_ACTION_IN_12:
252 sa = cdbp[1] & 0x1f; 250 sa = cdbp[1] & 0x1f;
253 name = get_sa_name(serv_in12_arr, SERV_IN12_SZ, sa); 251 name = get_sa_name(serv_in12_arr, SERV_IN12_SZ, sa);
254 if (name) 252 if (name)
255 printk("%s%s", leadin, name); 253 printk("%s", name);
256 else 254 else
257 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 255 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
258 break; 256 break;
259 case SERVICE_ACTION_OUT_12: 257 case SERVICE_ACTION_OUT_12:
260 sa = cdbp[1] & 0x1f; 258 sa = cdbp[1] & 0x1f;
261 name = get_sa_name(serv_out12_arr, SERV_OUT12_SZ, sa); 259 name = get_sa_name(serv_out12_arr, SERV_OUT12_SZ, sa);
262 if (name) 260 if (name)
263 printk("%s%s", leadin, name); 261 printk("%s", name);
264 else 262 else
265 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 263 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
266 break; 264 break;
267 case SERVICE_ACTION_IN_16: 265 case SERVICE_ACTION_IN_16:
268 sa = cdbp[1] & 0x1f; 266 sa = cdbp[1] & 0x1f;
269 name = get_sa_name(serv_in16_arr, SERV_IN16_SZ, sa); 267 name = get_sa_name(serv_in16_arr, SERV_IN16_SZ, sa);
270 if (name) 268 if (name)
271 printk("%s%s", leadin, name); 269 printk("%s", name);
272 else 270 else
273 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 271 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
274 break; 272 break;
275 case SERVICE_ACTION_OUT_16: 273 case SERVICE_ACTION_OUT_16:
276 sa = cdbp[1] & 0x1f; 274 sa = cdbp[1] & 0x1f;
277 name = get_sa_name(serv_out16_arr, SERV_OUT16_SZ, sa); 275 name = get_sa_name(serv_out16_arr, SERV_OUT16_SZ, sa);
278 if (name) 276 if (name)
279 printk("%s%s", leadin, name); 277 printk("%s", name);
280 else 278 else
281 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 279 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
282 break; 280 break;
283 default: 281 default:
284 if (cdb0 < 0xc0) { 282 if (cdb0 < 0xc0) {
285 name = cdb_byte0_names[cdb0]; 283 name = cdb_byte0_names[cdb0];
286 if (name) 284 if (name)
287 printk("%s%s", leadin, name); 285 printk("%s", name);
288 else 286 else
289 printk("%scdb[0]=0x%x (reserved)", 287 printk("cdb[0]=0x%x (reserved)", cdb0);
290 leadin, cdb0);
291 } else 288 } else
292 printk("%scdb[0]=0x%x (vendor)", leadin, cdb0); 289 printk("cdb[0]=0x%x (vendor)", cdb0);
293 break; 290 break;
294 } 291 }
295} 292}
296 293
297#else /* ifndef CONFIG_SCSI_CONSTANTS */ 294#else /* ifndef CONFIG_SCSI_CONSTANTS */
298 295
299static void print_opcode_name(unsigned char * cdbp, int cdb_len, 296static void print_opcode_name(unsigned char * cdbp, int cdb_len)
300 int start_of_line)
301{ 297{
302 int sa, len, cdb0; 298 int sa, len, cdb0;
303 const char * leadin = start_of_line ? KERN_INFO : "";
304 299
305 cdb0 = cdbp[0]; 300 cdb0 = cdbp[0];
306 switch(cdb0) { 301 switch(cdb0) {
307 case VARIABLE_LENGTH_CMD: 302 case VARIABLE_LENGTH_CMD:
308 len = cdbp[7] + 8; 303 len = cdbp[7] + 8;
309 if (len < 10) { 304 if (len < 10) {
310 printk("%sshort opcode=0x%x command, len=%d " 305 printk("short opcode=0x%x command, len=%d "
311 "ext_len=%d", leadin, cdb0, len, cdb_len); 306 "ext_len=%d", cdb0, len, cdb_len);
312 break; 307 break;
313 } 308 }
314 sa = (cdbp[8] << 8) + cdbp[9]; 309 sa = (cdbp[8] << 8) + cdbp[9];
315 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 310 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
316 if (len != cdb_len) 311 if (len != cdb_len)
317 printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len); 312 printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len);
318 break; 313 break;
@@ -323,49 +318,48 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len,
323 case SERVICE_ACTION_IN_16: 318 case SERVICE_ACTION_IN_16:
324 case SERVICE_ACTION_OUT_16: 319 case SERVICE_ACTION_OUT_16:
325 sa = cdbp[1] & 0x1f; 320 sa = cdbp[1] & 0x1f;
326 printk("%scdb[0]=0x%x, sa=0x%x", leadin, cdb0, sa); 321 printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
327 break; 322 break;
328 default: 323 default:
329 if (cdb0 < 0xc0) 324 if (cdb0 < 0xc0)
330 printk("%scdb[0]=0x%x", leadin, cdb0); 325 printk("cdb[0]=0x%x", cdb0);
331 else 326 else
332 printk("%scdb[0]=0x%x (vendor)", leadin, cdb0); 327 printk("cdb[0]=0x%x (vendor)", cdb0);
333 break; 328 break;
334 } 329 }
335} 330}
336#endif 331#endif
337 332
338void __scsi_print_command(unsigned char *command) 333void __scsi_print_command(unsigned char *cdb)
339{ 334{
340 int k, len; 335 int k, len;
341 336
342 print_opcode_name(command, 0, 1); 337 print_opcode_name(cdb, 0);
343 if (VARIABLE_LENGTH_CMD == command[0]) 338 if (VARIABLE_LENGTH_CMD == cdb[0])
344 len = command[7] + 8; 339 len = cdb[7] + 8;
345 else 340 else
346 len = COMMAND_SIZE(command[0]); 341 len = COMMAND_SIZE(cdb[0]);
347 /* print out all bytes in cdb */ 342 /* print out all bytes in cdb */
348 for (k = 0; k < len; ++k) 343 for (k = 0; k < len; ++k)
349 printk(" %02x", command[k]); 344 printk(" %02x", cdb[k]);
350 printk("\n"); 345 printk("\n");
351} 346}
352EXPORT_SYMBOL(__scsi_print_command); 347EXPORT_SYMBOL(__scsi_print_command);
353 348
354/* This function (perhaps with the addition of peripheral device type) 349void scsi_print_command(struct scsi_cmnd *cmd)
355 * is more approriate than __scsi_print_command(). Perhaps that static
356 * can be dropped later if it replaces the __scsi_print_command version.
357 */
358static void scsi_print_cdb(unsigned char *cdb, int cdb_len, int start_of_line)
359{ 350{
360 int k; 351 int k;
361 352
362 print_opcode_name(cdb, cdb_len, start_of_line); 353 scmd_printk(KERN_INFO, cmd, "CDB: ");
354 print_opcode_name(cmd->cmnd, cmd->cmd_len);
355
363 /* print out all bytes in cdb */ 356 /* print out all bytes in cdb */
364 printk(":"); 357 printk(":");
365 for (k = 0; k < cdb_len; ++k) 358 for (k = 0; k < cmd->cmd_len; ++k)
366 printk(" %02x", cdb[k]); 359 printk(" %02x", cmd->cmnd[k]);
367 printk("\n"); 360 printk("\n");
368} 361}
362EXPORT_SYMBOL(scsi_print_command);
369 363
370/** 364/**
371 * 365 *
@@ -410,7 +404,11 @@ struct error_info {
410 const char * text; 404 const char * text;
411}; 405};
412 406
413static struct error_info additional[] = 407/*
408 * The canonical list of T10 Additional Sense Codes is available at:
409 * http://www.t10.org/lists/asc-num.txt
410 */
411static const struct error_info additional[] =
414{ 412{
415 {0x0000, "No additional sense information"}, 413 {0x0000, "No additional sense information"},
416 {0x0001, "Filemark detected"}, 414 {0x0001, "Filemark detected"},
@@ -714,6 +712,7 @@ static struct error_info additional[] =
714 712
715 {0x2F00, "Commands cleared by another initiator"}, 713 {0x2F00, "Commands cleared by another initiator"},
716 {0x2F01, "Commands cleared by power loss notification"}, 714 {0x2F01, "Commands cleared by power loss notification"},
715 {0x2F02, "Commands cleared by device server"},
717 716
718 {0x3000, "Incompatible medium installed"}, 717 {0x3000, "Incompatible medium installed"},
719 {0x3001, "Cannot read medium - unknown format"}, 718 {0x3001, "Cannot read medium - unknown format"},
@@ -1176,67 +1175,77 @@ scsi_extd_sense_format(unsigned char asc, unsigned char ascq) {
1176} 1175}
1177EXPORT_SYMBOL(scsi_extd_sense_format); 1176EXPORT_SYMBOL(scsi_extd_sense_format);
1178 1177
1179/* Print extended sense information; no leadin, no linefeed */ 1178void
1180static void
1181scsi_show_extd_sense(unsigned char asc, unsigned char ascq) 1179scsi_show_extd_sense(unsigned char asc, unsigned char ascq)
1182{ 1180{
1183 const char *extd_sense_fmt = scsi_extd_sense_format(asc, ascq); 1181 const char *extd_sense_fmt = scsi_extd_sense_format(asc, ascq);
1184 1182
1185 if (extd_sense_fmt) { 1183 if (extd_sense_fmt) {
1186 if (strstr(extd_sense_fmt, "%x")) { 1184 if (strstr(extd_sense_fmt, "%x")) {
1187 printk("Additional sense: "); 1185 printk("Add. Sense: ");
1188 printk(extd_sense_fmt, ascq); 1186 printk(extd_sense_fmt, ascq);
1189 } else 1187 } else
1190 printk("Additional sense: %s", extd_sense_fmt); 1188 printk("Add. Sense: %s", extd_sense_fmt);
1191 } else { 1189 } else {
1192 if (asc >= 0x80) 1190 if (asc >= 0x80)
1193 printk("<<vendor>> ASC=0x%x ASCQ=0x%x", asc, ascq); 1191 printk("<<vendor>> ASC=0x%x ASCQ=0x%x", asc,
1192 ascq);
1194 if (ascq >= 0x80) 1193 if (ascq >= 0x80)
1195 printk("ASC=0x%x <<vendor>> ASCQ=0x%x", asc, ascq); 1194 printk("ASC=0x%x <<vendor>> ASCQ=0x%x", asc,
1195 ascq);
1196 else 1196 else
1197 printk("ASC=0x%x ASCQ=0x%x", asc, ascq); 1197 printk("ASC=0x%x ASCQ=0x%x", asc, ascq);
1198 } 1198 }
1199
1200 printk("\n");
1199} 1201}
1202EXPORT_SYMBOL(scsi_show_extd_sense);
1200 1203
1201void 1204void
1202scsi_print_sense_hdr(const char *name, struct scsi_sense_hdr *sshdr) 1205scsi_show_sense_hdr(struct scsi_sense_hdr *sshdr)
1203{ 1206{
1204 const char *sense_txt; 1207 const char *sense_txt;
1205 /* An example of deferred is when an earlier write to disk cache
1206 * succeeded, but now the disk discovers that it cannot write the
1207 * data to the magnetic media.
1208 */
1209 const char *error = scsi_sense_is_deferred(sshdr) ?
1210 "<<DEFERRED>>" : "Current";
1211 printk(KERN_INFO "%s: %s", name, error);
1212 if (sshdr->response_code >= 0x72)
1213 printk(" [descriptor]");
1214 1208
1215 sense_txt = scsi_sense_key_string(sshdr->sense_key); 1209 sense_txt = scsi_sense_key_string(sshdr->sense_key);
1216 if (sense_txt) 1210 if (sense_txt)
1217 printk(": sense key: %s\n", sense_txt); 1211 printk("Sense Key : %s ", sense_txt);
1218 else 1212 else
1219 printk(": sense key=0x%x\n", sshdr->sense_key); 1213 printk("Sense Key : 0x%x ", sshdr->sense_key);
1220 printk(KERN_INFO " "); 1214
1221 scsi_show_extd_sense(sshdr->asc, sshdr->ascq); 1215 printk("%s", scsi_sense_is_deferred(sshdr) ? "[deferred] " :
1216 "[current] ");
1217
1218 if (sshdr->response_code >= 0x72)
1219 printk("[descriptor]");
1220
1222 printk("\n"); 1221 printk("\n");
1223} 1222}
1223EXPORT_SYMBOL(scsi_show_sense_hdr);
1224
1225/*
1226 * Print normalized SCSI sense header with a prefix.
1227 */
1228void
1229scsi_print_sense_hdr(const char *name, struct scsi_sense_hdr *sshdr)
1230{
1231 printk(KERN_INFO "%s: ", name);
1232 scsi_show_sense_hdr(sshdr);
1233 printk(KERN_INFO "%s: ", name);
1234 scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
1235}
1224EXPORT_SYMBOL(scsi_print_sense_hdr); 1236EXPORT_SYMBOL(scsi_print_sense_hdr);
1225 1237
1226/* Print sense information */
1227void 1238void
1228__scsi_print_sense(const char *name, const unsigned char *sense_buffer, 1239scsi_decode_sense_buffer(const unsigned char *sense_buffer, int sense_len,
1229 int sense_len) 1240 struct scsi_sense_hdr *sshdr)
1230{ 1241{
1231 int k, num, res; 1242 int k, num, res;
1232 unsigned int info;
1233 struct scsi_sense_hdr ssh;
1234 1243
1235 res = scsi_normalize_sense(sense_buffer, sense_len, &ssh); 1244 res = scsi_normalize_sense(sense_buffer, sense_len, sshdr);
1236 if (0 == res) { 1245 if (0 == res) {
1237 /* this may be SCSI-1 sense data */ 1246 /* this may be SCSI-1 sense data */
1238 num = (sense_len < 32) ? sense_len : 32; 1247 num = (sense_len < 32) ? sense_len : 32;
1239 printk(KERN_INFO "Unrecognized sense data (in hex):"); 1248 printk("Unrecognized sense data (in hex):");
1240 for (k = 0; k < num; ++k) { 1249 for (k = 0; k < num; ++k) {
1241 if (0 == (k % 16)) { 1250 if (0 == (k % 16)) {
1242 printk("\n"); 1251 printk("\n");
@@ -1247,11 +1256,20 @@ __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
1247 printk("\n"); 1256 printk("\n");
1248 return; 1257 return;
1249 } 1258 }
1250 scsi_print_sense_hdr(name, &ssh); 1259}
1251 if (ssh.response_code < 0x72) { 1260
1261void
1262scsi_decode_sense_extras(const unsigned char *sense_buffer, int sense_len,
1263 struct scsi_sense_hdr *sshdr)
1264{
1265 int k, num, res;
1266
1267 if (sshdr->response_code < 0x72)
1268 {
1252 /* only decode extras for "fixed" format now */ 1269 /* only decode extras for "fixed" format now */
1253 char buff[80]; 1270 char buff[80];
1254 int blen, fixed_valid; 1271 int blen, fixed_valid;
1272 unsigned int info;
1255 1273
1256 fixed_valid = sense_buffer[0] & 0x80; 1274 fixed_valid = sense_buffer[0] & 0x80;
1257 info = ((sense_buffer[3] << 24) | (sense_buffer[4] << 16) | 1275 info = ((sense_buffer[3] << 24) | (sense_buffer[4] << 16) |
@@ -1281,13 +1299,13 @@ __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
1281 res += snprintf(buff + res, blen - res, "ILI"); 1299 res += snprintf(buff + res, blen - res, "ILI");
1282 } 1300 }
1283 if (res > 0) 1301 if (res > 0)
1284 printk(KERN_INFO "%s\n", buff); 1302 printk("%s\n", buff);
1285 } else if (ssh.additional_length > 0) { 1303 } else if (sshdr->additional_length > 0) {
1286 /* descriptor format with sense descriptors */ 1304 /* descriptor format with sense descriptors */
1287 num = 8 + ssh.additional_length; 1305 num = 8 + sshdr->additional_length;
1288 num = (sense_len < num) ? sense_len : num; 1306 num = (sense_len < num) ? sense_len : num;
1289 printk(KERN_INFO "Descriptor sense data with sense " 1307 printk("Descriptor sense data with sense descriptors "
1290 "descriptors (in hex):"); 1308 "(in hex):");
1291 for (k = 0; k < num; ++k) { 1309 for (k = 0; k < num; ++k) {
1292 if (0 == (k % 16)) { 1310 if (0 == (k % 16)) {
1293 printk("\n"); 1311 printk("\n");
@@ -1295,29 +1313,42 @@ __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
1295 } 1313 }
1296 printk("%02x ", sense_buffer[k]); 1314 printk("%02x ", sense_buffer[k]);
1297 } 1315 }
1316
1298 printk("\n"); 1317 printk("\n");
1299 } 1318 }
1319
1300} 1320}
1301EXPORT_SYMBOL(__scsi_print_sense);
1302 1321
1303void scsi_print_sense(const char *devclass, struct scsi_cmnd *cmd) 1322/* Normalize and print sense buffer with name prefix */
1323void __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
1324 int sense_len)
1304{ 1325{
1305 const char *name = devclass; 1326 struct scsi_sense_hdr sshdr;
1306 1327
1307 if (cmd->request->rq_disk) 1328 printk(KERN_INFO "%s: ", name);
1308 name = cmd->request->rq_disk->disk_name; 1329 scsi_decode_sense_buffer(sense_buffer, sense_len, &sshdr);
1309 __scsi_print_sense(name, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); 1330 scsi_show_sense_hdr(&sshdr);
1331 scsi_decode_sense_extras(sense_buffer, sense_len, &sshdr);
1332 printk(KERN_INFO "%s: ", name);
1333 scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
1310} 1334}
1311EXPORT_SYMBOL(scsi_print_sense); 1335EXPORT_SYMBOL(__scsi_print_sense);
1312 1336
1313void scsi_print_command(struct scsi_cmnd *cmd) 1337/* Normalize and print sense buffer in SCSI command */
1338void scsi_print_sense(char *name, struct scsi_cmnd *cmd)
1314{ 1339{
1315 /* Assume appended output (i.e. not at start of line) */ 1340 struct scsi_sense_hdr sshdr;
1316 sdev_printk("", cmd->device, "\n"); 1341
1317 printk(KERN_INFO " command: "); 1342 scmd_printk(KERN_INFO, cmd, "");
1318 scsi_print_cdb(cmd->cmnd, cmd->cmd_len, 0); 1343 scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
1344 &sshdr);
1345 scsi_show_sense_hdr(&sshdr);
1346 scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
1347 &sshdr);
1348 scmd_printk(KERN_INFO, cmd, "");
1349 scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
1319} 1350}
1320EXPORT_SYMBOL(scsi_print_command); 1351EXPORT_SYMBOL(scsi_print_sense);
1321 1352
1322#ifdef CONFIG_SCSI_CONSTANTS 1353#ifdef CONFIG_SCSI_CONSTANTS
1323 1354
@@ -1327,25 +1358,6 @@ static const char * const hostbyte_table[]={
1327"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY"}; 1358"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY"};
1328#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table) 1359#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)
1329 1360
1330void scsi_print_hostbyte(int scsiresult)
1331{
1332 int hb = host_byte(scsiresult);
1333
1334 printk("Hostbyte=0x%02x", hb);
1335 if (hb < NUM_HOSTBYTE_STRS)
1336 printk("(%s) ", hostbyte_table[hb]);
1337 else
1338 printk("is invalid ");
1339}
1340#else
1341void scsi_print_hostbyte(int scsiresult)
1342{
1343 printk("Hostbyte=0x%02x ", host_byte(scsiresult));
1344}
1345#endif
1346
1347#ifdef CONFIG_SCSI_CONSTANTS
1348
1349static const char * const driverbyte_table[]={ 1361static const char * const driverbyte_table[]={
1350"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR", 1362"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
1351"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"}; 1363"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"};
@@ -1356,19 +1368,35 @@ static const char * const driversuggest_table[]={"SUGGEST_OK",
1356"SUGGEST_5", "SUGGEST_6", "SUGGEST_7", "SUGGEST_SENSE"}; 1368"SUGGEST_5", "SUGGEST_6", "SUGGEST_7", "SUGGEST_SENSE"};
1357#define NUM_SUGGEST_STRS ARRAY_SIZE(driversuggest_table) 1369#define NUM_SUGGEST_STRS ARRAY_SIZE(driversuggest_table)
1358 1370
1359void scsi_print_driverbyte(int scsiresult) 1371void scsi_show_result(int result)
1360{ 1372{
1361 int dr = (driver_byte(scsiresult) & DRIVER_MASK); 1373 int hb = host_byte(result);
1362 int su = ((driver_byte(scsiresult) & SUGGEST_MASK) >> 4); 1374 int db = (driver_byte(result) & DRIVER_MASK);
1375 int su = ((driver_byte(result) & SUGGEST_MASK) >> 4);
1363 1376
1364 printk("Driverbyte=0x%02x ", driver_byte(scsiresult)); 1377 printk("Result: hostbyte=%s driverbyte=%s,%s\n",
1365 printk("(%s,%s) ", 1378 (hb < NUM_HOSTBYTE_STRS ? hostbyte_table[hb] : "invalid"),
1366 (dr < NUM_DRIVERBYTE_STRS ? driverbyte_table[dr] : "invalid"), 1379 (db < NUM_DRIVERBYTE_STRS ? driverbyte_table[db] : "invalid"),
1367 (su < NUM_SUGGEST_STRS ? driversuggest_table[su] : "invalid")); 1380 (su < NUM_SUGGEST_STRS ? driversuggest_table[su] : "invalid"));
1368} 1381}
1382
1369#else 1383#else
1370void scsi_print_driverbyte(int scsiresult) 1384
1385void scsi_show_result(int result)
1371{ 1386{
1372 printk("Driverbyte=0x%02x ", driver_byte(scsiresult)); 1387 printk("Result: hostbyte=0x%02x driverbyte=0x%02x\n",
1388 host_byte(result), driver_byte(result));
1373} 1389}
1390
1374#endif 1391#endif
1392EXPORT_SYMBOL(scsi_show_result);
1393
1394
1395void scsi_print_result(struct scsi_cmnd *cmd)
1396{
1397 scmd_printk(KERN_INFO, cmd, "");
1398 scsi_show_result(cmd->result);
1399}
1400EXPORT_SYMBOL(scsi_print_result);
1401
1402
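
The constants.c rework above splits sense handling into scsi_decode_sense_buffer(), scsi_show_sense_hdr(), scsi_decode_sense_extras() and scsi_show_extd_sense(), and adds scsi_show_result()/scsi_print_result(), with the per-command printers prefixing their output through scmd_printk(). As an illustration of how the exported helpers compose (not part of this patch; my_report_failure() and the "mydrv" prefix are hypothetical), a low-level driver error path might read:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>

/* Hypothetical LLD error path using the helpers exported above. */
static void my_report_failure(struct scsi_cmnd *cmd)
{
	scsi_print_result(cmd);		/* "Result: hostbyte=... driverbyte=..." */
	scsi_print_command(cmd);	/* "CDB: <opcode name>: xx xx ..." */

	/* Only decode the sense buffer when the target actually returned
	 * a CHECK CONDITION status. */
	if (status_byte(cmd->result) == CHECK_CONDITION)
		scsi_print_sense("mydrv", cmd);
}
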
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
index 5a49216fe4cf..100b49baca7f 100644
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ b/drivers/scsi/dpt/dpti_i2o.h
@@ -31,7 +31,7 @@
31 * Tunable parameters first 31 * Tunable parameters first
32 */ 32 */
33 33
34/* How many different OSM's are we allowing */ 34/* How many different OSM's are we allowing */
35#define MAX_I2O_MODULES 64 35#define MAX_I2O_MODULES 64
36 36
37#define I2O_EVT_CAPABILITY_OTHER 0x01 37#define I2O_EVT_CAPABILITY_OTHER 0x01
@@ -63,7 +63,7 @@ struct i2o_message
63 u16 size; 63 u16 size;
64 u32 target_tid:12; 64 u32 target_tid:12;
65 u32 init_tid:12; 65 u32 init_tid:12;
66 u32 function:8; 66 u32 function:8;
67 u32 initiator_context; 67 u32 initiator_context;
68 /* List follows */ 68 /* List follows */
69}; 69};
@@ -77,7 +77,7 @@ struct i2o_device
77 77
78 char dev_name[8]; /* linux /dev name if available */ 78 char dev_name[8]; /* linux /dev name if available */
79 i2o_lct_entry lct_data;/* Device LCT information */ 79 i2o_lct_entry lct_data;/* Device LCT information */
80 u32 flags; 80 u32 flags;
81 struct proc_dir_entry* proc_entry; /* /proc dir */ 81 struct proc_dir_entry* proc_entry; /* /proc dir */
82 struct adpt_device *owner; 82 struct adpt_device *owner;
83 struct _adpt_hba *controller; /* Controlling IOP */ 83 struct _adpt_hba *controller; /* Controlling IOP */
@@ -86,7 +86,7 @@ struct i2o_device
86/* 86/*
87 * Each I2O controller has one of these objects 87 * Each I2O controller has one of these objects
88 */ 88 */
89 89
90struct i2o_controller 90struct i2o_controller
91{ 91{
92 char name[16]; 92 char name[16];
@@ -111,9 +111,9 @@ struct i2o_sys_tbl_entry
111 u32 iop_id:12; 111 u32 iop_id:12;
112 u32 reserved2:20; 112 u32 reserved2:20;
113 u16 seg_num:12; 113 u16 seg_num:12;
114 u16 i2o_version:4; 114 u16 i2o_version:4;
115 u8 iop_state; 115 u8 iop_state;
116 u8 msg_type; 116 u8 msg_type;
117 u16 frame_size; 117 u16 frame_size;
118 u16 reserved3; 118 u16 reserved3;
119 u32 last_changed; 119 u32 last_changed;
@@ -124,14 +124,14 @@ struct i2o_sys_tbl_entry
124 124
125struct i2o_sys_tbl 125struct i2o_sys_tbl
126{ 126{
127 u8 num_entries; 127 u8 num_entries;
128 u8 version; 128 u8 version;
129 u16 reserved1; 129 u16 reserved1;
130 u32 change_ind; 130 u32 change_ind;
131 u32 reserved2; 131 u32 reserved2;
132 u32 reserved3; 132 u32 reserved3;
133 struct i2o_sys_tbl_entry iops[0]; 133 struct i2o_sys_tbl_entry iops[0];
134}; 134};
135 135
136/* 136/*
137 * I2O classes / subclasses 137 * I2O classes / subclasses
@@ -146,7 +146,7 @@ struct i2o_sys_tbl
146/* Class code names 146/* Class code names
147 * (from v1.5 Table 6-1 Class Code Assignments.) 147 * (from v1.5 Table 6-1 Class Code Assignments.)
148 */ 148 */
149 149
150#define I2O_CLASS_EXECUTIVE 0x000 150#define I2O_CLASS_EXECUTIVE 0x000
151#define I2O_CLASS_DDM 0x001 151#define I2O_CLASS_DDM 0x001
152#define I2O_CLASS_RANDOM_BLOCK_STORAGE 0x010 152#define I2O_CLASS_RANDOM_BLOCK_STORAGE 0x010
@@ -166,7 +166,7 @@ struct i2o_sys_tbl
166 166
167/* Rest of 0x092 - 0x09f reserved for peer-to-peer classes 167/* Rest of 0x092 - 0x09f reserved for peer-to-peer classes
168 */ 168 */
169 169
170#define I2O_CLASS_MATCH_ANYCLASS 0xffffffff 170#define I2O_CLASS_MATCH_ANYCLASS 0xffffffff
171 171
172/* Subclasses 172/* Subclasses
@@ -175,7 +175,7 @@ struct i2o_sys_tbl
175#define I2O_SUBCLASS_i960 0x001 175#define I2O_SUBCLASS_i960 0x001
176#define I2O_SUBCLASS_HDM 0x020 176#define I2O_SUBCLASS_HDM 0x020
177#define I2O_SUBCLASS_ISM 0x021 177#define I2O_SUBCLASS_ISM 0x021
178 178
179/* Operation functions */ 179/* Operation functions */
180 180
181#define I2O_PARAMS_FIELD_GET 0x0001 181#define I2O_PARAMS_FIELD_GET 0x0001
@@ -219,7 +219,7 @@ struct i2o_sys_tbl
219/* 219/*
220 * Messaging API values 220 * Messaging API values
221 */ 221 */
222 222
223#define I2O_CMD_ADAPTER_ASSIGN 0xB3 223#define I2O_CMD_ADAPTER_ASSIGN 0xB3
224#define I2O_CMD_ADAPTER_READ 0xB2 224#define I2O_CMD_ADAPTER_READ 0xB2
225#define I2O_CMD_ADAPTER_RELEASE 0xB5 225#define I2O_CMD_ADAPTER_RELEASE 0xB5
@@ -284,16 +284,16 @@ struct i2o_sys_tbl
284#define I2O_PRIVATE_MSG 0xFF 284#define I2O_PRIVATE_MSG 0xFF
285 285
286/* 286/*
287 * Init Outbound Q status 287 * Init Outbound Q status
288 */ 288 */
289 289
290#define I2O_CMD_OUTBOUND_INIT_IN_PROGRESS 0x01 290#define I2O_CMD_OUTBOUND_INIT_IN_PROGRESS 0x01
291#define I2O_CMD_OUTBOUND_INIT_REJECTED 0x02 291#define I2O_CMD_OUTBOUND_INIT_REJECTED 0x02
292#define I2O_CMD_OUTBOUND_INIT_FAILED 0x03 292#define I2O_CMD_OUTBOUND_INIT_FAILED 0x03
293#define I2O_CMD_OUTBOUND_INIT_COMPLETE 0x04 293#define I2O_CMD_OUTBOUND_INIT_COMPLETE 0x04
294 294
295/* 295/*
296 * I2O Get Status State values 296 * I2O Get Status State values
297 */ 297 */
298 298
299#define ADAPTER_STATE_INITIALIZING 0x01 299#define ADAPTER_STATE_INITIALIZING 0x01
@@ -303,7 +303,7 @@ struct i2o_sys_tbl
303#define ADAPTER_STATE_OPERATIONAL 0x08 303#define ADAPTER_STATE_OPERATIONAL 0x08
304#define ADAPTER_STATE_FAILED 0x10 304#define ADAPTER_STATE_FAILED 0x10
305#define ADAPTER_STATE_FAULTED 0x11 305#define ADAPTER_STATE_FAULTED 0x11
306 306
307/* I2O API function return values */ 307/* I2O API function return values */
308 308
309#define I2O_RTN_NO_ERROR 0 309#define I2O_RTN_NO_ERROR 0
@@ -321,9 +321,9 @@ struct i2o_sys_tbl
321 321
322/* Reply message status defines for all messages */ 322/* Reply message status defines for all messages */
323 323
324#define I2O_REPLY_STATUS_SUCCESS 0x00 324#define I2O_REPLY_STATUS_SUCCESS 0x00
325#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01 325#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
326#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02 326#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
327#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03 327#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
328#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04 328#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
329#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05 329#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
@@ -338,7 +338,7 @@ struct i2o_sys_tbl
338 338
339#define I2O_PARAMS_STATUS_SUCCESS 0x00 339#define I2O_PARAMS_STATUS_SUCCESS 0x00
340#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01 340#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
341#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02 341#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
342#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03 342#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
343#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04 343#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
344#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05 344#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
@@ -390,7 +390,7 @@ struct i2o_sys_tbl
390#define I2O_CLAIM_MANAGEMENT 0x02000000 390#define I2O_CLAIM_MANAGEMENT 0x02000000
391#define I2O_CLAIM_AUTHORIZED 0x03000000 391#define I2O_CLAIM_AUTHORIZED 0x03000000
392#define I2O_CLAIM_SECONDARY 0x04000000 392#define I2O_CLAIM_SECONDARY 0x04000000
393 393
394/* Message header defines for VersionOffset */ 394/* Message header defines for VersionOffset */
395#define I2OVER15 0x0001 395#define I2OVER15 0x0001
396#define I2OVER20 0x0002 396#define I2OVER20 0x0002
diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h
index 82d24864be0c..cc784e8f6e9d 100644
--- a/drivers/scsi/dpt/dpti_ioctl.h
+++ b/drivers/scsi/dpt/dpti_ioctl.h
@@ -99,7 +99,7 @@ typedef struct {
99 uCHAR eataVersion; /* EATA Version */ 99 uCHAR eataVersion; /* EATA Version */
100 uLONG cpLength; /* EATA Command Packet Length */ 100 uLONG cpLength; /* EATA Command Packet Length */
101 uLONG spLength; /* EATA Status Packet Length */ 101 uLONG spLength; /* EATA Status Packet Length */
102 uCHAR drqNum; /* DRQ Index (0,5,6,7) */ 102 uCHAR drqNum; /* DRQ Index (0,5,6,7) */
103 uCHAR flag1; /* EATA Flags 1 (Byte 9) */ 103 uCHAR flag1; /* EATA Flags 1 (Byte 9) */
104 uCHAR flag2; /* EATA Flags 2 (Byte 30) */ 104 uCHAR flag2; /* EATA Flags 2 (Byte 30) */
105} CtrlInfo; 105} CtrlInfo;
diff --git a/drivers/scsi/dpt/dptsig.h b/drivers/scsi/dpt/dptsig.h
index 4bf447792129..94bc894d1200 100644
--- a/drivers/scsi/dpt/dptsig.h
+++ b/drivers/scsi/dpt/dptsig.h
@@ -145,8 +145,8 @@ typedef unsigned long sigLONG;
145#define FT_LOGGER 12 /* Event Logger */ 145#define FT_LOGGER 12 /* Event Logger */
146#define FT_INSTALL 13 /* An Install Program */ 146#define FT_INSTALL 13 /* An Install Program */
147#define FT_LIBRARY 14 /* Storage Manager Real-Mode Calls */ 147#define FT_LIBRARY 14 /* Storage Manager Real-Mode Calls */
148#define FT_RESOURCE 15 /* Storage Manager Resource File */ 148#define FT_RESOURCE 15 /* Storage Manager Resource File */
149#define FT_MODEM_DB 16 /* Storage Manager Modem Database */ 149#define FT_MODEM_DB 16 /* Storage Manager Modem Database */
150 150
151/* Filetype flags - sigBYTE dsFiletypeFlags; FLAG BITS */ 151/* Filetype flags - sigBYTE dsFiletypeFlags; FLAG BITS */
152/* ------------------------------------------------------------------ */ 152/* ------------------------------------------------------------------ */
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index cd36e81b2d93..f7b9dbd64a96 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -195,8 +195,6 @@ static int adpt_detect(struct scsi_host_template* sht)
195 pci_dev_get(pDev); 195 pci_dev_get(pDev);
196 } 196 }
197 } 197 }
198 if (pDev)
199 pci_dev_put(pDev);
200 198
201 /* In INIT state, Activate IOPs */ 199 /* In INIT state, Activate IOPs */
202 for (pHba = hba_chain; pHba; pHba = pHba->next) { 200 for (pHba = hba_chain; pHba; pHba = pHba->next) {
diff --git a/drivers/scsi/eata_generic.h b/drivers/scsi/eata_generic.h
index 635c14861f86..5016af5cf860 100644
--- a/drivers/scsi/eata_generic.h
+++ b/drivers/scsi/eata_generic.h
@@ -18,13 +18,6 @@
18 * Misc. definitions * 18 * Misc. definitions *
19 *********************************************/ 19 *********************************************/
20 20
21#ifndef TRUE
22#define TRUE 1
23#endif
24#ifndef FALSE
25#define FALSE 0
26#endif
27
28#define R_LIMIT 0x20000 21#define R_LIMIT 0x20000
29 22
30#define MAXISA 4 23#define MAXISA 4
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index fbc1d5c3b0a7..b10eefe735c5 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -85,7 +85,7 @@
85static int max_id = 64; 85static int max_id = 64;
86static int max_channel = 3; 86static int max_channel = 3;
87static int init_timeout = 5; 87static int init_timeout = 5;
88static int max_requests = 50; 88static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
89 89
90#define IBMVSCSI_VERSION "1.5.8" 90#define IBMVSCSI_VERSION "1.5.8"
91 91
@@ -538,7 +538,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
538 int request_status; 538 int request_status;
539 int rc; 539 int rc;
540 540
541 /* If we have exhausted our request limit, just fail this request. 541 /* If we have exhausted our request limit, just fail this request,
542 * unless it is for a reset or abort.
542 * Note that there are rare cases involving driver generated requests 543 * Note that there are rare cases involving driver generated requests
543 * (such as task management requests) that the mid layer may think we 544 * (such as task management requests) that the mid layer may think we
544 * can handle more requests (can_queue) when we actually can't 545 * can handle more requests (can_queue) when we actually can't
@@ -551,9 +552,30 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
551 */ 552 */
552 if (request_status < -1) 553 if (request_status < -1)
553 goto send_error; 554 goto send_error;
554 /* Otherwise, if we have run out of requests */ 555 /* Otherwise, we may have run out of requests. */
555 else if (request_status < 0) 556 /* Abort and reset calls should make it through.
556 goto send_busy; 557 * Nothing except abort and reset should use the last two
558 * slots unless we had two or less to begin with.
559 */
560 else if (request_status < 2 &&
561 evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
562 /* In the case that we have less than two requests
563 * available, check the server limit as a combination
564 * of the request limit and the number of requests
565 * in-flight (the size of the send list). If the
566 * server limit is greater than 2, return busy so
567 * that the last two are reserved for reset and abort.
568 */
569 int server_limit = request_status;
570 struct srp_event_struct *tmp_evt;
571
572 list_for_each_entry(tmp_evt, &hostdata->sent, list) {
573 server_limit++;
574 }
575
576 if (server_limit > 2)
577 goto send_busy;
578 }
557 } 579 }
558 580
559 /* Copy the IU into the transfer area */ 581 /* Copy the IU into the transfer area */
@@ -572,6 +594,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
572 594
573 printk(KERN_ERR "ibmvscsi: send error %d\n", 595 printk(KERN_ERR "ibmvscsi: send error %d\n",
574 rc); 596 rc);
597 atomic_inc(&hostdata->request_limit);
575 goto send_error; 598 goto send_error;
576 } 599 }
577 600
@@ -581,7 +604,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
581 unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev); 604 unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
582 605
583 free_event_struct(&hostdata->pool, evt_struct); 606 free_event_struct(&hostdata->pool, evt_struct);
584 return SCSI_MLQUEUE_HOST_BUSY; 607 atomic_inc(&hostdata->request_limit);
608 return SCSI_MLQUEUE_HOST_BUSY;
585 609
586 send_error: 610 send_error:
587 unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev); 611 unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
@@ -831,23 +855,16 @@ static void login_rsp(struct srp_event_struct *evt_struct)
831 855
832 printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n"); 856 printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
833 857
834 if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta > 858 if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
835 (max_requests - 2)) 859 printk(KERN_ERR "ibmvscsi: Invalid request_limit.\n");
836 evt_struct->xfer_iu->srp.login_rsp.req_lim_delta =
837 max_requests - 2;
838 860
839 /* Now we know what the real request-limit is */ 861 /* Now we know what the real request-limit is.
862 * This value is set rather than added to request_limit because
863 * request_limit could have been set to -1 by this client.
864 */
840 atomic_set(&hostdata->request_limit, 865 atomic_set(&hostdata->request_limit,
841 evt_struct->xfer_iu->srp.login_rsp.req_lim_delta); 866 evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
842 867
843 hostdata->host->can_queue =
844 evt_struct->xfer_iu->srp.login_rsp.req_lim_delta - 2;
845
846 if (hostdata->host->can_queue < 1) {
847 printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n");
848 return;
849 }
850
851 /* If we had any pending I/Os, kick them */ 868 /* If we had any pending I/Os, kick them */
852 scsi_unblock_requests(hostdata->host); 869 scsi_unblock_requests(hostdata->host);
853 870
@@ -1337,6 +1354,27 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1337 return rc; 1354 return rc;
1338} 1355}
1339 1356
1357/**
1358 * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
1359 * @sdev: struct scsi_device device to configure
1360 *
1361 * Enable allow_restart for a device if it is a disk. Adjust the
1362 * queue_depth here also as is required by the documentation for
1363 * struct scsi_host_template.
1364 */
1365static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1366{
1367 struct Scsi_Host *shost = sdev->host;
1368 unsigned long lock_flags = 0;
1369
1370 spin_lock_irqsave(shost->host_lock, lock_flags);
1371 if (sdev->type == TYPE_DISK)
1372 sdev->allow_restart = 1;
1373 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1374 spin_unlock_irqrestore(shost->host_lock, lock_flags);
1375 return 0;
1376}
1377
1340/* ------------------------------------------------------------ 1378/* ------------------------------------------------------------
1341 * sysfs attributes 1379 * sysfs attributes
1342 */ 1380 */
@@ -1482,8 +1520,9 @@ static struct scsi_host_template driver_template = {
1482 .queuecommand = ibmvscsi_queuecommand, 1520 .queuecommand = ibmvscsi_queuecommand,
1483 .eh_abort_handler = ibmvscsi_eh_abort_handler, 1521 .eh_abort_handler = ibmvscsi_eh_abort_handler,
1484 .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler, 1522 .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
1523 .slave_configure = ibmvscsi_slave_configure,
1485 .cmd_per_lun = 16, 1524 .cmd_per_lun = 16,
1486 .can_queue = 1, /* Updated after SRP_LOGIN */ 1525 .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
1487 .this_id = -1, 1526 .this_id = -1,
1488 .sg_tablesize = SG_ALL, 1527 .sg_tablesize = SG_ALL,
1489 .use_clustering = ENABLE_CLUSTERING, 1528 .use_clustering = ENABLE_CLUSTERING,
@@ -1503,6 +1542,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1503 1542
1504 vdev->dev.driver_data = NULL; 1543 vdev->dev.driver_data = NULL;
1505 1544
1545 driver_template.can_queue = max_requests;
1506 host = scsi_host_alloc(&driver_template, sizeof(*hostdata)); 1546 host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
1507 if (!host) { 1547 if (!host) {
1508 printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n"); 1548 printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
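
The ibmvscsi_send_srp_event() change above reserves the last two request-limit slots for abort and reset: an ordinary command that would dip into them is bounced with SCSI_MLQUEUE_HOST_BUSY unless the server never granted more than two slots in the first place. A minimal standalone sketch of just that decision follows; should_return_busy(), slots_left and in_flight are invented names standing in for request_status, the length of hostdata->sent and the SRP_TSK_MGMT test.

#include <stdbool.h>
#include <stdio.h>

/*
 * slots_left models request_status: what remains of the request limit
 * after this command claimed its slot.  in_flight models the length of
 * hostdata->sent.  (The request_status < -1 hard-error path is omitted.)
 */
static bool should_return_busy(int slots_left, int in_flight, bool is_tsk_mgmt)
{
	if (slots_left >= 2 || is_tsk_mgmt)
		return false;		/* room to spare, or abort/reset itself */

	/* Fewer than two slots remain: only proceed if the server limit
	 * (remaining slots plus commands already in flight) was never
	 * above two in the first place. */
	return slots_left + in_flight > 2;
}

int main(void)
{
	printf("%d\n", should_return_busy(1, 40, false)); /* 1: bounce, keep reserve */
	printf("%d\n", should_return_busy(1, 40, true));  /* 0: abort/reset go through */
	printf("%d\n", should_return_busy(0, 2, false));  /* 0: tiny server limit */
	return 0;
}
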
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 5c6d93582929..77cc1d40f5bb 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -44,6 +44,8 @@ struct Scsi_Host;
44 */ 44 */
45#define MAX_INDIRECT_BUFS 10 45#define MAX_INDIRECT_BUFS 10
46 46
47#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100
48
47/* ------------------------------------------------------------ 49/* ------------------------------------------------------------
48 * Data Structures 50 * Data Structures
49 */ 51 */
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index a39a478bb39a..6d223dd76440 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -35,7 +35,7 @@
35#include "ibmvscsi.h" 35#include "ibmvscsi.h"
36 36
37#define INITIAL_SRP_LIMIT 16 37#define INITIAL_SRP_LIMIT 16
38#define DEFAULT_MAX_SECTORS 512 38#define DEFAULT_MAX_SECTORS 256
39 39
40#define TGT_NAME "ibmvstgt" 40#define TGT_NAME "ibmvstgt"
41 41
@@ -248,8 +248,8 @@ static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
248 md[i].va + mdone); 248 md[i].va + mdone);
249 249
250 if (err != H_SUCCESS) { 250 if (err != H_SUCCESS) {
251 eprintk("rdma error %d %d\n", dir, slen); 251 eprintk("rdma error %d %d %ld\n", dir, slen, err);
252 goto out; 252 return -EIO;
253 } 253 }
254 254
255 mlen -= slen; 255 mlen -= slen;
@@ -265,45 +265,35 @@ static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
265 if (sidx > nsg) { 265 if (sidx > nsg) {
266 eprintk("out of sg %p %d %d\n", 266 eprintk("out of sg %p %d %d\n",
267 iue, sidx, nsg); 267 iue, sidx, nsg);
268 goto out; 268 return -EIO;
269 } 269 }
270 } 270 }
271 }; 271 };
272 272
273 rest -= mlen; 273 rest -= mlen;
274 } 274 }
275out:
276
277 return 0; 275 return 0;
278} 276}
279 277
280static int ibmvstgt_transfer_data(struct scsi_cmnd *sc,
281 void (*done)(struct scsi_cmnd *))
282{
283 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
284 int err;
285
286 err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
287
288 done(sc);
289
290 return err;
291}
292
293static int ibmvstgt_cmd_done(struct scsi_cmnd *sc, 278static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
294 void (*done)(struct scsi_cmnd *)) 279 void (*done)(struct scsi_cmnd *))
295{ 280{
296 unsigned long flags; 281 unsigned long flags;
297 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr; 282 struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
298 struct srp_target *target = iue->target; 283 struct srp_target *target = iue->target;
284 int err = 0;
299 285
300 dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]); 286 dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
287 sc->use_sg);
288
289 if (sc->use_sg)
290 err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
301 291
302 spin_lock_irqsave(&target->lock, flags); 292 spin_lock_irqsave(&target->lock, flags);
303 list_del(&iue->ilist); 293 list_del(&iue->ilist);
304 spin_unlock_irqrestore(&target->lock, flags); 294 spin_unlock_irqrestore(&target->lock, flags);
305 295
306 if (sc->result != SAM_STAT_GOOD) { 296 if (err || sc->result != SAM_STAT_GOOD) {
307 eprintk("operation failed %p %d %x\n", 297 eprintk("operation failed %p %d %x\n",
308 iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]); 298 iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
309 send_rsp(iue, sc, HARDWARE_ERROR, 0x00); 299 send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
@@ -503,7 +493,8 @@ static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
503{ 493{
504 struct vio_port *vport = target_to_port(target); 494 struct vio_port *vport = target_to_port(target);
505 struct iu_entry *iue; 495 struct iu_entry *iue;
506 long err, done; 496 long err;
497 int done = 1;
507 498
508 iue = srp_iu_get(target); 499 iue = srp_iu_get(target);
509 if (!iue) { 500 if (!iue) {
@@ -518,7 +509,6 @@ static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
518 509
519 if (err != H_SUCCESS) { 510 if (err != H_SUCCESS) {
520 eprintk("%ld transferring data error %p\n", err, iue); 511 eprintk("%ld transferring data error %p\n", err, iue);
521 done = 1;
522 goto out; 512 goto out;
523 } 513 }
524 514
@@ -794,7 +784,6 @@ static struct scsi_host_template ibmvstgt_sht = {
794 .use_clustering = DISABLE_CLUSTERING, 784 .use_clustering = DISABLE_CLUSTERING,
795 .max_sectors = DEFAULT_MAX_SECTORS, 785 .max_sectors = DEFAULT_MAX_SECTORS,
796 .transfer_response = ibmvstgt_cmd_done, 786 .transfer_response = ibmvstgt_cmd_done,
797 .transfer_data = ibmvstgt_transfer_data,
798 .eh_abort_handler = ibmvstgt_eh_abort_handler, 787 .eh_abort_handler = ibmvstgt_eh_abort_handler,
799 .tsk_mgmt_response = ibmvstgt_tsk_mgmt_response, 788 .tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
800 .shost_attrs = ibmvstgt_attrs, 789 .shost_attrs = ibmvstgt_attrs,
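
In the ibmvstgt hunks above, ibmvstgt_rdma() now returns -EIO when an h_copy_rdma call or scatterlist walk fails, instead of jumping to a common exit that reported success, and ibmvstgt_cmd_done() folds both that error and a bad SCSI status into a HARDWARE_ERROR response. The short sketch below shows the error-propagation half of that change in isolation; h_copy_segment() and rdma_all() are made-up stand-ins, not driver code.

#include <errno.h>
#include <stdio.h>

/* Stand-in for one h_copy_rdma transfer: pretend the third segment fails. */
static int h_copy_segment(int idx)
{
	return idx == 2 ? -1 : 0;
}

static int rdma_all(int nseg)
{
	for (int i = 0; i < nseg; i++) {
		if (h_copy_segment(i) != 0) {
			fprintf(stderr, "rdma error on segment %d\n", i);
			return -EIO;	/* was: goto out; ... return 0; */
		}
	}
	return 0;
}

int main(void)
{
	printf("rdma_all(2) = %d\n", rdma_all(2));	/* 0: all segments copied */
	printf("rdma_all(4) = %d\n", rdma_all(4));	/* -5: failure now propagates */
	return 0;
}
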
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e9bd29975db4..2c7b77e833f9 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -89,10 +89,9 @@ static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
89static unsigned int ipr_max_speed = 1; 89static unsigned int ipr_max_speed = 1;
90static int ipr_testmode = 0; 90static int ipr_testmode = 0;
91static unsigned int ipr_fastfail = 0; 91static unsigned int ipr_fastfail = 0;
92static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT; 92static unsigned int ipr_transop_timeout = 0;
93static unsigned int ipr_enable_cache = 1; 93static unsigned int ipr_enable_cache = 1;
94static unsigned int ipr_debug = 0; 94static unsigned int ipr_debug = 0;
95static int ipr_auto_create = 1;
96static DEFINE_SPINLOCK(ipr_driver_lock); 95static DEFINE_SPINLOCK(ipr_driver_lock);
97 96
98/* This table describes the differences between DMA controller chips */ 97/* This table describes the differences between DMA controller chips */
@@ -159,15 +158,13 @@ module_param_named(enable_cache, ipr_enable_cache, int, 0);
159MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)"); 158MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
160module_param_named(debug, ipr_debug, int, 0); 159module_param_named(debug, ipr_debug, int, 0);
161MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)"); 160MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
162module_param_named(auto_create, ipr_auto_create, int, 0);
163MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
164MODULE_LICENSE("GPL"); 161MODULE_LICENSE("GPL");
165MODULE_VERSION(IPR_DRIVER_VERSION); 162MODULE_VERSION(IPR_DRIVER_VERSION);
166 163
167/* A constant array of IOASCs/URCs/Error Messages */ 164/* A constant array of IOASCs/URCs/Error Messages */
168static const 165static const
169struct ipr_error_table_t ipr_error_table[] = { 166struct ipr_error_table_t ipr_error_table[] = {
170 {0x00000000, 1, 1, 167 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
171 "8155: An unknown error was received"}, 168 "8155: An unknown error was received"},
172 {0x00330000, 0, 0, 169 {0x00330000, 0, 0,
173 "Soft underlength error"}, 170 "Soft underlength error"},
@@ -175,37 +172,37 @@ struct ipr_error_table_t ipr_error_table[] = {
175 "Command to be cancelled not found"}, 172 "Command to be cancelled not found"},
176 {0x00808000, 0, 0, 173 {0x00808000, 0, 0,
177 "Qualified success"}, 174 "Qualified success"},
178 {0x01080000, 1, 1, 175 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
179 "FFFE: Soft device bus error recovered by the IOA"}, 176 "FFFE: Soft device bus error recovered by the IOA"},
180 {0x01088100, 0, 1, 177 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
181 "4101: Soft device bus fabric error"}, 178 "4101: Soft device bus fabric error"},
182 {0x01170600, 0, 1, 179 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
183 "FFF9: Device sector reassign successful"}, 180 "FFF9: Device sector reassign successful"},
184 {0x01170900, 0, 1, 181 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
185 "FFF7: Media error recovered by device rewrite procedures"}, 182 "FFF7: Media error recovered by device rewrite procedures"},
186 {0x01180200, 0, 1, 183 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
187 "7001: IOA sector reassignment successful"}, 184 "7001: IOA sector reassignment successful"},
188 {0x01180500, 0, 1, 185 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
189 "FFF9: Soft media error. Sector reassignment recommended"}, 186 "FFF9: Soft media error. Sector reassignment recommended"},
190 {0x01180600, 0, 1, 187 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
191 "FFF7: Media error recovered by IOA rewrite procedures"}, 188 "FFF7: Media error recovered by IOA rewrite procedures"},
192 {0x01418000, 0, 1, 189 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
193 "FF3D: Soft PCI bus error recovered by the IOA"}, 190 "FF3D: Soft PCI bus error recovered by the IOA"},
194 {0x01440000, 1, 1, 191 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
195 "FFF6: Device hardware error recovered by the IOA"}, 192 "FFF6: Device hardware error recovered by the IOA"},
196 {0x01448100, 0, 1, 193 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
197 "FFF6: Device hardware error recovered by the device"}, 194 "FFF6: Device hardware error recovered by the device"},
198 {0x01448200, 1, 1, 195 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
199 "FF3D: Soft IOA error recovered by the IOA"}, 196 "FF3D: Soft IOA error recovered by the IOA"},
200 {0x01448300, 0, 1, 197 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
201 "FFFA: Undefined device response recovered by the IOA"}, 198 "FFFA: Undefined device response recovered by the IOA"},
202 {0x014A0000, 1, 1, 199 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
203 "FFF6: Device bus error, message or command phase"}, 200 "FFF6: Device bus error, message or command phase"},
204 {0x014A8000, 0, 1, 201 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
205 "FFFE: Task Management Function failed"}, 202 "FFFE: Task Management Function failed"},
206 {0x015D0000, 0, 1, 203 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
207 "FFF6: Failure prediction threshold exceeded"}, 204 "FFF6: Failure prediction threshold exceeded"},
208 {0x015D9200, 0, 1, 205 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
209 "8009: Impending cache battery pack failure"}, 206 "8009: Impending cache battery pack failure"},
210 {0x02040400, 0, 0, 207 {0x02040400, 0, 0,
211 "34FF: Disk device format in progress"}, 208 "34FF: Disk device format in progress"},
@@ -215,85 +212,85 @@ struct ipr_error_table_t ipr_error_table[] = {
215 "No ready, IOA shutdown"}, 212 "No ready, IOA shutdown"},
216 {0x025A0000, 0, 0, 213 {0x025A0000, 0, 0,
217 "Not ready, IOA has been shutdown"}, 214 "Not ready, IOA has been shutdown"},
218 {0x02670100, 0, 1, 215 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
219 "3020: Storage subsystem configuration error"}, 216 "3020: Storage subsystem configuration error"},
220 {0x03110B00, 0, 0, 217 {0x03110B00, 0, 0,
221 "FFF5: Medium error, data unreadable, recommend reassign"}, 218 "FFF5: Medium error, data unreadable, recommend reassign"},
222 {0x03110C00, 0, 0, 219 {0x03110C00, 0, 0,
223 "7000: Medium error, data unreadable, do not reassign"}, 220 "7000: Medium error, data unreadable, do not reassign"},
224 {0x03310000, 0, 1, 221 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
225 "FFF3: Disk media format bad"}, 222 "FFF3: Disk media format bad"},
226 {0x04050000, 0, 1, 223 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
227 "3002: Addressed device failed to respond to selection"}, 224 "3002: Addressed device failed to respond to selection"},
228 {0x04080000, 1, 1, 225 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
229 "3100: Device bus error"}, 226 "3100: Device bus error"},
230 {0x04080100, 0, 1, 227 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
231 "3109: IOA timed out a device command"}, 228 "3109: IOA timed out a device command"},
232 {0x04088000, 0, 0, 229 {0x04088000, 0, 0,
233 "3120: SCSI bus is not operational"}, 230 "3120: SCSI bus is not operational"},
234 {0x04088100, 0, 1, 231 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
235 "4100: Hard device bus fabric error"}, 232 "4100: Hard device bus fabric error"},
236 {0x04118000, 0, 1, 233 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
237 "9000: IOA reserved area data check"}, 234 "9000: IOA reserved area data check"},
238 {0x04118100, 0, 1, 235 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
239 "9001: IOA reserved area invalid data pattern"}, 236 "9001: IOA reserved area invalid data pattern"},
240 {0x04118200, 0, 1, 237 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
241 "9002: IOA reserved area LRC error"}, 238 "9002: IOA reserved area LRC error"},
242 {0x04320000, 0, 1, 239 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
243 "102E: Out of alternate sectors for disk storage"}, 240 "102E: Out of alternate sectors for disk storage"},
244 {0x04330000, 1, 1, 241 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
245 "FFF4: Data transfer underlength error"}, 242 "FFF4: Data transfer underlength error"},
246 {0x04338000, 1, 1, 243 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
247 "FFF4: Data transfer overlength error"}, 244 "FFF4: Data transfer overlength error"},
248 {0x043E0100, 0, 1, 245 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
249 "3400: Logical unit failure"}, 246 "3400: Logical unit failure"},
250 {0x04408500, 0, 1, 247 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
251 "FFF4: Device microcode is corrupt"}, 248 "FFF4: Device microcode is corrupt"},
252 {0x04418000, 1, 1, 249 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
253 "8150: PCI bus error"}, 250 "8150: PCI bus error"},
254 {0x04430000, 1, 0, 251 {0x04430000, 1, 0,
255 "Unsupported device bus message received"}, 252 "Unsupported device bus message received"},
256 {0x04440000, 1, 1, 253 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
257 "FFF4: Disk device problem"}, 254 "FFF4: Disk device problem"},
258 {0x04448200, 1, 1, 255 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
259 "8150: Permanent IOA failure"}, 256 "8150: Permanent IOA failure"},
260 {0x04448300, 0, 1, 257 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
261 "3010: Disk device returned wrong response to IOA"}, 258 "3010: Disk device returned wrong response to IOA"},
262 {0x04448400, 0, 1, 259 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
263 "8151: IOA microcode error"}, 260 "8151: IOA microcode error"},
264 {0x04448500, 0, 0, 261 {0x04448500, 0, 0,
265 "Device bus status error"}, 262 "Device bus status error"},
266 {0x04448600, 0, 1, 263 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
267 "8157: IOA error requiring IOA reset to recover"}, 264 "8157: IOA error requiring IOA reset to recover"},
268 {0x04448700, 0, 0, 265 {0x04448700, 0, 0,
269 "ATA device status error"}, 266 "ATA device status error"},
270 {0x04490000, 0, 0, 267 {0x04490000, 0, 0,
271 "Message reject received from the device"}, 268 "Message reject received from the device"},
272 {0x04449200, 0, 1, 269 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
273 "8008: A permanent cache battery pack failure occurred"}, 270 "8008: A permanent cache battery pack failure occurred"},
274 {0x0444A000, 0, 1, 271 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
275 "9090: Disk unit has been modified after the last known status"}, 272 "9090: Disk unit has been modified after the last known status"},
276 {0x0444A200, 0, 1, 273 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
277 "9081: IOA detected device error"}, 274 "9081: IOA detected device error"},
278 {0x0444A300, 0, 1, 275 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
279 "9082: IOA detected device error"}, 276 "9082: IOA detected device error"},
280 {0x044A0000, 1, 1, 277 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
281 "3110: Device bus error, message or command phase"}, 278 "3110: Device bus error, message or command phase"},
282 {0x044A8000, 1, 1, 279 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
283 "3110: SAS Command / Task Management Function failed"}, 280 "3110: SAS Command / Task Management Function failed"},
284 {0x04670400, 0, 1, 281 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
285 "9091: Incorrect hardware configuration change has been detected"}, 282 "9091: Incorrect hardware configuration change has been detected"},
286 {0x04678000, 0, 1, 283 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
287 "9073: Invalid multi-adapter configuration"}, 284 "9073: Invalid multi-adapter configuration"},
288 {0x04678100, 0, 1, 285 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
289 "4010: Incorrect connection between cascaded expanders"}, 286 "4010: Incorrect connection between cascaded expanders"},
290 {0x04678200, 0, 1, 287 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
291 "4020: Connections exceed IOA design limits"}, 288 "4020: Connections exceed IOA design limits"},
292 {0x04678300, 0, 1, 289 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
293 "4030: Incorrect multipath connection"}, 290 "4030: Incorrect multipath connection"},
294 {0x04679000, 0, 1, 291 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
295 "4110: Unsupported enclosure function"}, 292 "4110: Unsupported enclosure function"},
296 {0x046E0000, 0, 1, 293 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
297 "FFF4: Command to logical unit failed"}, 294 "FFF4: Command to logical unit failed"},
298 {0x05240000, 1, 0, 295 {0x05240000, 1, 0,
299 "Illegal request, invalid request type or request packet"}, 296 "Illegal request, invalid request type or request packet"},
@@ -313,101 +310,103 @@ struct ipr_error_table_t ipr_error_table[] = {
313 "Illegal request, command sequence error"}, 310 "Illegal request, command sequence error"},
314 {0x052C8000, 1, 0, 311 {0x052C8000, 1, 0,
315 "Illegal request, dual adapter support not enabled"}, 312 "Illegal request, dual adapter support not enabled"},
316 {0x06040500, 0, 1, 313 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
317 "9031: Array protection temporarily suspended, protection resuming"}, 314 "9031: Array protection temporarily suspended, protection resuming"},
318 {0x06040600, 0, 1, 315 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
319 "9040: Array protection temporarily suspended, protection resuming"}, 316 "9040: Array protection temporarily suspended, protection resuming"},
320 {0x06288000, 0, 1, 317 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
321 "3140: Device bus not ready to ready transition"}, 318 "3140: Device bus not ready to ready transition"},
322 {0x06290000, 0, 1, 319 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
323 "FFFB: SCSI bus was reset"}, 320 "FFFB: SCSI bus was reset"},
324 {0x06290500, 0, 0, 321 {0x06290500, 0, 0,
325 "FFFE: SCSI bus transition to single ended"}, 322 "FFFE: SCSI bus transition to single ended"},
326 {0x06290600, 0, 0, 323 {0x06290600, 0, 0,
327 "FFFE: SCSI bus transition to LVD"}, 324 "FFFE: SCSI bus transition to LVD"},
328 {0x06298000, 0, 1, 325 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
329 "FFFB: SCSI bus was reset by another initiator"}, 326 "FFFB: SCSI bus was reset by another initiator"},
330 {0x063F0300, 0, 1, 327 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
331 "3029: A device replacement has occurred"}, 328 "3029: A device replacement has occurred"},
332 {0x064C8000, 0, 1, 329 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
333 "9051: IOA cache data exists for a missing or failed device"}, 330 "9051: IOA cache data exists for a missing or failed device"},
334 {0x064C8100, 0, 1, 331 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
335 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"}, 332 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
336 {0x06670100, 0, 1, 333 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
337 "9025: Disk unit is not supported at its physical location"}, 334 "9025: Disk unit is not supported at its physical location"},
338 {0x06670600, 0, 1, 335 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
339 "3020: IOA detected a SCSI bus configuration error"}, 336 "3020: IOA detected a SCSI bus configuration error"},
340 {0x06678000, 0, 1, 337 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
341 "3150: SCSI bus configuration error"}, 338 "3150: SCSI bus configuration error"},
342 {0x06678100, 0, 1, 339 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
343 "9074: Asymmetric advanced function disk configuration"}, 340 "9074: Asymmetric advanced function disk configuration"},
344 {0x06678300, 0, 1, 341 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
345 "4040: Incomplete multipath connection between IOA and enclosure"}, 342 "4040: Incomplete multipath connection between IOA and enclosure"},
346 {0x06678400, 0, 1, 343 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
347 "4041: Incomplete multipath connection between enclosure and device"}, 344 "4041: Incomplete multipath connection between enclosure and device"},
348 {0x06678500, 0, 1, 345 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
349 "9075: Incomplete multipath connection between IOA and remote IOA"}, 346 "9075: Incomplete multipath connection between IOA and remote IOA"},
350 {0x06678600, 0, 1, 347 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
351 "9076: Configuration error, missing remote IOA"}, 348 "9076: Configuration error, missing remote IOA"},
352 {0x06679100, 0, 1, 349 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
353 "4050: Enclosure does not support a required multipath function"}, 350 "4050: Enclosure does not support a required multipath function"},
354 {0x06690200, 0, 1, 351 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
355 "9041: Array protection temporarily suspended"}, 352 "9041: Array protection temporarily suspended"},
356 {0x06698200, 0, 1, 353 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
357 "9042: Corrupt array parity detected on specified device"}, 354 "9042: Corrupt array parity detected on specified device"},
358 {0x066B0200, 0, 1, 355 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
359 "9030: Array no longer protected due to missing or failed disk unit"}, 356 "9030: Array no longer protected due to missing or failed disk unit"},
360 {0x066B8000, 0, 1, 357 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
361 "9071: Link operational transition"}, 358 "9071: Link operational transition"},
362 {0x066B8100, 0, 1, 359 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
363 "9072: Link not operational transition"}, 360 "9072: Link not operational transition"},
364 {0x066B8200, 0, 1, 361 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
365 "9032: Array exposed but still protected"}, 362 "9032: Array exposed but still protected"},
 363 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
 364 "70DD: Device forced failed by disrupt device command"},
 366 {0x066B9100, 0, 1, 365 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
367 "4061: Multipath redundancy level got better"}, 366 "4061: Multipath redundancy level got better"},
368 {0x066B9200, 0, 1, 367 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
369 "4060: Multipath redundancy level got worse"}, 368 "4060: Multipath redundancy level got worse"},
370 {0x07270000, 0, 0, 369 {0x07270000, 0, 0,
371 "Failure due to other device"}, 370 "Failure due to other device"},
372 {0x07278000, 0, 1, 371 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
373 "9008: IOA does not support functions expected by devices"}, 372 "9008: IOA does not support functions expected by devices"},
374 {0x07278100, 0, 1, 373 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
375 "9010: Cache data associated with attached devices cannot be found"}, 374 "9010: Cache data associated with attached devices cannot be found"},
376 {0x07278200, 0, 1, 375 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
377 "9011: Cache data belongs to devices other than those attached"}, 376 "9011: Cache data belongs to devices other than those attached"},
378 {0x07278400, 0, 1, 377 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
379 "9020: Array missing 2 or more devices with only 1 device present"}, 378 "9020: Array missing 2 or more devices with only 1 device present"},
380 {0x07278500, 0, 1, 379 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
381 "9021: Array missing 2 or more devices with 2 or more devices present"}, 380 "9021: Array missing 2 or more devices with 2 or more devices present"},
382 {0x07278600, 0, 1, 381 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
383 "9022: Exposed array is missing a required device"}, 382 "9022: Exposed array is missing a required device"},
384 {0x07278700, 0, 1, 383 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
385 "9023: Array member(s) not at required physical locations"}, 384 "9023: Array member(s) not at required physical locations"},
386 {0x07278800, 0, 1, 385 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
387 "9024: Array not functional due to present hardware configuration"}, 386 "9024: Array not functional due to present hardware configuration"},
388 {0x07278900, 0, 1, 387 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
389 "9026: Array not functional due to present hardware configuration"}, 388 "9026: Array not functional due to present hardware configuration"},
390 {0x07278A00, 0, 1, 389 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
391 "9027: Array is missing a device and parity is out of sync"}, 390 "9027: Array is missing a device and parity is out of sync"},
392 {0x07278B00, 0, 1, 391 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
393 "9028: Maximum number of arrays already exist"}, 392 "9028: Maximum number of arrays already exist"},
394 {0x07278C00, 0, 1, 393 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
395 "9050: Required cache data cannot be located for a disk unit"}, 394 "9050: Required cache data cannot be located for a disk unit"},
396 {0x07278D00, 0, 1, 395 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
397 "9052: Cache data exists for a device that has been modified"}, 396 "9052: Cache data exists for a device that has been modified"},
398 {0x07278F00, 0, 1, 397 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
399 "9054: IOA resources not available due to previous problems"}, 398 "9054: IOA resources not available due to previous problems"},
400 {0x07279100, 0, 1, 399 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
401 "9092: Disk unit requires initialization before use"}, 400 "9092: Disk unit requires initialization before use"},
402 {0x07279200, 0, 1, 401 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
403 "9029: Incorrect hardware configuration change has been detected"}, 402 "9029: Incorrect hardware configuration change has been detected"},
404 {0x07279600, 0, 1, 403 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
405 "9060: One or more disk pairs are missing from an array"}, 404 "9060: One or more disk pairs are missing from an array"},
406 {0x07279700, 0, 1, 405 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
407 "9061: One or more disks are missing from an array"}, 406 "9061: One or more disks are missing from an array"},
408 {0x07279800, 0, 1, 407 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
409 "9062: One or more disks are missing from an array"}, 408 "9062: One or more disks are missing from an array"},
410 {0x07279900, 0, 1, 409 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
411 "9063: Maximum number of functional arrays has been exceeded"}, 410 "9063: Maximum number of functional arrays has been exceeded"},
412 {0x0B260000, 0, 0, 411 {0x0B260000, 0, 0,
413 "Aborted command, invalid descriptor"}, 412 "Aborted command, invalid descriptor"},
@@ -481,12 +480,16 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
481{ 480{
482 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 481 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
483 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 482 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
483 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
484 484
485 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 485 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
486 ioarcb->write_data_transfer_length = 0; 486 ioarcb->write_data_transfer_length = 0;
487 ioarcb->read_data_transfer_length = 0; 487 ioarcb->read_data_transfer_length = 0;
488 ioarcb->write_ioadl_len = 0; 488 ioarcb->write_ioadl_len = 0;
489 ioarcb->read_ioadl_len = 0; 489 ioarcb->read_ioadl_len = 0;
490 ioarcb->write_ioadl_addr =
491 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
492 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
490 ioasa->ioasc = 0; 493 ioasa->ioasc = 0;
491 ioasa->residual_data_len = 0; 494 ioasa->residual_data_len = 0;
492 ioasa->u.gata.status = 0; 495 ioasa->u.gata.status = 0;
@@ -1610,7 +1613,7 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1610 /* Set indication we have logged an error */ 1613 /* Set indication we have logged an error */
1611 ioa_cfg->errors_logged++; 1614 ioa_cfg->errors_logged++;
1612 1615
1613 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) 1616 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1614 return; 1617 return;
1615 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw)) 1618 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1616 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw)); 1619 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
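The ipr_handle_log_data() hunk above replaces the fixed IPR_DEFAULT_LOG_LEVEL threshold with the per-entry log_hcam value carried in ipr_error_table, so each IOASC can declare its own minimum verbosity. A minimal userspace sketch of that lookup-and-filter step, with simplified types standing in for the driver structures; only the table idea and the log_hcam comparison come from the patch, everything else is illustrative:

	#include <stdio.h>

	struct err_entry {
		unsigned int ioasc;	/* error identifier returned by the adapter */
		int log_hcam;		/* minimum log_level needed to report it */
		const char *text;
	};

	static const struct err_entry err_table[] = {
		{ 0x066B8300, 3, "70DD: Device forced failed by disrupt device command" },
		{ 0x01448100, 2, "FFF6: Device hardware error recovered by the device" },
	};

	static void handle_log_data(int log_level, unsigned int ioasc)
	{
		unsigned int i;

		for (i = 0; i < sizeof(err_table) / sizeof(err_table[0]); i++) {
			if (err_table[i].ioasc != ioasc)
				continue;
			if (log_level < err_table[i].log_hcam)
				return;		/* filtered, as in the patched comparison */
			printf("%s\n", err_table[i].text);
			return;
		}
	}

	int main(void)
	{
		handle_log_data(2, 0x066B8300);	/* suppressed: entry needs level 3 */
		handle_log_data(3, 0x066B8300);	/* reported */
		return 0;
	}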
@@ -3850,6 +3853,8 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3850 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { 3853 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3851 if (ipr_cmd->scsi_cmd) 3854 if (ipr_cmd->scsi_cmd)
3852 ipr_cmd->done = ipr_scsi_eh_done; 3855 ipr_cmd->done = ipr_scsi_eh_done;
3856 if (ipr_cmd->qc)
3857 ipr_cmd->done = ipr_sata_eh_done;
3853 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) { 3858 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
3854 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT; 3859 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
3855 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED; 3860 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
@@ -4230,6 +4235,14 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4230 4235
4231 sglist = scsi_cmd->request_buffer; 4236 sglist = scsi_cmd->request_buffer;
4232 4237
4238 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
4239 ioadl = ioarcb->add_data.u.ioadl;
4240 ioarcb->write_ioadl_addr =
4241 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4242 offsetof(struct ipr_ioarcb, add_data));
4243 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4244 }
4245
4233 for (i = 0; i < ipr_cmd->dma_use_sg; i++) { 4246 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4234 ioadl[i].flags_and_data_len = 4247 ioadl[i].flags_and_data_len =
4235 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i])); 4248 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
@@ -4260,6 +4273,11 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4260 scsi_cmd->sc_data_direction); 4273 scsi_cmd->sc_data_direction);
4261 4274
4262 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) { 4275 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
4276 ioadl = ioarcb->add_data.u.ioadl;
4277 ioarcb->write_ioadl_addr =
4278 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4279 offsetof(struct ipr_ioarcb, add_data));
4280 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4263 ipr_cmd->dma_use_sg = 1; 4281 ipr_cmd->dma_use_sg = 1;
4264 ioadl[0].flags_and_data_len = 4282 ioadl[0].flags_and_data_len =
4265 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST); 4283 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
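Both ipr_build_ioadl() hunks above place short scatter/gather lists inside the spare add_data area of the IOARCB itself, so the adapter fetches the descriptors together with the command instead of issuing a second DMA read. A rough, runnable sketch of the offsetof() arithmetic that points the read/write IOADL addresses at the inline area; the struct below is hypothetical and far smaller than the real ipr_ioarcb, only the addressing pattern mirrors the patch:

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	struct ioadl_desc {
		uint32_t flags_and_data_len;
		uint32_t address;
	};

	struct ioarcb {
		uint32_t host_pci_addr;			/* bus address of this block */
		uint32_t write_ioadl_addr;
		uint32_t read_ioadl_addr;
		struct ioadl_desc inline_ioadl[5];	/* stands in for add_data.u.ioadl */
	};

	int main(void)
	{
		struct ioarcb cb = { .host_pci_addr = 0x10000 };
		unsigned int sg_count = 3;

		if (sg_count <= sizeof(cb.inline_ioadl) / sizeof(cb.inline_ioadl[0])) {
			/* Short list: point the adapter at descriptors embedded in
			 * the command block, not at a separately mapped buffer. */
			cb.write_ioadl_addr = cb.host_pci_addr +
					      offsetof(struct ioarcb, inline_ioadl);
			cb.read_ioadl_addr = cb.write_ioadl_addr;
		}
		printf("inline ioadl bus address: 0x%x\n", (unsigned)cb.write_ioadl_addr);
		return 0;
	}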
@@ -4346,11 +4364,9 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4346 **/ 4364 **/
4347static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) 4365static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4348{ 4366{
4349 struct ipr_ioarcb *ioarcb; 4367 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4350 struct ipr_ioasa *ioasa; 4368 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4351 4369 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
4352 ioarcb = &ipr_cmd->ioarcb;
4353 ioasa = &ipr_cmd->ioasa;
4354 4370
4355 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 4371 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4356 ioarcb->write_data_transfer_length = 0; 4372 ioarcb->write_data_transfer_length = 0;
@@ -4359,6 +4375,9 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4359 ioarcb->read_ioadl_len = 0; 4375 ioarcb->read_ioadl_len = 0;
4360 ioasa->ioasc = 0; 4376 ioasa->ioasc = 0;
4361 ioasa->residual_data_len = 0; 4377 ioasa->residual_data_len = 0;
4378 ioarcb->write_ioadl_addr =
4379 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
4380 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4362} 4381}
4363 4382
4364/** 4383/**
@@ -4457,12 +4476,13 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4457{ 4476{
4458 int i; 4477 int i;
4459 u16 data_len; 4478 u16 data_len;
4460 u32 ioasc; 4479 u32 ioasc, fd_ioasc;
4461 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 4480 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4462 __be32 *ioasa_data = (__be32 *)ioasa; 4481 __be32 *ioasa_data = (__be32 *)ioasa;
4463 int error_index; 4482 int error_index;
4464 4483
4465 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK; 4484 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
4485 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
4466 4486
4467 if (0 == ioasc) 4487 if (0 == ioasc)
4468 return; 4488 return;
@@ -4470,13 +4490,19 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
4470 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) 4490 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
4471 return; 4491 return;
4472 4492
4473 error_index = ipr_get_error(ioasc); 4493 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
4494 error_index = ipr_get_error(fd_ioasc);
4495 else
4496 error_index = ipr_get_error(ioasc);
4474 4497
4475 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { 4498 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
4476 /* Don't log an error if the IOA already logged one */ 4499 /* Don't log an error if the IOA already logged one */
4477 if (ioasa->ilid != 0) 4500 if (ioasa->ilid != 0)
4478 return; 4501 return;
4479 4502
4503 if (!ipr_is_gscsi(res))
4504 return;
4505
4480 if (ipr_error_table[error_index].log_ioasa == 0) 4506 if (ipr_error_table[error_index].log_ioasa == 0)
4481 return; 4507 return;
4482 } 4508 }
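Two behavioural changes sit in the ipr_dump_ioasa() hunk above: when the overall IOASC only says the bus was reset, the failing-device IOASC (fd_ioasc) is used for the error-table lookup instead, and at the default log level IOASA dumps are now limited to generic SCSI devices. A small sketch of the lookup selection; IPR_IOASC_BUS_WAS_RESET matches the 0x06290000 entry visible in the error table earlier in this diff, the other values are illustrative:

	#include <stdio.h>
	#include <stdint.h>

	#define IPR_IOASC_BUS_WAS_RESET 0x06290000u

	/* Prefer the failing-device IOASC when the command only failed because
	 * the bus was reset underneath it -- that is the more useful error. */
	static uint32_t lookup_ioasc(uint32_t ioasc, uint32_t fd_ioasc)
	{
		if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
			return fd_ioasc;
		return ioasc;
	}

	int main(void)
	{
		printf("0x%08x\n", (unsigned)lookup_ioasc(IPR_IOASC_BUS_WAS_RESET, 0x04448500));
		printf("0x%08x\n", (unsigned)lookup_ioasc(0x04118000, 0));
		return 0;
	}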
@@ -4636,11 +4662,11 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4636 return; 4662 return;
4637 } 4663 }
4638 4664
4639 if (ipr_is_gscsi(res)) 4665 if (!ipr_is_gscsi(res))
4640 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4641 else
4642 ipr_gen_sense(ipr_cmd); 4666 ipr_gen_sense(ipr_cmd);
4643 4667
4668 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4669
4644 switch (ioasc & IPR_IOASC_IOASC_MASK) { 4670 switch (ioasc & IPR_IOASC_IOASC_MASK) {
4645 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST: 4671 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4646 if (ipr_is_naca_model(res)) 4672 if (ipr_is_naca_model(res))
@@ -5121,7 +5147,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5121 struct ipr_ioarcb_ata_regs *regs; 5147 struct ipr_ioarcb_ata_regs *regs;
5122 5148
5123 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead)) 5149 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5124 return -EIO; 5150 return AC_ERR_SYSTEM;
5125 5151
5126 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5152 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5127 ioarcb = &ipr_cmd->ioarcb; 5153 ioarcb = &ipr_cmd->ioarcb;
@@ -5166,7 +5192,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5166 5192
5167 default: 5193 default:
5168 WARN_ON(1); 5194 WARN_ON(1);
5169 return -1; 5195 return AC_ERR_INVALID;
5170 } 5196 }
5171 5197
5172 mb(); 5198 mb();
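The two ipr_qc_issue() hunks above stop returning negative errno values from the libata ->qc_issue() hook; libata interprets that return as an AC_ERR_* error mask, with 0 meaning the command was issued. A toy, self-contained version of the convention; the AC_ERR_SYSTEM/AC_ERR_INVALID bit values below are stand-ins, the real definitions live in <linux/libata.h>:

	#include <stdio.h>

	/* Illustrative bit values only; real masks come from <linux/libata.h>. */
	#define AC_ERR_SYSTEM  (1u << 6)
	#define AC_ERR_INVALID (1u << 7)

	struct fake_qc { int protocol_supported; };

	static int adapter_dead = 0;

	static unsigned int sketch_qc_issue(const struct fake_qc *qc)
	{
		if (adapter_dead)
			return AC_ERR_SYSTEM;	/* host-side failure, not -EIO */
		if (!qc->protocol_supported)
			return AC_ERR_INVALID;	/* can never succeed, not -1 */
		return 0;			/* 0 == command issued */
	}

	int main(void)
	{
		struct fake_qc qc = { .protocol_supported = 1 };

		printf("issue -> %u\n", sketch_qc_issue(&qc));
		return 0;
	}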
@@ -6188,7 +6214,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6188 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); 6214 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6189 6215
6190 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 6216 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6191 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ); 6217 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6192 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 6218 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
6193 ipr_cmd->done = ipr_reset_ioa_job; 6219 ipr_cmd->done = ipr_reset_ioa_job;
6194 add_timer(&ipr_cmd->timer); 6220 add_timer(&ipr_cmd->timer);
@@ -6385,6 +6411,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
6385 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); 6411 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
6386 6412
6387 if (rc != PCIBIOS_SUCCESSFUL) { 6413 if (rc != PCIBIOS_SUCCESSFUL) {
6414 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
6388 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 6415 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
6389 rc = IPR_RC_JOB_CONTINUE; 6416 rc = IPR_RC_JOB_CONTINUE;
6390 } else { 6417 } else {
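The ipr_reset_start_bist() hunk above adds the missing pci_unblock_user_cfg_access() call on the failure path, so a failed PCI_BIST write no longer leaves user config-space access blocked. A trivial sketch of the balanced block/unblock pattern the fix restores; the counter and helper names are invented for illustration:

	#include <stdio.h>

	static int cfg_access_blocked;

	static void block_cfg(void)   { cfg_access_blocked++; }
	static void unblock_cfg(void) { cfg_access_blocked--; }

	static int start_bist(int write_ok)
	{
		block_cfg();
		if (!write_ok) {
			unblock_cfg();	/* the added call: keep block/unblock paired */
			return -1;	/* report the error, let the reset job continue */
		}
		/* success: a later reset stage ends the blocked window */
		return 0;
	}

	int main(void)
	{
		start_bist(0);
		printf("blocked depth after failed BIST write: %d\n", cfg_access_blocked);
		return 0;
	}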
@@ -7117,8 +7144,6 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7117 ioa_cfg->pdev = pdev; 7144 ioa_cfg->pdev = pdev;
7118 ioa_cfg->log_level = ipr_log_level; 7145 ioa_cfg->log_level = ipr_log_level;
7119 ioa_cfg->doorbell = IPR_DOORBELL; 7146 ioa_cfg->doorbell = IPR_DOORBELL;
7120 if (!ipr_auto_create)
7121 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7122 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); 7147 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
7123 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); 7148 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
7124 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL); 7149 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
@@ -7233,6 +7258,13 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7233 goto out_scsi_host_put; 7258 goto out_scsi_host_put;
7234 } 7259 }
7235 7260
7261 if (ipr_transop_timeout)
7262 ioa_cfg->transop_timeout = ipr_transop_timeout;
7263 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
7264 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
7265 else
7266 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
7267
7236 ipr_regs_pci = pci_resource_start(pdev, 0); 7268 ipr_regs_pci = pci_resource_start(pdev, 0);
7237 7269
7238 rc = pci_request_regions(pdev, IPR_NAME); 7270 rc = pci_request_regions(pdev, IPR_NAME);
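The probe hunk above picks the operational timeout in three steps: an explicit ipr_transop_timeout module parameter wins, otherwise adapters flagged with IPR_USE_LONG_TRANSOP_TIMEOUT in their PCI table entry get the long timeout, and everything else keeps the default. A runnable restatement of that selection using the constants added later in this diff (5 and 12 minutes, in seconds):

	#include <stdio.h>

	#define IPR_OPERATIONAL_TIMEOUT		(5 * 60)
	#define IPR_LONG_OPERATIONAL_TIMEOUT	(12 * 60)
	#define IPR_USE_LONG_TRANSOP_TIMEOUT	0x00000001

	/* Mirrors the selection order in the hunk above: module parameter first,
	 * then the per-device driver_data flag, then the default. */
	static unsigned int pick_transop_timeout(unsigned int module_param,
						 unsigned long driver_data)
	{
		if (module_param)
			return module_param;
		if (driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
			return IPR_LONG_OPERATIONAL_TIMEOUT;
		return IPR_OPERATIONAL_TIMEOUT;
	}

	int main(void)
	{
		printf("%u\n", pick_transop_timeout(0, IPR_USE_LONG_TRANSOP_TIMEOUT)); /* 720 */
		printf("%u\n", pick_transop_timeout(90, 0));                           /* 90  */
		return 0;
	}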
@@ -7540,29 +7572,45 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
7540 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 7572 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7541 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 }, 7573 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
7542 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, 7574 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
7543 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0, 0 }, 7575 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
7576 IPR_USE_LONG_TRANSOP_TIMEOUT },
7544 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 7577 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7545 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, 7578 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7546 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 7579 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7547 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 }, 7580 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
7548 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, 7581 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
7549 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 }, 7582 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7583 IPR_USE_LONG_TRANSOP_TIMEOUT },
7550 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7584 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7551 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 }, 7585 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
7552 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7586 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7553 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 }, 7587 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
7554 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 7588 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
7555 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 }, 7589 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
7590 IPR_USE_LONG_TRANSOP_TIMEOUT },
7591 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7592 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 0 },
7593 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7594 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
7595 IPR_USE_LONG_TRANSOP_TIMEOUT },
7596 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7597 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
7556 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 7598 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7557 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 0 }, 7599 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
7600 IPR_USE_LONG_TRANSOP_TIMEOUT },
7558 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, 7601 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
7559 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 }, 7602 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
7560 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 7603 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7561 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 }, 7604 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
7562 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 7605 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7563 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0, 0 }, 7606 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
7607 IPR_USE_LONG_TRANSOP_TIMEOUT },
7564 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 7608 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7565 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, 0 }, 7609 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
7610 IPR_USE_LONG_TRANSOP_TIMEOUT },
7611 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
7612 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
7613 IPR_USE_LONG_TRANSOP_TIMEOUT },
7566 { } 7614 { }
7567}; 7615};
7568MODULE_DEVICE_TABLE(pci, ipr_pci_table); 7616MODULE_DEVICE_TABLE(pci, ipr_pci_table);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 88f285de97bb..bc53d7cebe0a 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -37,8 +37,8 @@
37/* 37/*
38 * Literals 38 * Literals
39 */ 39 */
40#define IPR_DRIVER_VERSION "2.3.1" 40#define IPR_DRIVER_VERSION "2.3.2"
41#define IPR_DRIVER_DATE "(January 23, 2007)" 41#define IPR_DRIVER_DATE "(March 23, 2007)"
42 42
43/* 43/*
44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -55,6 +55,7 @@
55#define IPR_NUM_BASE_CMD_BLKS 100 55#define IPR_NUM_BASE_CMD_BLKS 100
56 56
57#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339 57#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339
58#define PCI_DEVICE_ID_IBM_SCAMP_E 0x034A
58 59
59#define IPR_SUBS_DEV_ID_2780 0x0264 60#define IPR_SUBS_DEV_ID_2780 0x0264
60#define IPR_SUBS_DEV_ID_5702 0x0266 61#define IPR_SUBS_DEV_ID_5702 0x0266
@@ -69,8 +70,12 @@
69#define IPR_SUBS_DEV_ID_572A 0x02C1 70#define IPR_SUBS_DEV_ID_572A 0x02C1
70#define IPR_SUBS_DEV_ID_572B 0x02C2 71#define IPR_SUBS_DEV_ID_572B 0x02C2
71#define IPR_SUBS_DEV_ID_572F 0x02C3 72#define IPR_SUBS_DEV_ID_572F 0x02C3
73#define IPR_SUBS_DEV_ID_574D 0x030B
74#define IPR_SUBS_DEV_ID_574E 0x030A
72#define IPR_SUBS_DEV_ID_575B 0x030D 75#define IPR_SUBS_DEV_ID_575B 0x030D
73#define IPR_SUBS_DEV_ID_575C 0x0338 76#define IPR_SUBS_DEV_ID_575C 0x0338
77#define IPR_SUBS_DEV_ID_575D 0x033E
78#define IPR_SUBS_DEV_ID_57B3 0x033A
74#define IPR_SUBS_DEV_ID_57B7 0x0360 79#define IPR_SUBS_DEV_ID_57B7 0x0360
75#define IPR_SUBS_DEV_ID_57B8 0x02C2 80#define IPR_SUBS_DEV_ID_57B8 0x02C2
76 81
@@ -104,6 +109,9 @@
104#define IPR_IOASC_IOA_WAS_RESET 0x10000001 109#define IPR_IOASC_IOA_WAS_RESET 0x10000001
105#define IPR_IOASC_PCI_ACCESS_ERROR 0x10000002 110#define IPR_IOASC_PCI_ACCESS_ERROR 0x10000002
106 111
112/* Driver data flags */
113#define IPR_USE_LONG_TRANSOP_TIMEOUT 0x00000001
114
107#define IPR_DEFAULT_MAX_ERROR_DUMP 984 115#define IPR_DEFAULT_MAX_ERROR_DUMP 984
108#define IPR_NUM_LOG_HCAMS 2 116#define IPR_NUM_LOG_HCAMS 2
109#define IPR_NUM_CFG_CHG_HCAMS 2 117#define IPR_NUM_CFG_CHG_HCAMS 2
@@ -179,6 +187,7 @@
179#define IPR_SET_SUP_DEVICE_TIMEOUT (2 * 60 * HZ) 187#define IPR_SET_SUP_DEVICE_TIMEOUT (2 * 60 * HZ)
180#define IPR_REQUEST_SENSE_TIMEOUT (10 * HZ) 188#define IPR_REQUEST_SENSE_TIMEOUT (10 * HZ)
181#define IPR_OPERATIONAL_TIMEOUT (5 * 60) 189#define IPR_OPERATIONAL_TIMEOUT (5 * 60)
190#define IPR_LONG_OPERATIONAL_TIMEOUT (12 * 60)
182#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ) 191#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ)
183#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10) 192#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10)
184#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ) 193#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ)
@@ -413,9 +422,25 @@ struct ipr_ioarcb_ata_regs {
413 u8 ctl; 422 u8 ctl;
414}__attribute__ ((packed, aligned(4))); 423}__attribute__ ((packed, aligned(4)));
415 424
425struct ipr_ioadl_desc {
426 __be32 flags_and_data_len;
427#define IPR_IOADL_FLAGS_MASK 0xff000000
428#define IPR_IOADL_GET_FLAGS(x) (be32_to_cpu(x) & IPR_IOADL_FLAGS_MASK)
429#define IPR_IOADL_DATA_LEN_MASK 0x00ffffff
430#define IPR_IOADL_GET_DATA_LEN(x) (be32_to_cpu(x) & IPR_IOADL_DATA_LEN_MASK)
431#define IPR_IOADL_FLAGS_READ 0x48000000
432#define IPR_IOADL_FLAGS_READ_LAST 0x49000000
433#define IPR_IOADL_FLAGS_WRITE 0x68000000
434#define IPR_IOADL_FLAGS_WRITE_LAST 0x69000000
435#define IPR_IOADL_FLAGS_LAST 0x01000000
436
437 __be32 address;
438}__attribute__((packed, aligned (8)));
439
416struct ipr_ioarcb_add_data { 440struct ipr_ioarcb_add_data {
417 union { 441 union {
418 struct ipr_ioarcb_ata_regs regs; 442 struct ipr_ioarcb_ata_regs regs;
443 struct ipr_ioadl_desc ioadl[5];
419 __be32 add_cmd_parms[10]; 444 __be32 add_cmd_parms[10];
420 }u; 445 }u;
421}__attribute__ ((packed, aligned(4))); 446}__attribute__ ((packed, aligned(4)));
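The ipr_ioadl_desc definition moved above packs an 8-bit flags field and a 24-bit data length into one big-endian 32-bit word, which the IPR_IOADL_GET_FLAGS/IPR_IOADL_GET_DATA_LEN macros unpack. A host-endian sketch of the same packing; the byte swapping done by be32_to_cpu() in the real macros is deliberately left out:

	#include <stdio.h>
	#include <stdint.h>

	#define IOADL_FLAGS_MASK	0xff000000u
	#define IOADL_DATA_LEN_MASK	0x00ffffffu
	#define IOADL_FLAGS_READ_LAST	0x49000000u

	int main(void)
	{
		/* one descriptor: 4 KiB read, marked as the last entry in the list */
		uint32_t desc = IOADL_FLAGS_READ_LAST | 4096;

		printf("flags=0x%08x len=%u\n",
		       (unsigned)(desc & IOADL_FLAGS_MASK),
		       (unsigned)(desc & IOADL_DATA_LEN_MASK));
		return 0;
	}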
@@ -447,21 +472,6 @@ struct ipr_ioarcb {
447 struct ipr_ioarcb_add_data add_data; 472 struct ipr_ioarcb_add_data add_data;
448}__attribute__((packed, aligned (4))); 473}__attribute__((packed, aligned (4)));
449 474
450struct ipr_ioadl_desc {
451 __be32 flags_and_data_len;
452#define IPR_IOADL_FLAGS_MASK 0xff000000
453#define IPR_IOADL_GET_FLAGS(x) (be32_to_cpu(x) & IPR_IOADL_FLAGS_MASK)
454#define IPR_IOADL_DATA_LEN_MASK 0x00ffffff
455#define IPR_IOADL_GET_DATA_LEN(x) (be32_to_cpu(x) & IPR_IOADL_DATA_LEN_MASK)
456#define IPR_IOADL_FLAGS_READ 0x48000000
457#define IPR_IOADL_FLAGS_READ_LAST 0x49000000
458#define IPR_IOADL_FLAGS_WRITE 0x68000000
459#define IPR_IOADL_FLAGS_WRITE_LAST 0x69000000
460#define IPR_IOADL_FLAGS_LAST 0x01000000
461
462 __be32 address;
463}__attribute__((packed, aligned (8)));
464
465struct ipr_ioasa_vset { 475struct ipr_ioasa_vset {
466 __be32 failing_lba_hi; 476 __be32 failing_lba_hi;
467 __be32 failing_lba_lo; 477 __be32 failing_lba_lo;
@@ -1119,6 +1129,7 @@ struct ipr_ioa_cfg {
1119 1129
1120 struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES]; 1130 struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
1121 1131
1132 unsigned int transop_timeout;
1122 const struct ipr_chip_cfg_t *chip_cfg; 1133 const struct ipr_chip_cfg_t *chip_cfg;
1123 1134
1124 void __iomem *hdw_dma_regs; /* iomapped PCI memory space */ 1135 void __iomem *hdw_dma_regs; /* iomapped PCI memory space */
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 8f55e1431433..c9a3abf9e7b6 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -527,12 +527,12 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
527 * than 8K, but there are no targets that currently do this. 527 * than 8K, but there are no targets that currently do this.
528 * For now we fail until we find a vendor that needs it 528 * For now we fail until we find a vendor that needs it
529 */ 529 */
530 if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH < 530 if (ISCSI_DEF_MAX_RECV_SEG_LEN <
531 tcp_conn->in.datalen) { 531 tcp_conn->in.datalen) {
532 printk(KERN_ERR "iscsi_tcp: received buffer of len %u " 532 printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
533 "but conn buffer is only %u (opcode %0x)\n", 533 "but conn buffer is only %u (opcode %0x)\n",
534 tcp_conn->in.datalen, 534 tcp_conn->in.datalen,
535 DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode); 535 ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
536 rc = ISCSI_ERR_PROTO; 536 rc = ISCSI_ERR_PROTO;
537 break; 537 break;
538 } 538 }
@@ -1762,7 +1762,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1762 * due to strange issues with iser these are not set 1762 * due to strange issues with iser these are not set
1763 * in iscsi_conn_setup 1763 * in iscsi_conn_setup
1764 */ 1764 */
1765 conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; 1765 conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
1766 1766
1767 tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL); 1767 tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
1768 if (!tcp_conn) 1768 if (!tcp_conn)
@@ -1777,14 +1777,24 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1777 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, 1777 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1778 CRYPTO_ALG_ASYNC); 1778 CRYPTO_ALG_ASYNC);
1779 tcp_conn->tx_hash.flags = 0; 1779 tcp_conn->tx_hash.flags = 0;
1780 if (IS_ERR(tcp_conn->tx_hash.tfm)) 1780 if (IS_ERR(tcp_conn->tx_hash.tfm)) {
1781 printk(KERN_ERR "Could not create connection due to crc32c "
1782 "loading error %ld. Make sure the crc32c module is "
1783 "built as a module or into the kernel\n",
1784 PTR_ERR(tcp_conn->tx_hash.tfm));
1781 goto free_tcp_conn; 1785 goto free_tcp_conn;
1786 }
1782 1787
1783 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0, 1788 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1784 CRYPTO_ALG_ASYNC); 1789 CRYPTO_ALG_ASYNC);
1785 tcp_conn->rx_hash.flags = 0; 1790 tcp_conn->rx_hash.flags = 0;
1786 if (IS_ERR(tcp_conn->rx_hash.tfm)) 1791 if (IS_ERR(tcp_conn->rx_hash.tfm)) {
1792 printk(KERN_ERR "Could not create connection due to crc32c "
1793 "loading error %ld. Make sure the crc32c module is "
1794 "built as a module or into the kernel\n",
1795 PTR_ERR(tcp_conn->rx_hash.tfm));
1787 goto free_tx_tfm; 1796 goto free_tx_tfm;
1797 }
1788 1798
1789 return cls_conn; 1799 return cls_conn;
1790 1800
@@ -2138,6 +2148,7 @@ static struct scsi_host_template iscsi_sht = {
2138 .change_queue_depth = iscsi_change_queue_depth, 2148 .change_queue_depth = iscsi_change_queue_depth,
2139 .can_queue = ISCSI_XMIT_CMDS_MAX - 1, 2149 .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
2140 .sg_tablesize = ISCSI_SG_TABLESIZE, 2150 .sg_tablesize = ISCSI_SG_TABLESIZE,
2151 .max_sectors = 0xFFFF,
2141 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 2152 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
2142 .eh_abort_handler = iscsi_eh_abort, 2153 .eh_abort_handler = iscsi_eh_abort,
2143 .eh_host_reset_handler = iscsi_eh_host_reset, 2154 .eh_host_reset_handler = iscsi_eh_host_reset,
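The iscsi_tcp hunks above turn a silent crc32c allocation failure into an explicit message, because crypto_alloc_hash() reports failure through an error pointer rather than NULL. A userspace imitation of the IS_ERR()/PTR_ERR() helpers used to check such pointers; the real ones live in <linux/err.h>, these are simplified stand-ins:

	#include <stdio.h>
	#include <errno.h>

	#define MAX_ERRNO 4095

	static void *ERR_PTR(long error) { return (void *)error; }
	static long PTR_ERR(const void *ptr) { return (long)ptr; }
	static int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		/* what an allocation helper returns when no crc32c provider exists */
		void *tfm = ERR_PTR(-ENOENT);

		if (IS_ERR(tfm))
			printf("Could not create connection due to crc32c loading error %ld. "
			       "Make sure the crc32c module is built as a module or into "
			       "the kernel\n", PTR_ERR(tfm));
		return 0;
	}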
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 7c75771c77ff..3f5b9b445b29 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -25,6 +25,7 @@
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/kfifo.h> 26#include <linux/kfifo.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <asm/unaligned.h>
28#include <net/tcp.h> 29#include <net/tcp.h>
29#include <scsi/scsi_cmnd.h> 30#include <scsi/scsi_cmnd.h>
30#include <scsi/scsi_device.h> 31#include <scsi/scsi_device.h>
@@ -269,14 +270,14 @@ invalid_datalen:
269 goto out; 270 goto out;
270 } 271 }
271 272
272 senselen = be16_to_cpu(*(__be16 *)data); 273 senselen = be16_to_cpu(get_unaligned((__be16 *) data));
273 if (datalen < senselen) 274 if (datalen < senselen)
274 goto invalid_datalen; 275 goto invalid_datalen;
275 276
276 memcpy(sc->sense_buffer, data + 2, 277 memcpy(sc->sense_buffer, data + 2,
277 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE)); 278 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
278 debug_scsi("copied %d bytes of sense\n", 279 debug_scsi("copied %d bytes of sense\n",
279 min(senselen, SCSI_SENSE_BUFFERSIZE)); 280 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
280 } 281 }
281 282
282 if (sc->sc_data_direction == DMA_TO_DEVICE) 283 if (sc->sc_data_direction == DMA_TO_DEVICE)
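The sense-data hunk above reads the 16-bit sense length with get_unaligned() because the status PDU's data buffer has no alignment guarantee, and it clamps the debug message with the same min_t() bound used for the copy. A portable stand-in for that unaligned big-endian read; the kernel helper comes from <asm/unaligned.h>, memcpy() below is just one safe way to express it:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	static uint16_t get_unaligned_be16(const void *p)
	{
		uint8_t b[2];

		memcpy(b, p, 2);			/* no aligned load of a __be16 */
		return (uint16_t)((b[0] << 8) | b[1]);	/* sense length is big-endian */
	}

	int main(void)
	{
		/* first two bytes of the data-in buffer carry the sense data length */
		uint8_t data[] = { 0x00, 0x12, 0x70, 0x00 /* ... sense bytes ... */ };

		printf("senselen = %u\n", get_unaligned_be16(data));
		return 0;
	}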
@@ -577,7 +578,7 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
577} 578}
578EXPORT_SYMBOL_GPL(iscsi_conn_failure); 579EXPORT_SYMBOL_GPL(iscsi_conn_failure);
579 580
580static int iscsi_xmit_imm_task(struct iscsi_conn *conn) 581static int iscsi_xmit_mtask(struct iscsi_conn *conn)
581{ 582{
582 struct iscsi_hdr *hdr = conn->mtask->hdr; 583 struct iscsi_hdr *hdr = conn->mtask->hdr;
583 int rc, was_logout = 0; 584 int rc, was_logout = 0;
@@ -591,6 +592,9 @@ static int iscsi_xmit_imm_task(struct iscsi_conn *conn)
591 if (rc) 592 if (rc)
592 return rc; 593 return rc;
593 594
595 /* done with this in-progress mtask */
596 conn->mtask = NULL;
597
594 if (was_logout) { 598 if (was_logout) {
595 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 599 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
596 return -ENODATA; 600 return -ENODATA;
@@ -643,11 +647,9 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
643 conn->ctask = NULL; 647 conn->ctask = NULL;
644 } 648 }
645 if (conn->mtask) { 649 if (conn->mtask) {
646 rc = iscsi_xmit_imm_task(conn); 650 rc = iscsi_xmit_mtask(conn);
647 if (rc) 651 if (rc)
648 goto again; 652 goto again;
649 /* done with this in-progress mtask */
650 conn->mtask = NULL;
651 } 653 }
652 654
653 /* process immediate first */ 655 /* process immediate first */
@@ -658,12 +660,10 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
658 list_add_tail(&conn->mtask->running, 660 list_add_tail(&conn->mtask->running,
659 &conn->mgmt_run_list); 661 &conn->mgmt_run_list);
660 spin_unlock_bh(&conn->session->lock); 662 spin_unlock_bh(&conn->session->lock);
661 rc = iscsi_xmit_imm_task(conn); 663 rc = iscsi_xmit_mtask(conn);
662 if (rc) 664 if (rc)
663 goto again; 665 goto again;
664 } 666 }
665 /* done with this mtask */
666 conn->mtask = NULL;
667 } 667 }
668 668
669 /* process command queue */ 669 /* process command queue */
@@ -701,12 +701,10 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
701 list_add_tail(&conn->mtask->running, 701 list_add_tail(&conn->mtask->running,
702 &conn->mgmt_run_list); 702 &conn->mgmt_run_list);
703 spin_unlock_bh(&conn->session->lock); 703 spin_unlock_bh(&conn->session->lock);
704 rc = tt->xmit_mgmt_task(conn, conn->mtask); 704 rc = iscsi_xmit_mtask(conn);
705 if (rc) 705 if (rc)
706 goto again; 706 goto again;
707 } 707 }
708 /* done with this mtask */
709 conn->mtask = NULL;
710 } 708 }
711 709
712 return -ENODATA; 710 return -ENODATA;
@@ -1523,7 +1521,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1523 } 1521 }
1524 spin_unlock_bh(&session->lock); 1522 spin_unlock_bh(&session->lock);
1525 1523
1526 data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL); 1524 data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
1527 if (!data) 1525 if (!data)
1528 goto login_mtask_data_alloc_fail; 1526 goto login_mtask_data_alloc_fail;
1529 conn->login_mtask->data = conn->data = data; 1527 conn->login_mtask->data = conn->data = data;
@@ -1597,6 +1595,9 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1597 wake_up(&conn->ehwait); 1595 wake_up(&conn->ehwait);
1598 } 1596 }
1599 1597
1598 /* flush queued up work because we free the connection below */
1599 scsi_flush_work(session->host);
1600
1600 spin_lock_bh(&session->lock); 1601 spin_lock_bh(&session->lock);
1601 kfree(conn->data); 1602 kfree(conn->data);
1602 kfree(conn->persistent_address); 1603 kfree(conn->persistent_address);
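The libiscsi hunks above rename iscsi_xmit_imm_task() to iscsi_xmit_mtask(), route the remaining management transmit path through it, and move the conn->mtask = NULL bookkeeping inside the helper so every caller drops the in-progress task exactly once; the teardown path also flushes queued work before freeing connection data. A toy sketch of that ownership change, with heavily simplified structures:

	#include <stdio.h>
	#include <stdlib.h>

	struct conn { void *mtask; };

	static int xmit_mtask(struct conn *c)
	{
		/* ... transmit c->mtask ... */
		c->mtask = NULL;	/* cleared here, not after each call site */
		return 0;
	}

	int main(void)
	{
		struct conn c = { .mtask = malloc(16) };
		void *sent = c.mtask;

		xmit_mtask(&c);
		printf("in-progress mtask cleared: %s\n", c.mtask ? "no" : "yes");
		free(sent);
		return 0;
	}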
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 6335830df810..5631c199a8eb 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -224,8 +224,7 @@ static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
224 struct srp_direct_buf *md = NULL; 224 struct srp_direct_buf *md = NULL;
225 struct scatterlist dummy, *sg = NULL; 225 struct scatterlist dummy, *sg = NULL;
226 dma_addr_t token = 0; 226 dma_addr_t token = 0;
227 long err; 227 int err = 0;
228 unsigned int done = 0;
229 int nmd, nsg = 0, len; 228 int nmd, nsg = 0, len;
230 229
231 if (dma_map || ext_desc) { 230 if (dma_map || ext_desc) {
@@ -257,8 +256,8 @@ static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
257 sg_dma_address(&dummy) = token; 256 sg_dma_address(&dummy) = token;
258 err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE, 257 err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
259 id->table_desc.len); 258 id->table_desc.len);
260 if (err < 0) { 259 if (err) {
261 eprintk("Error copying indirect table %ld\n", err); 260 eprintk("Error copying indirect table %d\n", err);
262 goto free_mem; 261 goto free_mem;
263 } 262 }
264 } else { 263 } else {
@@ -271,6 +270,7 @@ rdma:
271 nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, DMA_BIDIRECTIONAL); 270 nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, DMA_BIDIRECTIONAL);
272 if (!nsg) { 271 if (!nsg) {
273 eprintk("fail to map %p %d\n", iue, sc->use_sg); 272 eprintk("fail to map %p %d\n", iue, sc->use_sg);
273 err = -EIO;
274 goto free_mem; 274 goto free_mem;
275 } 275 }
276 len = min(sc->request_bufflen, id->len); 276 len = min(sc->request_bufflen, id->len);
@@ -286,7 +286,7 @@ free_mem:
286 if (token && dma_map) 286 if (token && dma_map)
287 dma_free_coherent(iue->target->dev, id->table_desc.len, md, token); 287 dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);
288 288
289 return done; 289 return err;
290} 290}
291 291
292static int data_out_desc_size(struct srp_cmd *cmd) 292static int data_out_desc_size(struct srp_cmd *cmd)
@@ -351,7 +351,7 @@ int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
351 break; 351 break;
352 default: 352 default:
353 eprintk("Unknown format %d %x\n", dir, format); 353 eprintk("Unknown format %d %x\n", dir, format);
354 break; 354 err = -EINVAL;
355 } 355 }
356 356
357 return err; 357 return err;
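The libsrp hunks above make the data-transfer paths report failure as 0-or-negative-errno instead of returning an unused byte count, adding explicit errors for the dma_map_sg() failure and unknown descriptor format cases. A toy restatement of that return-value contract; the separate failure points are collapsed into one function for brevity:

	#include <stdio.h>
	#include <errno.h>

	static int transfer_data(int map_ok, int format_known)
	{
		int err = 0;

		if (!map_ok)
			return -EIO;	/* mapping failure now reports an error */
		if (!format_known)
			err = -EINVAL;	/* unknown descriptor format */
		return err;		/* 0 on success */
	}

	int main(void)
	{
		printf("%d %d %d\n", transfer_data(1, 1), transfer_data(0, 1),
		       transfer_data(1, 0));
		return 0;
	}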
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 057fd7e0e379..dcf6106f557a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -671,7 +671,7 @@ static int
671lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len) 671lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
672{ 672{
673 uint8_t lenlo, lenhi; 673 uint8_t lenlo, lenhi;
674 uint32_t Length; 674 int Length;
675 int i, j; 675 int i, j;
676 int finished = 0; 676 int finished = 0;
677 int index = 0; 677 int index = 0;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index a967fadb7439..08060fb478b6 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -87,6 +87,7 @@ MODULE_AUTHOR("Willem Riede");
87MODULE_DESCRIPTION("OnStream {DI-|FW-|SC-|USB}{30|50} Tape Driver"); 87MODULE_DESCRIPTION("OnStream {DI-|FW-|SC-|USB}{30|50} Tape Driver");
88MODULE_LICENSE("GPL"); 88MODULE_LICENSE("GPL");
89MODULE_ALIAS_CHARDEV_MAJOR(OSST_MAJOR); 89MODULE_ALIAS_CHARDEV_MAJOR(OSST_MAJOR);
90MODULE_ALIAS_SCSI_DEVICE(TYPE_TAPE);
90 91
91module_param(max_dev, int, 0444); 92module_param(max_dev, int, 0444);
92MODULE_PARM_DESC(max_dev, "Maximum number of OnStream Tape Drives to attach (4)"); 93MODULE_PARM_DESC(max_dev, "Maximum number of OnStream Tape Drives to attach (4)");
diff --git a/drivers/scsi/pci2000.h b/drivers/scsi/pci2000.h
deleted file mode 100644
index 0ebd8ce9e1de..000000000000
--- a/drivers/scsi/pci2000.h
+++ /dev/null
@@ -1,197 +0,0 @@
1/****************************************************************************
2 * Perceptive Solutions, Inc. PCI-2000 device driver for Linux.
3 *
4 * pci2000.h - Linux Host Driver for PCI-2000 IntelliCache SCSI Adapters
5 *
6 * Copyright (c) 1997-1999 Perceptive Solutions, Inc.
7 * All Rights Reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that redistributions of source
11 * code retain the above copyright notice and this comment without
12 * modification.
13 *
14 * Technical updates and product information at:
15 * http://www.psidisk.com
16 *
17 * Please send questions, comments, bug reports to:
18 * tech@psidisk.com Technical Support
19 *
20 ****************************************************************************/
21#ifndef _PCI2000_H
22#define _PCI2000_H
23
24#include <linux/types.h>
25
26#ifndef PSI_EIDE_SCSIOP
27#define PSI_EIDE_SCSIOP 1
28
29#define LINUXVERSION(v,p,s) (((v)<<16) + ((p)<<8) + (s))
30
31/************************************************/
32/* definition of standard data types */
33/************************************************/
34#define CHAR char
35#define UCHAR unsigned char
36#define SHORT short
37#define USHORT unsigned short
38#define BOOL long
39#define LONG long
40#define ULONG unsigned long
41#define VOID void
42
43typedef CHAR *PCHAR;
44typedef UCHAR *PUCHAR;
45typedef SHORT *PSHORT;
46typedef USHORT *PUSHORT;
47typedef BOOL *PBOOL;
48typedef LONG *PLONG;
49typedef ULONG *PULONG;
50typedef VOID *PVOID;
51
52
53/************************************************/
54/* Misc. macros */
55/************************************************/
56#define ANY2SCSI(up, p) \
57((UCHAR *)up)[0] = (((ULONG)(p)) >> 8); \
58((UCHAR *)up)[1] = ((ULONG)(p));
59
60#define SCSI2LONG(up) \
61( (((long)*(((UCHAR *)up))) << 16) \
62+ (((long)(((UCHAR *)up)[1])) << 8) \
63+ ((long)(((UCHAR *)up)[2])) )
64
65#define XANY2SCSI(up, p) \
66((UCHAR *)up)[0] = ((long)(p)) >> 24; \
67((UCHAR *)up)[1] = ((long)(p)) >> 16; \
68((UCHAR *)up)[2] = ((long)(p)) >> 8; \
69((UCHAR *)up)[3] = ((long)(p));
70
71#define XSCSI2LONG(up) \
72( (((long)(((UCHAR *)up)[0])) << 24) \
73+ (((long)(((UCHAR *)up)[1])) << 16) \
74+ (((long)(((UCHAR *)up)[2])) << 8) \
75+ ((long)(((UCHAR *)up)[3])) )
76
77/************************************************/
78/* SCSI CDB operation codes */
79/************************************************/
80#define SCSIOP_TEST_UNIT_READY 0x00
81#define SCSIOP_REZERO_UNIT 0x01
82#define SCSIOP_REWIND 0x01
83#define SCSIOP_REQUEST_BLOCK_ADDR 0x02
84#define SCSIOP_REQUEST_SENSE 0x03
85#define SCSIOP_FORMAT_UNIT 0x04
86#define SCSIOP_READ_BLOCK_LIMITS 0x05
87#define SCSIOP_REASSIGN_BLOCKS 0x07
88#define SCSIOP_READ6 0x08
89#define SCSIOP_RECEIVE 0x08
90#define SCSIOP_WRITE6 0x0A
91#define SCSIOP_PRINT 0x0A
92#define SCSIOP_SEND 0x0A
93#define SCSIOP_SEEK6 0x0B
94#define SCSIOP_TRACK_SELECT 0x0B
95#define SCSIOP_SLEW_PRINT 0x0B
96#define SCSIOP_SEEK_BLOCK 0x0C
97#define SCSIOP_PARTITION 0x0D
98#define SCSIOP_READ_REVERSE 0x0F
99#define SCSIOP_WRITE_FILEMARKS 0x10
100#define SCSIOP_FLUSH_BUFFER 0x10
101#define SCSIOP_SPACE 0x11
102#define SCSIOP_INQUIRY 0x12
103#define SCSIOP_VERIFY6 0x13
104#define SCSIOP_RECOVER_BUF_DATA 0x14
105#define SCSIOP_MODE_SELECT 0x15
106#define SCSIOP_RESERVE_UNIT 0x16
107#define SCSIOP_RELEASE_UNIT 0x17
108#define SCSIOP_COPY 0x18
109#define SCSIOP_ERASE 0x19
110#define SCSIOP_MODE_SENSE 0x1A
111#define SCSIOP_START_STOP_UNIT 0x1B
112#define SCSIOP_STOP_PRINT 0x1B
113#define SCSIOP_LOAD_UNLOAD 0x1B
114#define SCSIOP_RECEIVE_DIAGNOSTIC 0x1C
115#define SCSIOP_SEND_DIAGNOSTIC 0x1D
116#define SCSIOP_MEDIUM_REMOVAL 0x1E
117#define SCSIOP_READ_CAPACITY 0x25
118#define SCSIOP_READ 0x28
119#define SCSIOP_WRITE 0x2A
120#define SCSIOP_SEEK 0x2B
121#define SCSIOP_LOCATE 0x2B
122#define SCSIOP_WRITE_VERIFY 0x2E
123#define SCSIOP_VERIFY 0x2F
124#define SCSIOP_SEARCH_DATA_HIGH 0x30
125#define SCSIOP_SEARCH_DATA_EQUAL 0x31
126#define SCSIOP_SEARCH_DATA_LOW 0x32
127#define SCSIOP_SET_LIMITS 0x33
128#define SCSIOP_READ_POSITION 0x34
129#define SCSIOP_SYNCHRONIZE_CACHE 0x35
130#define SCSIOP_COMPARE 0x39
131#define SCSIOP_COPY_COMPARE 0x3A
132#define SCSIOP_WRITE_DATA_BUFF 0x3B
133#define SCSIOP_READ_DATA_BUFF 0x3C
134#define SCSIOP_CHANGE_DEFINITION 0x40
135#define SCSIOP_READ_SUB_CHANNEL 0x42
136#define SCSIOP_READ_TOC 0x43
137#define SCSIOP_READ_HEADER 0x44
138#define SCSIOP_PLAY_AUDIO 0x45
139#define SCSIOP_PLAY_AUDIO_MSF 0x47
140#define SCSIOP_PLAY_TRACK_INDEX 0x48
141#define SCSIOP_PLAY_TRACK_RELATIVE 0x49
142#define SCSIOP_PAUSE_RESUME 0x4B
143#define SCSIOP_LOG_SELECT 0x4C
144#define SCSIOP_LOG_SENSE 0x4D
145#define SCSIOP_MODE_SELECT10 0x55
146#define SCSIOP_MODE_SENSE10 0x5A
147#define SCSIOP_LOAD_UNLOAD_SLOT 0xA6
148#define SCSIOP_MECHANISM_STATUS 0xBD
149#define SCSIOP_READ_CD 0xBE
150
151// SCSI read capacity structure
152typedef struct _READ_CAPACITY_DATA
153 {
154 ULONG blks; /* total blocks (converted to little endian) */
155 ULONG blksiz; /* size of each (converted to little endian) */
156 } READ_CAPACITY_DATA, *PREAD_CAPACITY_DATA;
157
158// SCSI inquiry data
159typedef struct _INQUIRYDATA
160 {
161 UCHAR DeviceType :5;
162 UCHAR DeviceTypeQualifier :3;
163 UCHAR DeviceTypeModifier :7;
164 UCHAR RemovableMedia :1;
165 UCHAR Versions;
166 UCHAR ResponseDataFormat;
167 UCHAR AdditionalLength;
168 UCHAR Reserved[2];
169 UCHAR SoftReset :1;
170 UCHAR CommandQueue :1;
171 UCHAR Reserved2 :1;
172 UCHAR LinkedCommands :1;
173 UCHAR Synchronous :1;
174 UCHAR Wide16Bit :1;
175 UCHAR Wide32Bit :1;
176 UCHAR RelativeAddressing :1;
177 UCHAR VendorId[8];
178 UCHAR ProductId[16];
179 UCHAR ProductRevisionLevel[4];
180 UCHAR VendorSpecific[20];
181 UCHAR Reserved3[40];
182 } INQUIRYDATA, *PINQUIRYDATA;
183
184#endif
185
186// function prototypes
187int Pci2000_Detect (struct scsi_host_template *tpnt);
188int Pci2000_Command (Scsi_Cmnd *SCpnt);
189int Pci2000_QueueCommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *));
190int Pci2000_Abort (Scsi_Cmnd *SCpnt);
191int Pci2000_Reset (Scsi_Cmnd *SCpnt, unsigned int flags);
192int Pci2000_Release (struct Scsi_Host *pshost);
193int Pci2000_BiosParam (struct scsi_device *sdev,
194 struct block_device *bdev,
195 sector_t capacity, int geom[]);
196
197#endif
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index eac8e179cfff..7dd787f6ab27 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -3,11 +3,11 @@
3# 3#
4 4
5menu "PCMCIA SCSI adapter support" 5menu "PCMCIA SCSI adapter support"
6 depends on SCSI!=n && PCMCIA!=n && MODULES 6 depends on SCSI!=n && PCMCIA!=n
7 7
8config PCMCIA_AHA152X 8config PCMCIA_AHA152X
9 tristate "Adaptec AHA152X PCMCIA support" 9 tristate "Adaptec AHA152X PCMCIA support"
10 depends on m && !64BIT 10 depends on !64BIT
11 select SCSI_SPI_ATTRS 11 select SCSI_SPI_ATTRS
12 help 12 help
13 Say Y here if you intend to attach this type of PCMCIA SCSI host 13 Say Y here if you intend to attach this type of PCMCIA SCSI host
@@ -18,7 +18,6 @@ config PCMCIA_AHA152X
18 18
19config PCMCIA_FDOMAIN 19config PCMCIA_FDOMAIN
20 tristate "Future Domain PCMCIA support" 20 tristate "Future Domain PCMCIA support"
21 depends on m
22 help 21 help
23 Say Y here if you intend to attach this type of PCMCIA SCSI host 22 Say Y here if you intend to attach this type of PCMCIA SCSI host
24 adapter to your computer. 23 adapter to your computer.
@@ -28,7 +27,7 @@ config PCMCIA_FDOMAIN
28 27
29config PCMCIA_NINJA_SCSI 28config PCMCIA_NINJA_SCSI
30 tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support" 29 tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support"
31 depends on m && !64BIT 30 depends on !64BIT
32 help 31 help
33 If you intend to attach this type of PCMCIA SCSI host adapter to 32 If you intend to attach this type of PCMCIA SCSI host adapter to
34 your computer, say Y here and read 33 your computer, say Y here and read
@@ -62,7 +61,6 @@ config PCMCIA_NINJA_SCSI
62 61
63config PCMCIA_QLOGIC 62config PCMCIA_QLOGIC
64 tristate "Qlogic PCMCIA support" 63 tristate "Qlogic PCMCIA support"
65 depends on m
66 help 64 help
67 Say Y here if you intend to attach this type of PCMCIA SCSI host 65 Say Y here if you intend to attach this type of PCMCIA SCSI host
68 adapter to your computer. 66 adapter to your computer.
@@ -72,7 +70,6 @@ config PCMCIA_QLOGIC
72 70
73config PCMCIA_SYM53C500 71config PCMCIA_SYM53C500
74 tristate "Symbios 53c500 PCMCIA support" 72 tristate "Symbios 53c500 PCMCIA support"
75 depends on m
76 help 73 help
77 Say Y here if you have a New Media Bus Toaster or other PCMCIA 74 Say Y here if you have a New Media Bus Toaster or other PCMCIA
78 SCSI adapter based on the Symbios 53c500 controller. 75 SCSI adapter based on the Symbios 53c500 controller.
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 05f4f2a378eb..e8948b679f5b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1478,14 +1478,17 @@ typedef union {
1478 uint32_t b24 : 24; 1478 uint32_t b24 : 24;
1479 1479
1480 struct { 1480 struct {
1481 uint8_t d_id[3]; 1481#ifdef __BIG_ENDIAN
1482 uint8_t rsvd_1; 1482 uint8_t domain;
1483 } r; 1483 uint8_t area;
1484 1484 uint8_t al_pa;
1485 struct { 1485#elif __LITTLE_ENDIAN
1486 uint8_t al_pa; 1486 uint8_t al_pa;
1487 uint8_t area; 1487 uint8_t area;
1488 uint8_t domain; 1488 uint8_t domain;
1489#else
1490#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
1491#endif
1489 uint8_t rsvd_1; 1492 uint8_t rsvd_1;
1490 } b; 1493 } b;
1491} port_id_t; 1494} port_id_t;
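The qla_def.h hunk above declares the domain/area/al_pa bytes of port_id_t in opposite order depending on host endianness, so the byte-wise view and the 24-bit b24 view of the port ID agree on both little- and big-endian machines. A simplified, runnable imitation of the idea; the __BYTE_ORDER__ test and the bitfield layout assumptions below are typical for GCC targets and are not taken from the patch itself:

	#include <stdio.h>
	#include <stdint.h>

	typedef union {
		uint32_t b24 : 24;
		struct {
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			uint8_t domain, area, al_pa;
	#else
			uint8_t al_pa, area, domain;
	#endif
			uint8_t rsvd;
		} b;
	} port_id;

	int main(void)
	{
		port_id id;

		id.b24 = 0x0A0B0C;	/* domain 0x0A, area 0x0B, al_pa 0x0C */
		printf("domain=%02x area=%02x al_pa=%02x\n",
		       id.b.domain, id.b.area, id.b.al_pa);
		return 0;
	}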
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 98c01cd5e1a8..3e296ab845b6 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -11,6 +11,11 @@
11 11
12#include "qla_devtbl.h" 12#include "qla_devtbl.h"
13 13
14#ifdef CONFIG_SPARC
15#include <asm/prom.h>
16#include <asm/pbm.h>
17#endif
18
14/* XXX(hch): this is ugly, but we don't want to pull in exioctl.h */ 19/* XXX(hch): this is ugly, but we don't want to pull in exioctl.h */
15#ifndef EXT_IS_LUN_BIT_SET 20#ifndef EXT_IS_LUN_BIT_SET
16#define EXT_IS_LUN_BIT_SET(P,L) \ 21#define EXT_IS_LUN_BIT_SET(P,L) \
@@ -88,12 +93,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
88 93
89 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 94 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
90 95
91 rval = ha->isp_ops.nvram_config(ha); 96 ha->isp_ops.nvram_config(ha);
92 if (rval) {
93 DEBUG2(printk("scsi(%ld): Unable to verify NVRAM data.\n",
94 ha->host_no));
95 return rval;
96 }
97 97
98 if (ha->flags.disable_serdes) { 98 if (ha->flags.disable_serdes) {
99 /* Mask HBA via NVRAM settings? */ 99 /* Mask HBA via NVRAM settings? */
@@ -1393,6 +1393,28 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
1393 } 1393 }
1394} 1394}
1395 1395
1396/* On sparc systems, obtain port and node WWN from firmware
1397 * properties.
1398 */
1399static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv)
1400{
1401#ifdef CONFIG_SPARC
1402 struct pci_dev *pdev = ha->pdev;
1403 struct pcidev_cookie *pcp = pdev->sysdata;
1404 struct device_node *dp = pcp->prom_node;
1405 u8 *val;
1406 int len;
1407
1408 val = of_get_property(dp, "port-wwn", &len);
1409 if (val && len >= WWN_SIZE)
1410 memcpy(nv->port_name, val, WWN_SIZE);
1411
1412 val = of_get_property(dp, "node-wwn", &len);
1413 if (val && len >= WWN_SIZE)
1414 memcpy(nv->node_name, val, WWN_SIZE);
1415#endif
1416}
1417
1396/* 1418/*
1397* NVRAM configuration for ISP 2xxx 1419* NVRAM configuration for ISP 2xxx
1398* 1420*
@@ -1409,6 +1431,7 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
1409int 1431int
1410qla2x00_nvram_config(scsi_qla_host_t *ha) 1432qla2x00_nvram_config(scsi_qla_host_t *ha)
1411{ 1433{
1434 int rval;
1412 uint8_t chksum = 0; 1435 uint8_t chksum = 0;
1413 uint16_t cnt; 1436 uint16_t cnt;
1414 uint8_t *dptr1, *dptr2; 1437 uint8_t *dptr1, *dptr2;
@@ -1417,6 +1440,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1417 uint8_t *ptr = (uint8_t *)ha->request_ring; 1440 uint8_t *ptr = (uint8_t *)ha->request_ring;
1418 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1441 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1419 1442
1443 rval = QLA_SUCCESS;
1444
1420 /* Determine NVRAM starting address. */ 1445 /* Determine NVRAM starting address. */
1421 ha->nvram_size = sizeof(nvram_t); 1446 ha->nvram_size = sizeof(nvram_t);
1422 ha->nvram_base = 0; 1447 ha->nvram_base = 0;
@@ -1440,7 +1465,57 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1440 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 1465 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
1441 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 1466 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
1442 nv->nvram_version); 1467 nv->nvram_version);
1443 return QLA_FUNCTION_FAILED; 1468 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
1469 "invalid -- WWPN) defaults.\n");
1470
1471 /*
1472 * Set default initialization control block.
1473 */
1474 memset(nv, 0, ha->nvram_size);
1475 nv->parameter_block_version = ICB_VERSION;
1476
1477 if (IS_QLA23XX(ha)) {
1478 nv->firmware_options[0] = BIT_2 | BIT_1;
1479 nv->firmware_options[1] = BIT_7 | BIT_5;
1480 nv->add_firmware_options[0] = BIT_5;
1481 nv->add_firmware_options[1] = BIT_5 | BIT_4;
1482 nv->frame_payload_size = __constant_cpu_to_le16(2048);
1483 nv->special_options[1] = BIT_7;
1484 } else if (IS_QLA2200(ha)) {
1485 nv->firmware_options[0] = BIT_2 | BIT_1;
1486 nv->firmware_options[1] = BIT_7 | BIT_5;
1487 nv->add_firmware_options[0] = BIT_5;
1488 nv->add_firmware_options[1] = BIT_5 | BIT_4;
1489 nv->frame_payload_size = __constant_cpu_to_le16(1024);
1490 } else if (IS_QLA2100(ha)) {
1491 nv->firmware_options[0] = BIT_3 | BIT_1;
1492 nv->firmware_options[1] = BIT_5;
1493 nv->frame_payload_size = __constant_cpu_to_le16(1024);
1494 }
1495
1496 nv->max_iocb_allocation = __constant_cpu_to_le16(256);
1497 nv->execution_throttle = __constant_cpu_to_le16(16);
1498 nv->retry_count = 8;
1499 nv->retry_delay = 1;
1500
1501 nv->port_name[0] = 33;
1502 nv->port_name[3] = 224;
1503 nv->port_name[4] = 139;
1504
1505 qla2xxx_nvram_wwn_from_ofw(ha, nv);
1506
1507 nv->login_timeout = 4;
1508
1509 /*
1510 * Set default host adapter parameters
1511 */
1512 nv->host_p[1] = BIT_2;
1513 nv->reset_delay = 5;
1514 nv->port_down_retry_count = 8;
1515 nv->max_luns_per_target = __constant_cpu_to_le16(8);
1516 nv->link_down_timeout = 60;
1517
1518 rval = 1;
1444 } 1519 }
1445 1520
1446#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 1521#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
@@ -1653,7 +1728,11 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1653 } 1728 }
1654 } 1729 }
1655 1730
1656 return QLA_SUCCESS; 1731 if (rval) {
1732 DEBUG2_3(printk(KERN_WARNING
1733 "scsi(%ld): NVRAM configuration failed!\n", ha->host_no));
1734 }
1735 return (rval);
1657} 1736}
1658 1737
1659static void 1738static void
@@ -3071,9 +3150,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3071 3150
3072 ha->isp_ops.get_flash_version(ha, ha->request_ring); 3151 ha->isp_ops.get_flash_version(ha, ha->request_ring);
3073 3152
3074 rval = ha->isp_ops.nvram_config(ha); 3153 ha->isp_ops.nvram_config(ha);
3075 if (rval)
3076 goto isp_abort_retry;
3077 3154
3078 if (!qla2x00_restart_isp(ha)) { 3155 if (!qla2x00_restart_isp(ha)) {
3079 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 3156 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
@@ -3103,7 +3180,6 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3103 } 3180 }
3104 } 3181 }
3105 } else { /* failed the ISP abort */ 3182 } else { /* failed the ISP abort */
3106isp_abort_retry:
3107 ha->flags.online = 1; 3183 ha->flags.online = 1;
3108 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { 3184 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
3109 if (ha->isp_abort_cnt == 0) { 3185 if (ha->isp_abort_cnt == 0) {
@@ -3290,9 +3366,32 @@ qla24xx_reset_adapter(scsi_qla_host_t *ha)
3290 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3366 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3291} 3367}
3292 3368
3369/* On sparc systems, obtain port and node WWN from firmware
3370 * properties.
3371 */
3372static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *nv)
3373{
3374#ifdef CONFIG_SPARC
3375 struct pci_dev *pdev = ha->pdev;
3376 struct pcidev_cookie *pcp = pdev->sysdata;
3377 struct device_node *dp = pcp->prom_node;
3378 u8 *val;
3379 int len;
3380
3381 val = of_get_property(dp, "port-wwn", &len);
3382 if (val && len >= WWN_SIZE)
3383 memcpy(nv->port_name, val, WWN_SIZE);
3384
3385 val = of_get_property(dp, "node-wwn", &len);
3386 if (val && len >= WWN_SIZE)
3387 memcpy(nv->node_name, val, WWN_SIZE);
3388#endif
3389}
3390
3293int 3391int
3294qla24xx_nvram_config(scsi_qla_host_t *ha) 3392qla24xx_nvram_config(scsi_qla_host_t *ha)
3295{ 3393{
3394 int rval;
3296 struct init_cb_24xx *icb; 3395 struct init_cb_24xx *icb;
3297 struct nvram_24xx *nv; 3396 struct nvram_24xx *nv;
3298 uint32_t *dptr; 3397 uint32_t *dptr;
@@ -3300,6 +3399,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3300 uint32_t chksum; 3399 uint32_t chksum;
3301 uint16_t cnt; 3400 uint16_t cnt;
3302 3401
3402 rval = QLA_SUCCESS;
3303 icb = (struct init_cb_24xx *)ha->init_cb; 3403 icb = (struct init_cb_24xx *)ha->init_cb;
3304 nv = (struct nvram_24xx *)ha->request_ring; 3404 nv = (struct nvram_24xx *)ha->request_ring;
3305 3405
@@ -3332,7 +3432,52 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3332 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 3432 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
3333 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 3433 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
3334 le16_to_cpu(nv->nvram_version)); 3434 le16_to_cpu(nv->nvram_version));
3335 return QLA_FUNCTION_FAILED; 3435 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
3436 "invalid -- WWPN) defaults.\n");
3437
3438 /*
3439 * Set default initialization control block.
3440 */
3441 memset(nv, 0, ha->nvram_size);
3442 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
3443 nv->version = __constant_cpu_to_le16(ICB_VERSION);
3444 nv->frame_payload_size = __constant_cpu_to_le16(2048);
3445 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
3446 nv->exchange_count = __constant_cpu_to_le16(0);
3447 nv->hard_address = __constant_cpu_to_le16(124);
3448 nv->port_name[0] = 0x21;
3449 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn);
3450 nv->port_name[2] = 0x00;
3451 nv->port_name[3] = 0xe0;
3452 nv->port_name[4] = 0x8b;
3453 nv->port_name[5] = 0x1c;
3454 nv->port_name[6] = 0x55;
3455 nv->port_name[7] = 0x86;
3456 nv->node_name[0] = 0x20;
3457 nv->node_name[1] = 0x00;
3458 nv->node_name[2] = 0x00;
3459 nv->node_name[3] = 0xe0;
3460 nv->node_name[4] = 0x8b;
3461 nv->node_name[5] = 0x1c;
3462 nv->node_name[6] = 0x55;
3463 nv->node_name[7] = 0x86;
3464 qla24xx_nvram_wwn_from_ofw(ha, nv);
3465 nv->login_retry_count = __constant_cpu_to_le16(8);
3466 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
3467 nv->login_timeout = __constant_cpu_to_le16(0);
3468 nv->firmware_options_1 =
3469 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
3470 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
3471 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
3472 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
3473 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
3474 nv->efi_parameters = __constant_cpu_to_le32(0);
3475 nv->reset_delay = 5;
3476 nv->max_luns_per_target = __constant_cpu_to_le16(128);
3477 nv->port_down_retry_count = __constant_cpu_to_le16(30);
3478 nv->link_down_timeout = __constant_cpu_to_le16(30);
3479
3480 rval = 1;
3336 } 3481 }
3337 3482
3338 /* Reset Initialization control block */ 3483 /* Reset Initialization control block */
@@ -3479,7 +3624,11 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3479 ha->flags.process_response_queue = 1; 3624 ha->flags.process_response_queue = 1;
3480 } 3625 }
3481 3626
3482 return QLA_SUCCESS; 3627 if (rval) {
3628 DEBUG2_3(printk(KERN_WARNING
3629 "scsi(%ld): NVRAM configuration failed!\n", ha->host_no));
3630 }
3631 return (rval);
3483} 3632}
3484 3633
3485static int 3634static int
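
[Editorial aside, not part of the patch: the qla_init.c hunks above stop treating a bad NVRAM checksum as fatal; the driver now logs a warning, installs safe defaults (and, on sparc, WWNs from firmware properties), and reports the condition through rval. The following is a standalone sketch of that validate-then-fall-back pattern; all names and field values are illustrative, not the driver's.]

/*
 * Sketch: validate an NVRAM image with an additive 8-bit checksum; on
 * mismatch, install defaults and report the condition instead of failing.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct nvram_image {
	char	id[4];		/* expected to start with "ISP " */
	uint8_t	version;
	uint8_t	login_timeout;
	uint8_t	retry_count;
	uint8_t	data[57];	/* rest of the image; a valid image sets one byte so the total sums to 0 */
};

static int nvram_config(struct nvram_image *nv)
{
	const uint8_t *p = (const uint8_t *)nv;
	uint8_t chksum = 0;
	size_t i;

	for (i = 0; i < sizeof(*nv); i++)
		chksum += p[i];

	if (chksum || memcmp(nv->id, "ISP ", 4) || nv->version < 1) {
		fprintf(stderr, "inconsistent NVRAM, falling back to defaults\n");
		memset(nv, 0, sizeof(*nv));
		nv->login_timeout = 4;
		nv->retry_count = 8;
		return 1;		/* caller may log it, but initialization continues */
	}
	return 0;
}

int main(void)
{
	struct nvram_image nv;

	memset(&nv, 0xff, sizeof(nv));	/* simulate a blank/corrupt part */
	printf("nvram_config -> %d, login_timeout=%u\n",
	       nvram_config(&nv), nv.login_timeout);
	return 0;
}
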
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 83376f6ac3db..71e32a248528 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1280,14 +1280,14 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1280 } else { 1280 } else {
1281 if (name != NULL) { 1281 if (name != NULL) {
1282 /* This function returns name in big endian. */ 1282 /* This function returns name in big endian. */
1283 name[0] = LSB(mcp->mb[2]); 1283 name[0] = MSB(mcp->mb[2]);
1284 name[1] = MSB(mcp->mb[2]); 1284 name[1] = LSB(mcp->mb[2]);
1285 name[2] = LSB(mcp->mb[3]); 1285 name[2] = MSB(mcp->mb[3]);
1286 name[3] = MSB(mcp->mb[3]); 1286 name[3] = LSB(mcp->mb[3]);
1287 name[4] = LSB(mcp->mb[6]); 1287 name[4] = MSB(mcp->mb[6]);
1288 name[5] = MSB(mcp->mb[6]); 1288 name[5] = LSB(mcp->mb[6]);
1289 name[6] = LSB(mcp->mb[7]); 1289 name[6] = MSB(mcp->mb[7]);
1290 name[7] = MSB(mcp->mb[7]); 1290 name[7] = LSB(mcp->mb[7]);
1291 } 1291 }
1292 1292
1293 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n", 1293 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n",
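
[Editorial aside, not part of the patch: the qla_mbx.c hunk swaps LSB/MSB so the WWN returned by the firmware in 16-bit mailbox registers is assembled high byte first, i.e. in big-endian order as the comment promises. A small standalone sketch of that extraction follows; the register values are made up.]

/*
 * Sketch: build a big-endian 8-byte port name from four 16-bit
 * mailbox words by taking the high byte of each word first.
 */
#include <stdint.h>
#include <stdio.h>

#define MSB(w)	((uint8_t)((w) >> 8))
#define LSB(w)	((uint8_t)((w) & 0xff))

int main(void)
{
	uint16_t mb[4] = { 0x2100, 0x00e0, 0x8b01, 0x0203 };
	uint8_t name[8];
	int i;

	for (i = 0; i < 4; i++) {
		name[2 * i]     = MSB(mb[i]);	/* high byte first => big endian */
		name[2 * i + 1] = LSB(mb[i]);
	}

	for (i = 0; i < 8; i++)
		printf("%02x", name[i]);
	printf("\n");	/* prints 210000e08b010203 */
	return 0;
}
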
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 68f5d24b938b..b78919a318e2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -62,7 +62,7 @@ MODULE_PARM_DESC(ql2xallocfwdump,
62 "vary by ISP type. Default is 1 - allocate memory."); 62 "vary by ISP type. Default is 1 - allocate memory.");
63 63
64int ql2xextended_error_logging; 64int ql2xextended_error_logging;
65module_param(ql2xextended_error_logging, int, S_IRUGO|S_IRUSR); 65module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
66MODULE_PARM_DESC(ql2xextended_error_logging, 66MODULE_PARM_DESC(ql2xextended_error_logging,
67 "Option to enable extended error logging, " 67 "Option to enable extended error logging, "
68 "Default is 0 - no logging. 1 - log errors."); 68 "Default is 0 - no logging. 1 - log errors.");
@@ -157,6 +157,8 @@ static struct scsi_host_template qla24xx_driver_template = {
157 157
158 .slave_alloc = qla2xxx_slave_alloc, 158 .slave_alloc = qla2xxx_slave_alloc,
159 .slave_destroy = qla2xxx_slave_destroy, 159 .slave_destroy = qla2xxx_slave_destroy,
160 .scan_finished = qla2xxx_scan_finished,
161 .scan_start = qla2xxx_scan_start,
160 .change_queue_depth = qla2x00_change_queue_depth, 162 .change_queue_depth = qla2x00_change_queue_depth,
161 .change_queue_type = qla2x00_change_queue_type, 163 .change_queue_type = qla2x00_change_queue_type,
162 .this_id = -1, 164 .this_id = -1,
@@ -1705,6 +1707,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
1705 1707
1706 scsi_host_put(ha->host); 1708 scsi_host_put(ha->host);
1707 1709
1710 pci_disable_device(pdev);
1708 pci_set_drvdata(pdev, NULL); 1711 pci_set_drvdata(pdev, NULL);
1709} 1712}
1710 1713
@@ -1747,8 +1750,6 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1747 if (ha->iobase) 1750 if (ha->iobase)
1748 iounmap(ha->iobase); 1751 iounmap(ha->iobase);
1749 pci_release_regions(ha->pdev); 1752 pci_release_regions(ha->pdev);
1750
1751 pci_disable_device(ha->pdev);
1752} 1753}
1753 1754
1754static inline void 1755static inline void
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index ff1dd4175a7f..206bda093da2 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -466,6 +466,7 @@ qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
466 udelay(10); 466 udelay(10);
467 else 467 else
468 rval = QLA_FUNCTION_TIMEOUT; 468 rval = QLA_FUNCTION_TIMEOUT;
469 cond_resched();
469 } 470 }
470 471
471 /* TODO: What happens if we time out? */ 472 /* TODO: What happens if we time out? */
@@ -508,6 +509,7 @@ qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
508 udelay(10); 509 udelay(10);
509 else 510 else
510 rval = QLA_FUNCTION_TIMEOUT; 511 rval = QLA_FUNCTION_TIMEOUT;
512 cond_resched();
511 } 513 }
512 return rval; 514 return rval;
513} 515}
@@ -1255,6 +1257,7 @@ qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data,
1255 } 1257 }
1256 udelay(10); 1258 udelay(10);
1257 barrier(); 1259 barrier();
1260 cond_resched();
1258 } 1261 }
1259 return status; 1262 return status;
1260} 1263}
@@ -1403,6 +1406,7 @@ qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr,
1403 if (saddr % 100) 1406 if (saddr % 100)
1404 udelay(10); 1407 udelay(10);
1405 *tmp_buf = data; 1408 *tmp_buf = data;
1409 cond_resched();
1406 } 1410 }
1407} 1411}
1408 1412
@@ -1449,7 +1453,6 @@ uint8_t *
1449qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1453qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1450 uint32_t offset, uint32_t length) 1454 uint32_t offset, uint32_t length)
1451{ 1455{
1452 unsigned long flags;
1453 uint32_t addr, midpoint; 1456 uint32_t addr, midpoint;
1454 uint8_t *data; 1457 uint8_t *data;
1455 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1458 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1458,7 +1461,6 @@ qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1458 qla2x00_suspend_hba(ha); 1461 qla2x00_suspend_hba(ha);
1459 1462
1460 /* Go with read. */ 1463 /* Go with read. */
1461 spin_lock_irqsave(&ha->hardware_lock, flags);
1462 midpoint = ha->optrom_size / 2; 1464 midpoint = ha->optrom_size / 2;
1463 1465
1464 qla2x00_flash_enable(ha); 1466 qla2x00_flash_enable(ha);
@@ -1473,7 +1475,6 @@ qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1473 *data = qla2x00_read_flash_byte(ha, addr); 1475 *data = qla2x00_read_flash_byte(ha, addr);
1474 } 1476 }
1475 qla2x00_flash_disable(ha); 1477 qla2x00_flash_disable(ha);
1476 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1477 1478
1478 /* Resume HBA. */ 1479 /* Resume HBA. */
1479 qla2x00_resume_hba(ha); 1480 qla2x00_resume_hba(ha);
@@ -1487,7 +1488,6 @@ qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1487{ 1488{
1488 1489
1489 int rval; 1490 int rval;
1490 unsigned long flags;
1491 uint8_t man_id, flash_id, sec_number, data; 1491 uint8_t man_id, flash_id, sec_number, data;
1492 uint16_t wd; 1492 uint16_t wd;
1493 uint32_t addr, liter, sec_mask, rest_addr; 1493 uint32_t addr, liter, sec_mask, rest_addr;
@@ -1500,7 +1500,6 @@ qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1500 sec_number = 0; 1500 sec_number = 0;
1501 1501
1502 /* Reset ISP chip. */ 1502 /* Reset ISP chip. */
1503 spin_lock_irqsave(&ha->hardware_lock, flags);
1504 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET); 1503 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1505 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); 1504 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
1506 1505
@@ -1689,10 +1688,10 @@ update_flash:
1689 rval = QLA_FUNCTION_FAILED; 1688 rval = QLA_FUNCTION_FAILED;
1690 break; 1689 break;
1691 } 1690 }
1691 cond_resched();
1692 } 1692 }
1693 } while (0); 1693 } while (0);
1694 qla2x00_flash_disable(ha); 1694 qla2x00_flash_disable(ha);
1695 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1696 1695
1697 /* Resume HBA. */ 1696 /* Resume HBA. */
1698 qla2x00_resume_hba(ha); 1697 qla2x00_resume_hba(ha);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 61347aee55ce..dc85495c337f 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.01.07-k5" 10#define QLA2XXX_VERSION "8.01.07-k6"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 1 13#define QLA_DRIVER_MINOR_VER 1
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1c89ee3e69ba..4c1e31334765 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -344,7 +344,6 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost)
344void scsi_log_send(struct scsi_cmnd *cmd) 344void scsi_log_send(struct scsi_cmnd *cmd)
345{ 345{
346 unsigned int level; 346 unsigned int level;
347 struct scsi_device *sdev;
348 347
349 /* 348 /*
350 * If ML QUEUE log level is greater than or equal to: 349 * If ML QUEUE log level is greater than or equal to:
@@ -361,22 +360,17 @@ void scsi_log_send(struct scsi_cmnd *cmd)
361 level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, 360 level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
362 SCSI_LOG_MLQUEUE_BITS); 361 SCSI_LOG_MLQUEUE_BITS);
363 if (level > 1) { 362 if (level > 1) {
364 sdev = cmd->device; 363 scmd_printk(KERN_INFO, cmd, "Send: ");
365 sdev_printk(KERN_INFO, sdev, "send ");
366 if (level > 2) 364 if (level > 2)
367 printk("0x%p ", cmd); 365 printk("0x%p ", cmd);
368 /* 366 printk("\n");
369 * spaces to match disposition and cmd->result
370 * output in scsi_log_completion.
371 */
372 printk(" ");
373 scsi_print_command(cmd); 367 scsi_print_command(cmd);
374 if (level > 3) { 368 if (level > 3) {
375 printk(KERN_INFO "buffer = 0x%p, bufflen = %d," 369 printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
376 " done = 0x%p, queuecommand 0x%p\n", 370 " done = 0x%p, queuecommand 0x%p\n",
377 cmd->request_buffer, cmd->request_bufflen, 371 cmd->request_buffer, cmd->request_bufflen,
378 cmd->done, 372 cmd->done,
379 sdev->host->hostt->queuecommand); 373 cmd->device->host->hostt->queuecommand);
380 374
381 } 375 }
382 } 376 }
@@ -386,7 +380,6 @@ void scsi_log_send(struct scsi_cmnd *cmd)
386void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) 380void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
387{ 381{
388 unsigned int level; 382 unsigned int level;
389 struct scsi_device *sdev;
390 383
391 /* 384 /*
392 * If ML COMPLETE log level is greater than or equal to: 385 * If ML COMPLETE log level is greater than or equal to:
@@ -405,8 +398,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
405 SCSI_LOG_MLCOMPLETE_BITS); 398 SCSI_LOG_MLCOMPLETE_BITS);
406 if (((level > 0) && (cmd->result || disposition != SUCCESS)) || 399 if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
407 (level > 1)) { 400 (level > 1)) {
408 sdev = cmd->device; 401 scmd_printk(KERN_INFO, cmd, "Done: ");
409 sdev_printk(KERN_INFO, sdev, "done ");
410 if (level > 2) 402 if (level > 2)
411 printk("0x%p ", cmd); 403 printk("0x%p ", cmd);
412 /* 404 /*
@@ -415,40 +407,35 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
415 */ 407 */
416 switch (disposition) { 408 switch (disposition) {
417 case SUCCESS: 409 case SUCCESS:
418 printk("SUCCESS"); 410 printk("SUCCESS\n");
419 break; 411 break;
420 case NEEDS_RETRY: 412 case NEEDS_RETRY:
421 printk("RETRY "); 413 printk("RETRY\n");
422 break; 414 break;
423 case ADD_TO_MLQUEUE: 415 case ADD_TO_MLQUEUE:
424 printk("MLQUEUE"); 416 printk("MLQUEUE\n");
425 break; 417 break;
426 case FAILED: 418 case FAILED:
427 printk("FAILED "); 419 printk("FAILED\n");
428 break; 420 break;
429 case TIMEOUT_ERROR: 421 case TIMEOUT_ERROR:
430 /* 422 /*
431 * If called via scsi_times_out. 423 * If called via scsi_times_out.
432 */ 424 */
433 printk("TIMEOUT"); 425 printk("TIMEOUT\n");
434 break; 426 break;
435 default: 427 default:
436 printk("UNKNOWN"); 428 printk("UNKNOWN\n");
437 } 429 }
438 printk(" %8x ", cmd->result); 430 scsi_print_result(cmd);
439 scsi_print_command(cmd); 431 scsi_print_command(cmd);
440 if (status_byte(cmd->result) & CHECK_CONDITION) { 432 if (status_byte(cmd->result) & CHECK_CONDITION)
441 /*
442 * XXX The scsi_print_sense formatting/prefix
443 * doesn't match this function.
444 */
445 scsi_print_sense("", cmd); 433 scsi_print_sense("", cmd);
446 } 434 if (level > 3)
447 if (level > 3) { 435 scmd_printk(KERN_INFO, cmd,
448 printk(KERN_INFO "scsi host busy %d failed %d\n", 436 "scsi host busy %d failed %d\n",
449 sdev->host->host_busy, 437 cmd->device->host->host_busy,
450 sdev->host->host_failed); 438 cmd->device->host->host_failed);
451 }
452 } 439 }
453 } 440 }
454} 441}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 918bb6019540..3963e7013bd9 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -184,10 +184,19 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
184 **/ 184 **/
185void scsi_times_out(struct scsi_cmnd *scmd) 185void scsi_times_out(struct scsi_cmnd *scmd)
186{ 186{
187 enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
188
187 scsi_log_completion(scmd, TIMEOUT_ERROR); 189 scsi_log_completion(scmd, TIMEOUT_ERROR);
188 190
189 if (scmd->device->host->transportt->eh_timed_out) 191 if (scmd->device->host->transportt->eh_timed_out)
190 switch (scmd->device->host->transportt->eh_timed_out(scmd)) { 192 eh_timed_out = scmd->device->host->transportt->eh_timed_out;
193 else if (scmd->device->host->hostt->eh_timed_out)
194 eh_timed_out = scmd->device->host->hostt->eh_timed_out;
195 else
196 eh_timed_out = NULL;
197
198 if (eh_timed_out)
199 switch (eh_timed_out(scmd)) {
191 case EH_HANDLED: 200 case EH_HANDLED:
192 __scsi_done(scmd); 201 __scsi_done(scmd);
193 return; 202 return;
@@ -923,10 +932,12 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
923 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0}; 932 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
924 933
925 if (scmd->device->allow_restart) { 934 if (scmd->device->allow_restart) {
926 int rtn; 935 int i, rtn = NEEDS_RETRY;
936
937 for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
938 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
939 START_UNIT_TIMEOUT, 0);
927 940
928 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
929 START_UNIT_TIMEOUT, 0);
930 if (rtn == SUCCESS) 941 if (rtn == SUCCESS)
931 return 0; 942 return 0;
932 } 943 }
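
[Editorial aside, not part of the patch: the scsi_error.c hunk above makes scsi_times_out prefer the transport class eh_timed_out hook, then fall back to the host template's, before deciding how to handle the timeout. A simplified standalone sketch of that selection follows; the types are reduced stand-ins, not the midlayer's.]

/*
 * Sketch: pick a timeout handler by precedence (transport class,
 * then host template) and invoke it if one exists.
 */
#include <stdio.h>
#include <stddef.h>

enum eh_timer_return { EH_NOT_HANDLED, EH_HANDLED, EH_RESET_TIMER };

struct cmnd;				/* opaque for the sketch */
typedef enum eh_timer_return (*eh_timed_out_fn)(struct cmnd *);

static enum eh_timer_return transport_hook(struct cmnd *c)
{
	(void)c;
	return EH_RESET_TIMER;
}

static enum eh_timer_return pick_and_run(eh_timed_out_fn transport,
					 eh_timed_out_fn host,
					 struct cmnd *cmd)
{
	eh_timed_out_fn eh_timed_out;

	if (transport)
		eh_timed_out = transport;	/* transport class wins */
	else if (host)
		eh_timed_out = host;		/* then the host template */
	else
		eh_timed_out = NULL;

	return eh_timed_out ? eh_timed_out(cmd) : EH_NOT_HANDLED;
}

int main(void)
{
	printf("%d\n", pick_and_run(transport_hook, NULL, NULL));	/* 2 (EH_RESET_TIMER) */
	printf("%d\n", pick_and_run(NULL, NULL, NULL));			/* 0 (EH_NOT_HANDLED) */
	return 0;
}
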
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 05d79af5ab90..61fbcdcbb009 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -848,8 +848,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
848 memcpy(req->sense, cmd->sense_buffer, len); 848 memcpy(req->sense, cmd->sense_buffer, len);
849 req->sense_len = len; 849 req->sense_len = len;
850 } 850 }
851 } else 851 }
852 req->data_len = cmd->resid; 852 req->data_len = cmd->resid;
853 } 853 }
854 854
855 /* 855 /*
@@ -968,9 +968,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
968 } 968 }
969 if (result) { 969 if (result) {
970 if (!(req->cmd_flags & REQ_QUIET)) { 970 if (!(req->cmd_flags & REQ_QUIET)) {
971 scmd_printk(KERN_INFO, cmd, 971 scsi_print_result(cmd);
972 "SCSI error: return code = 0x%08x\n",
973 result);
974 if (driver_byte(result) & DRIVER_SENSE) 972 if (driver_byte(result) & DRIVER_SENSE)
975 scsi_print_sense("", cmd); 973 scsi_print_sense("", cmd);
976 } 974 }
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 0949145304ea..a67f315244d7 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -181,10 +181,8 @@ int scsi_complete_async_scans(void)
181 return 0; 181 return 0;
182} 182}
183 183
184#ifdef MODULE
185/* Only exported for the benefit of scsi_wait_scan */ 184/* Only exported for the benefit of scsi_wait_scan */
186EXPORT_SYMBOL_GPL(scsi_complete_async_scans); 185EXPORT_SYMBOL_GPL(scsi_complete_async_scans);
187#endif
188 186
189/** 187/**
190 * scsi_unlock_floptical - unlock device via a special MODE SENSE command 188 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 939de0de18bc..67a38a1409ba 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -276,8 +276,22 @@ static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
276 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0; 276 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
277} 277}
278 278
279static int scsi_bus_uevent(struct device *dev, char **envp, int num_envp,
280 char *buffer, int buffer_size)
281{
282 struct scsi_device *sdev = to_scsi_device(dev);
283 int i = 0;
284 int length = 0;
285
286 add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
287 "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type);
288 envp[i] = NULL;
289 return 0;
290}
291
279static int scsi_bus_suspend(struct device * dev, pm_message_t state) 292static int scsi_bus_suspend(struct device * dev, pm_message_t state)
280{ 293{
294 struct device_driver *drv = dev->driver;
281 struct scsi_device *sdev = to_scsi_device(dev); 295 struct scsi_device *sdev = to_scsi_device(dev);
282 struct scsi_host_template *sht = sdev->host->hostt; 296 struct scsi_host_template *sht = sdev->host->hostt;
283 int err; 297 int err;
@@ -286,28 +300,51 @@ static int scsi_bus_suspend(struct device * dev, pm_message_t state)
286 if (err) 300 if (err)
287 return err; 301 return err;
288 302
289 if (sht->suspend) 303 /* call HLD suspend first */
304 if (drv && drv->suspend) {
305 err = drv->suspend(dev, state);
306 if (err)
307 return err;
308 }
309
310 /* then, call host suspend */
311 if (sht->suspend) {
290 err = sht->suspend(sdev, state); 312 err = sht->suspend(sdev, state);
313 if (err) {
314 if (drv && drv->resume)
315 drv->resume(dev);
316 return err;
317 }
318 }
291 319
292 return err; 320 return 0;
293} 321}
294 322
295static int scsi_bus_resume(struct device * dev) 323static int scsi_bus_resume(struct device * dev)
296{ 324{
325 struct device_driver *drv = dev->driver;
297 struct scsi_device *sdev = to_scsi_device(dev); 326 struct scsi_device *sdev = to_scsi_device(dev);
298 struct scsi_host_template *sht = sdev->host->hostt; 327 struct scsi_host_template *sht = sdev->host->hostt;
299 int err = 0; 328 int err = 0, err2 = 0;
300 329
330 /* call host resume first */
301 if (sht->resume) 331 if (sht->resume)
302 err = sht->resume(sdev); 332 err = sht->resume(sdev);
303 333
334 /* then, call HLD resume */
335 if (drv && drv->resume)
336 err2 = drv->resume(dev);
337
304 scsi_device_resume(sdev); 338 scsi_device_resume(sdev);
305 return err; 339
340 /* favor LLD failure */
 341	return err ? err : err2;
306} 342}
307 343
308struct bus_type scsi_bus_type = { 344struct bus_type scsi_bus_type = {
309 .name = "scsi", 345 .name = "scsi",
310 .match = scsi_bus_match, 346 .match = scsi_bus_match,
347 .uevent = scsi_bus_uevent,
311 .suspend = scsi_bus_suspend, 348 .suspend = scsi_bus_suspend,
312 .resume = scsi_bus_resume, 349 .resume = scsi_bus_resume,
313}; 350};
@@ -547,6 +584,14 @@ show_sdev_iostat(iorequest_cnt);
547show_sdev_iostat(iodone_cnt); 584show_sdev_iostat(iodone_cnt);
548show_sdev_iostat(ioerr_cnt); 585show_sdev_iostat(ioerr_cnt);
549 586
587static ssize_t
588sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
589{
590 struct scsi_device *sdev;
591 sdev = to_scsi_device(dev);
592 return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type);
593}
594static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL);
550 595
551/* Default template for device attributes. May NOT be modified */ 596/* Default template for device attributes. May NOT be modified */
552static struct device_attribute *scsi_sysfs_sdev_attrs[] = { 597static struct device_attribute *scsi_sysfs_sdev_attrs[] = {
@@ -566,6 +611,7 @@ static struct device_attribute *scsi_sysfs_sdev_attrs[] = {
566 &dev_attr_iorequest_cnt, 611 &dev_attr_iorequest_cnt,
567 &dev_attr_iodone_cnt, 612 &dev_attr_iodone_cnt,
568 &dev_attr_ioerr_cnt, 613 &dev_attr_ioerr_cnt,
614 &dev_attr_modalias,
569 NULL 615 NULL
570}; 616};
571 617
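
[Editorial aside, not part of the patch: the scsi_sysfs.c hunks export the same device-type-based modalias both as a sysfs attribute and as a MODALIAS uevent variable, so udev can autoload drivers that declare MODULE_ALIAS_SCSI_DEVICE(), as sd.c does later in this series. The sketch below only shows how such a string is composed; the exact expansion of SCSI_DEVICE_MODALIAS_FMT is assumed here to be "scsi:t-0x%02x" and should be checked against the header.]

/*
 * Sketch: compose a modalias string from the SCSI peripheral device
 * type.  Format macro value is an assumption for illustration.
 */
#include <stdio.h>

#define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"	/* assumed expansion */

int main(void)
{
	unsigned int type_disk = 0x00;	/* TYPE_DISK */
	char buf[20];

	snprintf(buf, sizeof(buf), SCSI_DEVICE_MODALIAS_FMT, type_disk);
	printf("MODALIAS=%s\n", buf);	/* MODALIAS=scsi:t-0x00 */
	return 0;
}
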
diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c
index 0e08817fdecf..ca22ddf81746 100644
--- a/drivers/scsi/scsi_tgt_if.c
+++ b/drivers/scsi/scsi_tgt_if.c
@@ -179,10 +179,12 @@ static int event_recv_msg(struct tgt_event *ev)
179 switch (ev->hdr.type) { 179 switch (ev->hdr.type) {
180 case TGT_UEVENT_CMD_RSP: 180 case TGT_UEVENT_CMD_RSP:
181 err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no, 181 err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no,
182 ev->p.cmd_rsp.tag,
183 ev->p.cmd_rsp.result, 182 ev->p.cmd_rsp.result,
184 ev->p.cmd_rsp.len, 183 ev->p.cmd_rsp.tag,
185 ev->p.cmd_rsp.uaddr, 184 ev->p.cmd_rsp.uaddr,
185 ev->p.cmd_rsp.len,
186 ev->p.cmd_rsp.sense_uaddr,
187 ev->p.cmd_rsp.sense_len,
186 ev->p.cmd_rsp.rw); 188 ev->p.cmd_rsp.rw);
187 break; 189 break;
188 case TGT_UEVENT_TSK_MGMT_RSP: 190 case TGT_UEVENT_TSK_MGMT_RSP:
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index d402aff5f314..2570f48a69c7 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -28,7 +28,6 @@
28#include <scsi/scsi_device.h> 28#include <scsi/scsi_device.h>
29#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
30#include <scsi/scsi_tgt.h> 30#include <scsi/scsi_tgt.h>
31#include <../drivers/md/dm-bio-list.h>
32 31
33#include "scsi_tgt_priv.h" 32#include "scsi_tgt_priv.h"
34 33
@@ -42,16 +41,12 @@ static struct kmem_cache *scsi_tgt_cmd_cache;
42struct scsi_tgt_cmd { 41struct scsi_tgt_cmd {
43 /* TODO replace work with James b's code */ 42 /* TODO replace work with James b's code */
44 struct work_struct work; 43 struct work_struct work;
45 /* TODO replace the lists with a large bio */ 44 /* TODO fix limits of some drivers */
46 struct bio_list xfer_done_list; 45 struct bio *bio;
47 struct bio_list xfer_list;
48 46
49 struct list_head hash_list; 47 struct list_head hash_list;
50 struct request *rq; 48 struct request *rq;
51 u64 tag; 49 u64 tag;
52
53 void *buffer;
54 unsigned bufflen;
55}; 50};
56 51
57#define TGT_HASH_ORDER 4 52#define TGT_HASH_ORDER 4
@@ -93,7 +88,12 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
93 if (!tcmd) 88 if (!tcmd)
94 goto put_dev; 89 goto put_dev;
95 90
96 rq = blk_get_request(shost->uspace_req_q, write, gfp_mask); 91 /*
92 * The blk helpers are used to the READ/WRITE requests
93 * transfering data from a initiator point of view. Since
94 * we are in target mode we want the opposite.
95 */
96 rq = blk_get_request(shost->uspace_req_q, !write, gfp_mask);
97 if (!rq) 97 if (!rq)
98 goto free_tcmd; 98 goto free_tcmd;
99 99
@@ -111,8 +111,6 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
111 rq->cmd_flags |= REQ_TYPE_BLOCK_PC; 111 rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
112 rq->end_io_data = tcmd; 112 rq->end_io_data = tcmd;
113 113
114 bio_list_init(&tcmd->xfer_list);
115 bio_list_init(&tcmd->xfer_done_list);
116 tcmd->rq = rq; 114 tcmd->rq = rq;
117 115
118 return cmd; 116 return cmd;
@@ -157,22 +155,6 @@ void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
157} 155}
158EXPORT_SYMBOL_GPL(scsi_host_put_command); 156EXPORT_SYMBOL_GPL(scsi_host_put_command);
159 157
160static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
161{
162 struct bio *bio;
163
164 /* must call bio_endio in case bio was bounced */
165 while ((bio = bio_list_pop(&tcmd->xfer_done_list))) {
166 bio_endio(bio, bio->bi_size, 0);
167 bio_unmap_user(bio);
168 }
169
170 while ((bio = bio_list_pop(&tcmd->xfer_list))) {
171 bio_endio(bio, bio->bi_size, 0);
172 bio_unmap_user(bio);
173 }
174}
175
176static void cmd_hashlist_del(struct scsi_cmnd *cmd) 158static void cmd_hashlist_del(struct scsi_cmnd *cmd)
177{ 159{
178 struct request_queue *q = cmd->request->q; 160 struct request_queue *q = cmd->request->q;
@@ -185,6 +167,11 @@ static void cmd_hashlist_del(struct scsi_cmnd *cmd)
185 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); 167 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
186} 168}
187 169
170static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
171{
172 blk_rq_unmap_user(tcmd->bio);
173}
174
188static void scsi_tgt_cmd_destroy(struct work_struct *work) 175static void scsi_tgt_cmd_destroy(struct work_struct *work)
189{ 176{
190 struct scsi_tgt_cmd *tcmd = 177 struct scsi_tgt_cmd *tcmd =
@@ -193,16 +180,6 @@ static void scsi_tgt_cmd_destroy(struct work_struct *work)
193 180
194 dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction, 181 dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
195 rq_data_dir(cmd->request)); 182 rq_data_dir(cmd->request));
196 /*
197 * We fix rq->cmd_flags here since when we told bio_map_user
198 * to write vm for WRITE commands, blk_rq_bio_prep set
199 * rq_data_dir the flags to READ.
200 */
201 if (cmd->sc_data_direction == DMA_TO_DEVICE)
202 cmd->request->cmd_flags |= REQ_RW;
203 else
204 cmd->request->cmd_flags &= ~REQ_RW;
205
206 scsi_unmap_user_pages(tcmd); 183 scsi_unmap_user_pages(tcmd);
207 scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd); 184 scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
208} 185}
@@ -215,6 +192,7 @@ static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd,
215 struct list_head *head; 192 struct list_head *head;
216 193
217 tcmd->tag = tag; 194 tcmd->tag = tag;
195 tcmd->bio = NULL;
218 INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy); 196 INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy);
219 spin_lock_irqsave(&qdata->cmd_hash_lock, flags); 197 spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
220 head = &qdata->cmd_hash[cmd_hashfn(tag)]; 198 head = &qdata->cmd_hash[cmd_hashfn(tag)];
@@ -349,10 +327,14 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
349 dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request)); 327 dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
350 328
351 scsi_tgt_uspace_send_status(cmd, tcmd->tag); 329 scsi_tgt_uspace_send_status(cmd, tcmd->tag);
330
331 if (cmd->request_buffer)
332 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
333
352 queue_work(scsi_tgtd, &tcmd->work); 334 queue_work(scsi_tgtd, &tcmd->work);
353} 335}
354 336
355static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd) 337static int scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
356{ 338{
357 struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd); 339 struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
358 int err; 340 int err;
@@ -365,30 +347,12 @@ static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
365 case SCSI_MLQUEUE_DEVICE_BUSY: 347 case SCSI_MLQUEUE_DEVICE_BUSY:
366 return -EAGAIN; 348 return -EAGAIN;
367 } 349 }
368
369 return 0; 350 return 0;
370} 351}
371 352
372static void scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
373{
374 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
375 int err;
376
377 err = __scsi_tgt_transfer_response(cmd);
378 if (!err)
379 return;
380
381 cmd->result = DID_BUS_BUSY << 16;
382 err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
383 if (err <= 0)
384 /* the eh will have to pick this up */
385 printk(KERN_ERR "Could not send cmd %p status\n", cmd);
386}
387
388static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask) 353static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
389{ 354{
390 struct request *rq = cmd->request; 355 struct request *rq = cmd->request;
391 struct scsi_tgt_cmd *tcmd = rq->end_io_data;
392 int count; 356 int count;
393 357
394 cmd->use_sg = rq->nr_phys_segments; 358 cmd->use_sg = rq->nr_phys_segments;
@@ -398,143 +362,54 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
398 362
399 cmd->request_bufflen = rq->data_len; 363 cmd->request_bufflen = rq->data_len;
400 364
401 dprintk("cmd %p addr %p cnt %d %lu\n", cmd, tcmd->buffer, cmd->use_sg, 365 dprintk("cmd %p cnt %d %lu\n", cmd, cmd->use_sg, rq_data_dir(rq));
402 rq_data_dir(rq));
403 count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer); 366 count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer);
404 if (likely(count <= cmd->use_sg)) { 367 if (likely(count <= cmd->use_sg)) {
405 cmd->use_sg = count; 368 cmd->use_sg = count;
406 return 0; 369 return 0;
407 } 370 }
408 371
409 eprintk("cmd %p addr %p cnt %d\n", cmd, tcmd->buffer, cmd->use_sg); 372 eprintk("cmd %p cnt %d\n", cmd, cmd->use_sg);
410 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); 373 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
411 return -EINVAL; 374 return -EINVAL;
412} 375}
413 376
414/* TODO: test this crap and replace bio_map_user with new interface maybe */ 377/* TODO: test this crap and replace bio_map_user with new interface maybe */
415static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd, 378static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
416 int rw) 379 unsigned long uaddr, unsigned int len, int rw)
417{ 380{
418 struct request_queue *q = cmd->request->q; 381 struct request_queue *q = cmd->request->q;
419 struct request *rq = cmd->request; 382 struct request *rq = cmd->request;
420 void *uaddr = tcmd->buffer;
421 unsigned int len = tcmd->bufflen;
422 struct bio *bio;
423 int err; 383 int err;
424 384
425 while (len > 0) { 385 dprintk("%lx %u\n", uaddr, len);
426 dprintk("%lx %u\n", (unsigned long) uaddr, len); 386 err = blk_rq_map_user(q, rq, (void *)uaddr, len);
427 bio = bio_map_user(q, NULL, (unsigned long) uaddr, len, rw); 387 if (err) {
428 if (IS_ERR(bio)) {
429 err = PTR_ERR(bio);
430 dprintk("fail to map %lx %u %d %x\n",
431 (unsigned long) uaddr, len, err, cmd->cmnd[0]);
432 goto unmap_bios;
433 }
434
435 uaddr += bio->bi_size;
436 len -= bio->bi_size;
437
438 /* 388 /*
439 * The first bio is added and merged. We could probably 389 * TODO: need to fixup sg_tablesize, max_segment_size,
440 * try to add others using scsi_merge_bio() but for now 390 * max_sectors, etc for modern HW and software drivers
441 * we keep it simple. The first bio should be pretty large 391 * where this value is bogus.
442 * (either hitting the 1 MB bio pages limit or a queue limit) 392 *
443 * already but for really large IO we may want to try and 393 * TODO2: we can alloc a reserve buffer of max size
444 * merge these. 394 * we can handle and do the slow copy path for really large
395 * IO.
445 */ 396 */
446 if (!rq->bio) { 397 eprintk("Could not handle request of size %u.\n", len);
447 blk_rq_bio_prep(q, rq, bio); 398 return err;
448 rq->data_len = bio->bi_size;
449 } else
450 /* put list of bios to transfer in next go around */
451 bio_list_add(&tcmd->xfer_list, bio);
452 } 399 }
453 400
454 cmd->offset = 0; 401 tcmd->bio = rq->bio;
455 err = scsi_tgt_init_cmd(cmd, GFP_KERNEL); 402 err = scsi_tgt_init_cmd(cmd, GFP_KERNEL);
456 if (err) 403 if (err)
457 goto unmap_bios; 404 goto unmap_rq;
458 405
459 return 0; 406 return 0;
460 407
461unmap_bios: 408unmap_rq:
462 if (rq->bio) { 409 scsi_unmap_user_pages(tcmd);
463 bio_unmap_user(rq->bio);
464 while ((bio = bio_list_pop(&tcmd->xfer_list)))
465 bio_unmap_user(bio);
466 }
467
468 return err; 410 return err;
469} 411}
470 412
471static int scsi_tgt_transfer_data(struct scsi_cmnd *);
472
473static void scsi_tgt_data_transfer_done(struct scsi_cmnd *cmd)
474{
475 struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
476 struct bio *bio;
477 int err;
478
479 /* should we free resources here on error ? */
480 if (cmd->result) {
481send_uspace_err:
482 err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
483 if (err <= 0)
484 /* the tgt uspace eh will have to pick this up */
485 printk(KERN_ERR "Could not send cmd %p status\n", cmd);
486 return;
487 }
488
489 dprintk("cmd %p request_bufflen %u bufflen %u\n",
490 cmd, cmd->request_bufflen, tcmd->bufflen);
491
492 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
493 bio_list_add(&tcmd->xfer_done_list, cmd->request->bio);
494
495 tcmd->buffer += cmd->request_bufflen;
496 cmd->offset += cmd->request_bufflen;
497
498 if (!tcmd->xfer_list.head) {
499 scsi_tgt_transfer_response(cmd);
500 return;
501 }
502
503 dprintk("cmd2 %p request_bufflen %u bufflen %u\n",
504 cmd, cmd->request_bufflen, tcmd->bufflen);
505
506 bio = bio_list_pop(&tcmd->xfer_list);
507 BUG_ON(!bio);
508
509 blk_rq_bio_prep(cmd->request->q, cmd->request, bio);
510 cmd->request->data_len = bio->bi_size;
511 err = scsi_tgt_init_cmd(cmd, GFP_ATOMIC);
512 if (err) {
513 cmd->result = DID_ERROR << 16;
514 goto send_uspace_err;
515 }
516
517 if (scsi_tgt_transfer_data(cmd)) {
518 cmd->result = DID_NO_CONNECT << 16;
519 goto send_uspace_err;
520 }
521}
522
523static int scsi_tgt_transfer_data(struct scsi_cmnd *cmd)
524{
525 int err;
526 struct Scsi_Host *host = scsi_tgt_cmd_to_host(cmd);
527
528 err = host->hostt->transfer_data(cmd, scsi_tgt_data_transfer_done);
529 switch (err) {
530 case SCSI_MLQUEUE_HOST_BUSY:
531 case SCSI_MLQUEUE_DEVICE_BUSY:
532 return -EAGAIN;
533 default:
534 return 0;
535 }
536}
537
538static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr, 413static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr,
539 unsigned len) 414 unsigned len)
540{ 415{
@@ -584,8 +459,9 @@ static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u64 tag)
584 return rq; 459 return rq;
585} 460}
586 461
587int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len, 462int scsi_tgt_kspace_exec(int host_no, int result, u64 tag,
588 unsigned long uaddr, u8 rw) 463 unsigned long uaddr, u32 len, unsigned long sense_uaddr,
464 u32 sense_len, u8 rw)
589{ 465{
590 struct Scsi_Host *shost; 466 struct Scsi_Host *shost;
591 struct scsi_cmnd *cmd; 467 struct scsi_cmnd *cmd;
@@ -617,8 +493,9 @@ int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
617 } 493 }
618 cmd = rq->special; 494 cmd = rq->special;
619 495
620 dprintk("cmd %p result %d len %d bufflen %u %lu %x\n", cmd, 496 dprintk("cmd %p scb %x result %d len %d bufflen %u %lu %x\n",
621 result, len, cmd->request_bufflen, rq_data_dir(rq), cmd->cmnd[0]); 497 cmd, cmd->cmnd[0], result, len, cmd->request_bufflen,
498 rq_data_dir(rq), cmd->cmnd[0]);
622 499
623 if (result == TASK_ABORTED) { 500 if (result == TASK_ABORTED) {
624 scsi_tgt_abort_cmd(shost, cmd); 501 scsi_tgt_abort_cmd(shost, cmd);
@@ -629,36 +506,36 @@ int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
629 * in the request_* values 506 * in the request_* values
630 */ 507 */
631 tcmd = cmd->request->end_io_data; 508 tcmd = cmd->request->end_io_data;
632 tcmd->buffer = (void *)uaddr;
633 tcmd->bufflen = len;
634 cmd->result = result; 509 cmd->result = result;
635 510
636 if (!tcmd->bufflen || cmd->request_buffer) { 511 if (cmd->result == SAM_STAT_CHECK_CONDITION)
637 err = __scsi_tgt_transfer_response(cmd); 512 scsi_tgt_copy_sense(cmd, sense_uaddr, sense_len);
638 goto done;
639 }
640 513
641 /* 514 if (len) {
642 * TODO: Do we need to handle case where request does not 515 err = scsi_map_user_pages(rq->end_io_data, cmd, uaddr, len, rw);
643 * align with LLD. 516 if (err) {
644 */ 517 /*
645 err = scsi_map_user_pages(rq->end_io_data, cmd, rw); 518 * user-space daemon bugs or OOM
646 if (err) { 519 * TODO: we can do better for OOM.
647 eprintk("%p %d\n", cmd, err); 520 */
648 err = -EAGAIN; 521 struct scsi_tgt_queuedata *qdata;
649 goto done; 522 struct list_head *head;
650 } 523 unsigned long flags;
651 524
652 /* userspace failure */ 525 eprintk("cmd %p ret %d uaddr %lx len %d rw %d\n",
653 if (cmd->result) { 526 cmd, err, uaddr, len, rw);
654 if (status_byte(cmd->result) == CHECK_CONDITION) 527
655 scsi_tgt_copy_sense(cmd, uaddr, len); 528 qdata = shost->uspace_req_q->queuedata;
656 err = __scsi_tgt_transfer_response(cmd); 529 head = &qdata->cmd_hash[cmd_hashfn(tcmd->tag)];
657 goto done; 530
658 } 531 spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
659 /* ask the target LLD to transfer the data to the buffer */ 532 list_add(&tcmd->hash_list, head);
660 err = scsi_tgt_transfer_data(cmd); 533 spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
661 534
535 goto done;
536 }
537 }
538 err = scsi_tgt_transfer_response(cmd);
662done: 539done:
663 scsi_host_put(shost); 540 scsi_host_put(shost);
664 return err; 541 return err;
diff --git a/drivers/scsi/scsi_tgt_priv.h b/drivers/scsi/scsi_tgt_priv.h
index 84488c51ff62..e9e6db1c417f 100644
--- a/drivers/scsi/scsi_tgt_priv.h
+++ b/drivers/scsi/scsi_tgt_priv.h
@@ -18,8 +18,9 @@ extern int scsi_tgt_if_init(void);
18extern int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun, 18extern int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun,
19 u64 tag); 19 u64 tag);
20extern int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag); 20extern int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag);
21extern int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len, 21extern int scsi_tgt_kspace_exec(int host_no, int result, u64 tag,
22 unsigned long uaddr, u8 rw); 22 unsigned long uaddr, u32 len, unsigned long sense_uaddr,
23 u32 sense_len, u8 rw);
23extern int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag, 24extern int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
24 struct scsi_lun *scsilun, void *data); 25 struct scsi_lun *scsilun, void *data);
25extern int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result); 26extern int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 58afdb401703..14c4f065b2b8 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -200,6 +200,8 @@ static const struct {
200 { FC_PORTSPEED_2GBIT, "2 Gbit" }, 200 { FC_PORTSPEED_2GBIT, "2 Gbit" },
201 { FC_PORTSPEED_4GBIT, "4 Gbit" }, 201 { FC_PORTSPEED_4GBIT, "4 Gbit" },
202 { FC_PORTSPEED_10GBIT, "10 Gbit" }, 202 { FC_PORTSPEED_10GBIT, "10 Gbit" },
203 { FC_PORTSPEED_8GBIT, "8 Gbit" },
204 { FC_PORTSPEED_16GBIT, "16 Gbit" },
203 { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" }, 205 { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
204}; 206};
205fc_bitfield_name_search(port_speed, fc_port_speed_names) 207fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index aabaa0576ab4..caf1836bbeca 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -49,7 +49,7 @@ struct iscsi_internal {
49 struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1]; 49 struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
50}; 50};
51 51
52static int iscsi_session_nr; /* sysfs session id for next new session */ 52static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
53 53
54/* 54/*
55 * list of registered transports and lock that must 55 * list of registered transports and lock that must
@@ -300,7 +300,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
300 int err; 300 int err;
301 301
302 ihost = shost->shost_data; 302 ihost = shost->shost_data;
303 session->sid = iscsi_session_nr++; 303 session->sid = atomic_add_return(1, &iscsi_session_nr);
304 session->target_id = target_id; 304 session->target_id = target_id;
305 305
306 snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u", 306 snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
@@ -1419,6 +1419,8 @@ static __init int iscsi_transport_init(void)
1419 printk(KERN_INFO "Loading iSCSI transport class v%s.\n", 1419 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
1420 ISCSI_TRANSPORT_VERSION); 1420 ISCSI_TRANSPORT_VERSION);
1421 1421
1422 atomic_set(&iscsi_session_nr, 0);
1423
1422 err = class_register(&iscsi_transport_class); 1424 err = class_register(&iscsi_transport_class);
1423 if (err) 1425 if (err)
1424 return err; 1426 return err;
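
[Editorial aside, not part of the patch: the scsi_transport_iscsi.c hunk converts the session id counter from a plain int to an atomic so concurrent session creation cannot hand out duplicate sysfs ids. The sketch below illustrates the idea with C11 atomics standing in for the kernel's atomic_t.]

/*
 * Sketch: an atomically advanced session id counter.  atomic_fetch_add
 * plus one mirrors atomic_add_return(1, ...), so each caller gets a
 * unique, monotonically increasing id.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint iscsi_session_nr;	/* sysfs session id for next new session */

static unsigned int add_session(void)
{
	return atomic_fetch_add(&iscsi_session_nr, 1) + 1;
}

int main(void)
{
	printf("session%u\n", add_session());	/* session1 */
	printf("session%u\n", add_session());	/* session2 */
	return 0;
}
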
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5a8f55fea5ff..00e46662296f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -58,16 +58,10 @@
58#include <scsi/scsi_host.h> 58#include <scsi/scsi_host.h>
59#include <scsi/scsi_ioctl.h> 59#include <scsi/scsi_ioctl.h>
60#include <scsi/scsicam.h> 60#include <scsi/scsicam.h>
61#include <scsi/sd.h>
61 62
62#include "scsi_logging.h" 63#include "scsi_logging.h"
63 64
64/*
65 * More than enough for everybody ;) The huge number of majors
66 * is a leftover from 16bit dev_t days, we don't really need that
67 * much numberspace.
68 */
69#define SD_MAJORS 16
70
71MODULE_AUTHOR("Eric Youngdale"); 65MODULE_AUTHOR("Eric Youngdale");
72MODULE_DESCRIPTION("SCSI disk (sd) driver"); 66MODULE_DESCRIPTION("SCSI disk (sd) driver");
73MODULE_LICENSE("GPL"); 67MODULE_LICENSE("GPL");
@@ -88,45 +82,9 @@ MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
88MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR); 82MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
89MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR); 83MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
90MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR); 84MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
91 85MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
92/* 86MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
93 * This is limited by the naming scheme enforced in sd_probe, 87MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
94 * add another character to it if you really need more disks.
95 */
96#define SD_MAX_DISKS (((26 * 26) + 26 + 1) * 26)
97
98/*
99 * Time out in seconds for disks and Magneto-opticals (which are slower).
100 */
101#define SD_TIMEOUT (30 * HZ)
102#define SD_MOD_TIMEOUT (75 * HZ)
103
104/*
105 * Number of allowed retries
106 */
107#define SD_MAX_RETRIES 5
108#define SD_PASSTHROUGH_RETRIES 1
109
110/*
111 * Size of the initial data buffer for mode and read capacity data
112 */
113#define SD_BUF_SIZE 512
114
115struct scsi_disk {
116 struct scsi_driver *driver; /* always &sd_template */
117 struct scsi_device *device;
118 struct class_device cdev;
119 struct gendisk *disk;
120 unsigned int openers; /* protected by BKL for now, yuck */
121 sector_t capacity; /* size in 512-byte sectors */
122 u32 index;
123 u8 media_present;
124 u8 write_prot;
125 unsigned WCE : 1; /* state of disk WCE bit */
126 unsigned RCD : 1; /* state of disk RCD bit, unused */
127 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
128};
129#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,cdev)
130 88
131static DEFINE_IDR(sd_index_idr); 89static DEFINE_IDR(sd_index_idr);
132static DEFINE_SPINLOCK(sd_index_lock); 90static DEFINE_SPINLOCK(sd_index_lock);
@@ -136,20 +94,6 @@ static DEFINE_SPINLOCK(sd_index_lock);
136 * object after last put) */ 94 * object after last put) */
137static DEFINE_MUTEX(sd_ref_mutex); 95static DEFINE_MUTEX(sd_ref_mutex);
138 96
139static int sd_revalidate_disk(struct gendisk *disk);
140static void sd_rw_intr(struct scsi_cmnd * SCpnt);
141
142static int sd_probe(struct device *);
143static int sd_remove(struct device *);
144static void sd_shutdown(struct device *dev);
145static void sd_rescan(struct device *);
146static int sd_init_command(struct scsi_cmnd *);
147static int sd_issue_flush(struct device *, sector_t *);
148static void sd_prepare_flush(request_queue_t *, struct request *);
149static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
150 unsigned char *buffer);
151static void scsi_disk_release(struct class_device *cdev);
152
153static const char *sd_cache_types[] = { 97static const char *sd_cache_types[] = {
154 "write through", "none", "write back", 98 "write through", "none", "write back",
155 "write back, no read (daft)" 99 "write back, no read (daft)"
@@ -199,13 +143,27 @@ static ssize_t sd_store_cache_type(struct class_device *cdev, const char *buf,
199 if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, 143 if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
200 SD_MAX_RETRIES, &data, &sshdr)) { 144 SD_MAX_RETRIES, &data, &sshdr)) {
201 if (scsi_sense_valid(&sshdr)) 145 if (scsi_sense_valid(&sshdr))
202 scsi_print_sense_hdr(sdkp->disk->disk_name, &sshdr); 146 sd_print_sense_hdr(sdkp, &sshdr);
203 return -EINVAL; 147 return -EINVAL;
204 } 148 }
205 sd_revalidate_disk(sdkp->disk); 149 sd_revalidate_disk(sdkp->disk);
206 return count; 150 return count;
207} 151}
208 152
153static ssize_t sd_store_manage_start_stop(struct class_device *cdev,
154 const char *buf, size_t count)
155{
156 struct scsi_disk *sdkp = to_scsi_disk(cdev);
157 struct scsi_device *sdp = sdkp->device;
158
159 if (!capable(CAP_SYS_ADMIN))
160 return -EACCES;
161
162 sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);
163
164 return count;
165}
166
209static ssize_t sd_store_allow_restart(struct class_device *cdev, const char *buf, 167static ssize_t sd_store_allow_restart(struct class_device *cdev, const char *buf,
210 size_t count) 168 size_t count)
211{ 169{
@@ -238,6 +196,14 @@ static ssize_t sd_show_fua(struct class_device *cdev, char *buf)
238 return snprintf(buf, 20, "%u\n", sdkp->DPOFUA); 196 return snprintf(buf, 20, "%u\n", sdkp->DPOFUA);
239} 197}
240 198
199static ssize_t sd_show_manage_start_stop(struct class_device *cdev, char *buf)
200{
201 struct scsi_disk *sdkp = to_scsi_disk(cdev);
202 struct scsi_device *sdp = sdkp->device;
203
204 return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);
205}
206
241static ssize_t sd_show_allow_restart(struct class_device *cdev, char *buf) 207static ssize_t sd_show_allow_restart(struct class_device *cdev, char *buf)
242{ 208{
243 struct scsi_disk *sdkp = to_scsi_disk(cdev); 209 struct scsi_disk *sdkp = to_scsi_disk(cdev);
@@ -251,6 +217,8 @@ static struct class_device_attribute sd_disk_attrs[] = {
251 __ATTR(FUA, S_IRUGO, sd_show_fua, NULL), 217 __ATTR(FUA, S_IRUGO, sd_show_fua, NULL),
252 __ATTR(allow_restart, S_IRUGO|S_IWUSR, sd_show_allow_restart, 218 __ATTR(allow_restart, S_IRUGO|S_IWUSR, sd_show_allow_restart,
253 sd_store_allow_restart), 219 sd_store_allow_restart),
220 __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
221 sd_store_manage_start_stop),
254 __ATTR_NULL, 222 __ATTR_NULL,
255}; 223};
256 224
@@ -267,6 +235,8 @@ static struct scsi_driver sd_template = {
267 .name = "sd", 235 .name = "sd",
268 .probe = sd_probe, 236 .probe = sd_probe,
269 .remove = sd_remove, 237 .remove = sd_remove,
238 .suspend = sd_suspend,
239 .resume = sd_resume,
270 .shutdown = sd_shutdown, 240 .shutdown = sd_shutdown,
271 }, 241 },
272 .rescan = sd_rescan, 242 .rescan = sd_rescan,
@@ -371,15 +341,19 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
371 unsigned int this_count = SCpnt->request_bufflen >> 9; 341 unsigned int this_count = SCpnt->request_bufflen >> 9;
372 unsigned int timeout = sdp->timeout; 342 unsigned int timeout = sdp->timeout;
373 343
374 SCSI_LOG_HLQUEUE(1, printk("sd_init_command: disk=%s, block=%llu, " 344 SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
375 "count=%d\n", disk->disk_name, 345 "sd_init_command: block=%llu, "
376 (unsigned long long)block, this_count)); 346 "count=%d\n",
347 (unsigned long long)block,
348 this_count));
377 349
378 if (!sdp || !scsi_device_online(sdp) || 350 if (!sdp || !scsi_device_online(sdp) ||
379 block + rq->nr_sectors > get_capacity(disk)) { 351 block + rq->nr_sectors > get_capacity(disk)) {
380 SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", 352 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
381 rq->nr_sectors)); 353 "Finishing %ld sectors\n",
382 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); 354 rq->nr_sectors));
355 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
356 "Retry with 0x%p\n", SCpnt));
383 return 0; 357 return 0;
384 } 358 }
385 359
@@ -391,8 +365,8 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
391 /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */ 365 /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
392 return 0; 366 return 0;
393 } 367 }
394 SCSI_LOG_HLQUEUE(2, printk("%s : block=%llu\n", 368 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
395 disk->disk_name, (unsigned long long)block)); 369 (unsigned long long)block));
396 370
397 /* 371 /*
398 * If we have a 1K hardware sectorsize, prevent access to single 372 * If we have a 1K hardware sectorsize, prevent access to single
@@ -407,7 +381,8 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
407 */ 381 */
408 if (sdp->sector_size == 1024) { 382 if (sdp->sector_size == 1024) {
409 if ((block & 1) || (rq->nr_sectors & 1)) { 383 if ((block & 1) || (rq->nr_sectors & 1)) {
410 printk(KERN_ERR "sd: Bad block number requested"); 384 scmd_printk(KERN_ERR, SCpnt,
385 "Bad block number requested\n");
411 return 0; 386 return 0;
412 } else { 387 } else {
413 block = block >> 1; 388 block = block >> 1;
@@ -416,7 +391,8 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
416 } 391 }
417 if (sdp->sector_size == 2048) { 392 if (sdp->sector_size == 2048) {
418 if ((block & 3) || (rq->nr_sectors & 3)) { 393 if ((block & 3) || (rq->nr_sectors & 3)) {
419 printk(KERN_ERR "sd: Bad block number requested"); 394 scmd_printk(KERN_ERR, SCpnt,
395 "Bad block number requested\n");
420 return 0; 396 return 0;
421 } else { 397 } else {
422 block = block >> 2; 398 block = block >> 2;
@@ -425,7 +401,8 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
425 } 401 }
426 if (sdp->sector_size == 4096) { 402 if (sdp->sector_size == 4096) {
427 if ((block & 7) || (rq->nr_sectors & 7)) { 403 if ((block & 7) || (rq->nr_sectors & 7)) {
428 printk(KERN_ERR "sd: Bad block number requested"); 404 scmd_printk(KERN_ERR, SCpnt,
405 "Bad block number requested\n");
429 return 0; 406 return 0;
430 } else { 407 } else {
431 block = block >> 3; 408 block = block >> 3;
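The sector-size hunks above replace bare printk() calls with scmd_printk() but keep the same alignment rule: a request expressed in 512-byte blocks is only accepted on a 1024-, 2048- or 4096-byte hardware sector when both its start and length are multiples of the scale factor, and the block number is then shifted down. A standalone sketch of that check (the helper is illustrative, not code from the patch):

/*
 * Illustrative, standalone version of the alignment/scaling rule used
 * for larger hardware sectors above.
 */
#include <stdio.h>

static int scale_to_hw_sectors(unsigned long long *block, unsigned long *count,
			       unsigned int sector_size)
{
	unsigned int shift;

	switch (sector_size) {
	case 512:  shift = 0; break;
	case 1024: shift = 1; break;
	case 2048: shift = 2; break;
	case 4096: shift = 3; break;
	default:   return -1;			/* unsupported size */
	}
	if ((*block | *count) & ((1ULL << shift) - 1))
		return -1;			/* "Bad block number requested" */
	*block >>= shift;			/* 512-byte units -> hardware sectors */
	*count >>= shift;
	return 0;
}

int main(void)
{
	unsigned long long block = 16;
	unsigned long count = 8;

	if (scale_to_hw_sectors(&block, &count, 4096) == 0)
		printf("hw block=%llu, hw count=%lu\n", block, count);
	return 0;
}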
@@ -442,13 +419,15 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
442 SCpnt->cmnd[0] = READ_6; 419 SCpnt->cmnd[0] = READ_6;
443 SCpnt->sc_data_direction = DMA_FROM_DEVICE; 420 SCpnt->sc_data_direction = DMA_FROM_DEVICE;
444 } else { 421 } else {
445 printk(KERN_ERR "sd: Unknown command %x\n", rq->cmd_flags); 422 scmd_printk(KERN_ERR, SCpnt, "Unknown command %x\n", rq->cmd_flags);
446 return 0; 423 return 0;
447 } 424 }
448 425
449 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", 426 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
450 disk->disk_name, (rq_data_dir(rq) == WRITE) ? 427 "%s %d/%ld 512 byte blocks.\n",
451 "writing" : "reading", this_count, rq->nr_sectors)); 428 (rq_data_dir(rq) == WRITE) ?
429 "writing" : "reading", this_count,
430 rq->nr_sectors));
452 431
453 SCpnt->cmnd[1] = 0; 432 SCpnt->cmnd[1] = 0;
454 433
@@ -490,7 +469,8 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
490 * during operation and thus turned off 469 * during operation and thus turned off
491 * use_10_for_rw. 470 * use_10_for_rw.
492 */ 471 */
493 printk(KERN_ERR "sd: FUA write on READ/WRITE(6) drive\n"); 472 scmd_printk(KERN_ERR, SCpnt,
473 "FUA write on READ/WRITE(6) drive\n");
494 return 0; 474 return 0;
495 } 475 }
496 476
@@ -549,7 +529,7 @@ static int sd_open(struct inode *inode, struct file *filp)
549 return -ENXIO; 529 return -ENXIO;
550 530
551 531
552 SCSI_LOG_HLQUEUE(3, printk("sd_open: disk=%s\n", disk->disk_name)); 532 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
553 533
554 sdev = sdkp->device; 534 sdev = sdkp->device;
555 535
@@ -619,7 +599,7 @@ static int sd_release(struct inode *inode, struct file *filp)
619 struct scsi_disk *sdkp = scsi_disk(disk); 599 struct scsi_disk *sdkp = scsi_disk(disk);
620 struct scsi_device *sdev = sdkp->device; 600 struct scsi_device *sdev = sdkp->device;
621 601
622 SCSI_LOG_HLQUEUE(3, printk("sd_release: disk=%s\n", disk->disk_name)); 602 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
623 603
624 if (!--sdkp->openers && sdev->removable) { 604 if (!--sdkp->openers && sdev->removable) {
625 if (scsi_block_when_processing_errors(sdev)) 605 if (scsi_block_when_processing_errors(sdev))
@@ -732,8 +712,7 @@ static int sd_media_changed(struct gendisk *disk)
732 struct scsi_device *sdp = sdkp->device; 712 struct scsi_device *sdp = sdkp->device;
733 int retval; 713 int retval;
734 714
735 SCSI_LOG_HLQUEUE(3, printk("sd_media_changed: disk=%s\n", 715 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n"));
736 disk->disk_name));
737 716
738 if (!sdp->removable) 717 if (!sdp->removable)
739 return 0; 718 return 0;
@@ -786,9 +765,10 @@ not_present:
786 return 1; 765 return 1;
787} 766}
788 767
789static int sd_sync_cache(struct scsi_device *sdp) 768static int sd_sync_cache(struct scsi_disk *sdkp)
790{ 769{
791 int retries, res; 770 int retries, res;
771 struct scsi_device *sdp = sdkp->device;
792 struct scsi_sense_hdr sshdr; 772 struct scsi_sense_hdr sshdr;
793 773
794 if (!scsi_device_online(sdp)) 774 if (!scsi_device_online(sdp))
@@ -809,28 +789,27 @@ static int sd_sync_cache(struct scsi_device *sdp)
809 break; 789 break;
810 } 790 }
811 791
812 if (res) { printk(KERN_WARNING "FAILED\n status = %x, message = %02x, " 792 if (res) {
813 "host = %d, driver = %02x\n ", 793 sd_print_result(sdkp, res);
814 status_byte(res), msg_byte(res), 794 if (driver_byte(res) & DRIVER_SENSE)
815 host_byte(res), driver_byte(res)); 795 sd_print_sense_hdr(sdkp, &sshdr);
816 if (driver_byte(res) & DRIVER_SENSE)
817 scsi_print_sense_hdr("sd", &sshdr);
818 } 796 }
819 797
820 return res; 798 if (res)
799 return -EIO;
800 return 0;
821} 801}
822 802
823static int sd_issue_flush(struct device *dev, sector_t *error_sector) 803static int sd_issue_flush(struct device *dev, sector_t *error_sector)
824{ 804{
825 int ret = 0; 805 int ret = 0;
826 struct scsi_device *sdp = to_scsi_device(dev);
827 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); 806 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
828 807
829 if (!sdkp) 808 if (!sdkp)
830 return -ENODEV; 809 return -ENODEV;
831 810
832 if (sdkp->WCE) 811 if (sdkp->WCE)
833 ret = sd_sync_cache(sdp); 812 ret = sd_sync_cache(sdkp);
834 scsi_disk_put(sdkp); 813 scsi_disk_put(sdkp);
835 return ret; 814 return ret;
836} 815}
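sd_sync_cache() now takes the scsi_disk, reports failures through sd_print_result()/sd_print_sense_hdr(), and returns 0 or -EIO instead of the raw scsi_execute_req() result, so sd_issue_flush() above and sd_suspend() further down can pass the value straight back as an errno. A sketch of the resulting calling convention (the wrapper function itself is illustrative, not from the patch):

/*
 * Illustrative caller of the reworked sd_sync_cache(): the return value
 * is now a plain errno (0 or -EIO), so it can be propagated directly.
 */
static int example_flush(struct scsi_disk *sdkp)
{
	int ret = 0;

	if (sdkp->WCE)			/* only drives with a write cache need a flush */
		ret = sd_sync_cache(sdkp);
	return ret;			/* 0 on success, -EIO on failure */
}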
@@ -928,12 +907,14 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
928 sense_deferred = scsi_sense_is_deferred(&sshdr); 907 sense_deferred = scsi_sense_is_deferred(&sshdr);
929 } 908 }
930#ifdef CONFIG_SCSI_LOGGING 909#ifdef CONFIG_SCSI_LOGGING
931 SCSI_LOG_HLCOMPLETE(1, printk("sd_rw_intr: %s: res=0x%x\n", 910 SCSI_LOG_HLCOMPLETE(1, scsi_print_result(SCpnt));
932 SCpnt->request->rq_disk->disk_name, result));
933 if (sense_valid) { 911 if (sense_valid) {
934 SCSI_LOG_HLCOMPLETE(1, printk("sd_rw_intr: sb[respc,sk,asc," 912 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
935 "ascq]=%x,%x,%x,%x\n", sshdr.response_code, 913 "sd_rw_intr: sb[respc,sk,asc,"
936 sshdr.sense_key, sshdr.asc, sshdr.ascq)); 914 "ascq]=%x,%x,%x,%x\n",
915 sshdr.response_code,
916 sshdr.sense_key, sshdr.asc,
917 sshdr.ascq));
937 } 918 }
938#endif 919#endif
939 if (driver_byte(result) != DRIVER_SENSE && 920 if (driver_byte(result) != DRIVER_SENSE &&
@@ -1025,7 +1006,7 @@ static int media_not_present(struct scsi_disk *sdkp,
1025 * spinup disk - called only in sd_revalidate_disk() 1006 * spinup disk - called only in sd_revalidate_disk()
1026 */ 1007 */
1027static void 1008static void
1028sd_spinup_disk(struct scsi_disk *sdkp, char *diskname) 1009sd_spinup_disk(struct scsi_disk *sdkp)
1029{ 1010{
1030 unsigned char cmd[10]; 1011 unsigned char cmd[10];
1031 unsigned long spintime_expire = 0; 1012 unsigned long spintime_expire = 0;
@@ -1069,9 +1050,10 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
1069 if ((driver_byte(the_result) & DRIVER_SENSE) == 0) { 1050 if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
1070 /* no sense, TUR either succeeded or failed 1051 /* no sense, TUR either succeeded or failed
1071 * with a status error */ 1052 * with a status error */
1072 if(!spintime && !scsi_status_is_good(the_result)) 1053 if(!spintime && !scsi_status_is_good(the_result)) {
1073 printk(KERN_NOTICE "%s: Unit Not Ready, " 1054 sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
1074 "error = 0x%x\n", diskname, the_result); 1055 sd_print_result(sdkp, the_result);
1056 }
1075 break; 1057 break;
1076 } 1058 }
1077 1059
@@ -1096,8 +1078,7 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
1096 */ 1078 */
1097 } else if (sense_valid && sshdr.sense_key == NOT_READY) { 1079 } else if (sense_valid && sshdr.sense_key == NOT_READY) {
1098 if (!spintime) { 1080 if (!spintime) {
1099 printk(KERN_NOTICE "%s: Spinning up disk...", 1081 sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
1100 diskname);
1101 cmd[0] = START_STOP; 1082 cmd[0] = START_STOP;
1102 cmd[1] = 1; /* Return immediately */ 1083 cmd[1] = 1; /* Return immediately */
1103 memset((void *) &cmd[2], 0, 8); 1084 memset((void *) &cmd[2], 0, 8);
@@ -1130,9 +1111,8 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
1130 /* we don't understand the sense code, so it's 1111 /* we don't understand the sense code, so it's
1131 * probably pointless to loop */ 1112 * probably pointless to loop */
1132 if(!spintime) { 1113 if(!spintime) {
1133 printk(KERN_NOTICE "%s: Unit Not Ready, " 1114 sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
1134 "sense:\n", diskname); 1115 sd_print_sense_hdr(sdkp, &sshdr);
1135 scsi_print_sense_hdr("", &sshdr);
1136 } 1116 }
1137 break; 1117 break;
1138 } 1118 }
@@ -1151,8 +1131,7 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
1151 * read disk capacity 1131 * read disk capacity
1152 */ 1132 */
1153static void 1133static void
1154sd_read_capacity(struct scsi_disk *sdkp, char *diskname, 1134sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
1155 unsigned char *buffer)
1156{ 1135{
1157 unsigned char cmd[16]; 1136 unsigned char cmd[16];
1158 int the_result, retries; 1137 int the_result, retries;
@@ -1191,18 +1170,12 @@ repeat:
1191 } while (the_result && retries); 1170 } while (the_result && retries);
1192 1171
1193 if (the_result && !longrc) { 1172 if (the_result && !longrc) {
1194 printk(KERN_NOTICE "%s : READ CAPACITY failed.\n" 1173 sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY failed\n");
1195 "%s : status=%x, message=%02x, host=%d, driver=%02x \n", 1174 sd_print_result(sdkp, the_result);
1196 diskname, diskname,
1197 status_byte(the_result),
1198 msg_byte(the_result),
1199 host_byte(the_result),
1200 driver_byte(the_result));
1201
1202 if (driver_byte(the_result) & DRIVER_SENSE) 1175 if (driver_byte(the_result) & DRIVER_SENSE)
1203 scsi_print_sense_hdr("sd", &sshdr); 1176 sd_print_sense_hdr(sdkp, &sshdr);
1204 else 1177 else
1205 printk("%s : sense not available. \n", diskname); 1178 sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
1206 1179
1207 /* Set dirty bit for removable devices if not ready - 1180 /* Set dirty bit for removable devices if not ready -
1208 * sometimes drives will not report this properly. */ 1181 * sometimes drives will not report this properly. */
@@ -1218,16 +1191,10 @@ repeat:
1218 return; 1191 return;
1219 } else if (the_result && longrc) { 1192 } else if (the_result && longrc) {
1220 /* READ CAPACITY(16) has been failed */ 1193 /* READ CAPACITY(16) has been failed */
1221 printk(KERN_NOTICE "%s : READ CAPACITY(16) failed.\n" 1194 sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY(16) failed\n");
1222 "%s : status=%x, message=%02x, host=%d, driver=%02x \n", 1195 sd_print_result(sdkp, the_result);
1223 diskname, diskname, 1196 sd_printk(KERN_NOTICE, sdkp, "Use 0xffffffff as device size\n");
1224 status_byte(the_result), 1197
1225 msg_byte(the_result),
1226 host_byte(the_result),
1227 driver_byte(the_result));
1228 printk(KERN_NOTICE "%s : use 0xffffffff as device size\n",
1229 diskname);
1230
1231 sdkp->capacity = 1 + (sector_t) 0xffffffff; 1198 sdkp->capacity = 1 + (sector_t) 0xffffffff;
1232 goto got_data; 1199 goto got_data;
1233 } 1200 }
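Both READ CAPACITY failure paths now call sd_print_result() instead of open-coding the status/message/host/driver decomposition the removed printk() performed. That decomposition, mirroring the status_byte()/msg_byte()/host_byte()/driver_byte() macros of this era's <scsi/scsi.h>, is sketched below as a standalone program; the sample value is arbitrary:

/*
 * Standalone sketch of the field layout sd_print_result() /
 * scsi_show_result() now decodes.
 */
#include <stdio.h>

static void show_result(int result)
{
	printf("status=%x, message=%02x, host=%d, driver=%02x\n",
	       (result >> 1) & 0x7f,		/* status_byte()  */
	       (result >> 8) & 0xff,		/* msg_byte()     */
	       (result >> 16) & 0xff,		/* host_byte()    */
	       (result >> 24) & 0xff);		/* driver_byte()  */
}

int main(void)
{
	show_result(0x08000002);	/* driver byte 0x08 (DRIVER_SENSE), SAM status 0x02 */
	return 0;
}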
@@ -1238,14 +1205,14 @@ repeat:
1238 if (buffer[0] == 0xff && buffer[1] == 0xff && 1205 if (buffer[0] == 0xff && buffer[1] == 0xff &&
1239 buffer[2] == 0xff && buffer[3] == 0xff) { 1206 buffer[2] == 0xff && buffer[3] == 0xff) {
1240 if(sizeof(sdkp->capacity) > 4) { 1207 if(sizeof(sdkp->capacity) > 4) {
1241 printk(KERN_NOTICE "%s : very big device. try to use" 1208 sd_printk(KERN_NOTICE, sdkp, "Very big device. "
1242 " READ CAPACITY(16).\n", diskname); 1209 "Trying to use READ CAPACITY(16).\n");
1243 longrc = 1; 1210 longrc = 1;
1244 goto repeat; 1211 goto repeat;
1245 } 1212 }
1246 printk(KERN_ERR "%s: too big for this kernel. Use a " 1213 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use "
1247 "kernel compiled with support for large block " 1214 "a kernel compiled with support for large "
1248 "devices.\n", diskname); 1215 "block devices.\n");
1249 sdkp->capacity = 0; 1216 sdkp->capacity = 0;
1250 goto got_data; 1217 goto got_data;
1251 } 1218 }
@@ -1284,8 +1251,8 @@ repeat:
1284got_data: 1251got_data:
1285 if (sector_size == 0) { 1252 if (sector_size == 0) {
1286 sector_size = 512; 1253 sector_size = 512;
1287 printk(KERN_NOTICE "%s : sector size 0 reported, " 1254 sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
1288 "assuming 512.\n", diskname); 1255 "assuming 512.\n");
1289 } 1256 }
1290 1257
1291 if (sector_size != 512 && 1258 if (sector_size != 512 &&
@@ -1293,8 +1260,8 @@ got_data:
1293 sector_size != 2048 && 1260 sector_size != 2048 &&
1294 sector_size != 4096 && 1261 sector_size != 4096 &&
1295 sector_size != 256) { 1262 sector_size != 256) {
1296 printk(KERN_NOTICE "%s : unsupported sector size " 1263 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
1297 "%d.\n", diskname, sector_size); 1264 sector_size);
1298 /* 1265 /*
1299 * The user might want to re-format the drive with 1266 * The user might want to re-format the drive with
1300 * a supported sectorsize. Once this happens, it 1267 * a supported sectorsize. Once this happens, it
@@ -1327,10 +1294,10 @@ got_data:
1327 mb -= sz - 974; 1294 mb -= sz - 974;
1328 sector_div(mb, 1950); 1295 sector_div(mb, 1950);
1329 1296
1330 printk(KERN_NOTICE "SCSI device %s: " 1297 sd_printk(KERN_NOTICE, sdkp,
1331 "%llu %d-byte hdwr sectors (%llu MB)\n", 1298 "%llu %d-byte hardware sectors (%llu MB)\n",
1332 diskname, (unsigned long long)sdkp->capacity, 1299 (unsigned long long)sdkp->capacity,
1333 hard_sector, (unsigned long long)mb); 1300 hard_sector, (unsigned long long)mb);
1334 } 1301 }
1335 1302
1336 /* Rescale capacity to 512-byte units */ 1303 /* Rescale capacity to 512-byte units */
@@ -1362,8 +1329,7 @@ sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
1362 * called with buffer of length SD_BUF_SIZE 1329 * called with buffer of length SD_BUF_SIZE
1363 */ 1330 */
1364static void 1331static void
1365sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname, 1332sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
1366 unsigned char *buffer)
1367{ 1333{
1368 int res; 1334 int res;
1369 struct scsi_device *sdp = sdkp->device; 1335 struct scsi_device *sdp = sdkp->device;
@@ -1371,7 +1337,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
1371 1337
1372 set_disk_ro(sdkp->disk, 0); 1338 set_disk_ro(sdkp->disk, 0);
1373 if (sdp->skip_ms_page_3f) { 1339 if (sdp->skip_ms_page_3f) {
1374 printk(KERN_NOTICE "%s: assuming Write Enabled\n", diskname); 1340 sd_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
1375 return; 1341 return;
1376 } 1342 }
1377 1343
@@ -1403,15 +1369,16 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
1403 } 1369 }
1404 1370
1405 if (!scsi_status_is_good(res)) { 1371 if (!scsi_status_is_good(res)) {
1406 printk(KERN_WARNING 1372 sd_printk(KERN_WARNING, sdkp,
1407 "%s: test WP failed, assume Write Enabled\n", diskname); 1373 "Test WP failed, assume Write Enabled\n");
1408 } else { 1374 } else {
1409 sdkp->write_prot = ((data.device_specific & 0x80) != 0); 1375 sdkp->write_prot = ((data.device_specific & 0x80) != 0);
1410 set_disk_ro(sdkp->disk, sdkp->write_prot); 1376 set_disk_ro(sdkp->disk, sdkp->write_prot);
1411 printk(KERN_NOTICE "%s: Write Protect is %s\n", diskname, 1377 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
1412 sdkp->write_prot ? "on" : "off"); 1378 sdkp->write_prot ? "on" : "off");
1413 printk(KERN_DEBUG "%s: Mode Sense: %02x %02x %02x %02x\n", 1379 sd_printk(KERN_DEBUG, sdkp,
1414 diskname, buffer[0], buffer[1], buffer[2], buffer[3]); 1380 "Mode Sense: %02x %02x %02x %02x\n",
1381 buffer[0], buffer[1], buffer[2], buffer[3]);
1415 } 1382 }
1416} 1383}
1417 1384
@@ -1420,8 +1387,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
1420 * called with buffer of length SD_BUF_SIZE 1387 * called with buffer of length SD_BUF_SIZE
1421 */ 1388 */
1422static void 1389static void
1423sd_read_cache_type(struct scsi_disk *sdkp, char *diskname, 1390sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
1424 unsigned char *buffer)
1425{ 1391{
1426 int len = 0, res; 1392 int len = 0, res;
1427 struct scsi_device *sdp = sdkp->device; 1393 struct scsi_device *sdp = sdkp->device;
@@ -1450,8 +1416,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1450 1416
1451 if (!data.header_length) { 1417 if (!data.header_length) {
1452 modepage = 6; 1418 modepage = 6;
1453 printk(KERN_ERR "%s: missing header in MODE_SENSE response\n", 1419 sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
1454 diskname);
1455 } 1420 }
1456 1421
1457 /* that went OK, now ask for the proper length */ 1422 /* that went OK, now ask for the proper length */
@@ -1478,13 +1443,12 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1478 int offset = data.header_length + data.block_descriptor_length; 1443 int offset = data.header_length + data.block_descriptor_length;
1479 1444
1480 if (offset >= SD_BUF_SIZE - 2) { 1445 if (offset >= SD_BUF_SIZE - 2) {
1481 printk(KERN_ERR "%s: malformed MODE SENSE response", 1446 sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n");
1482 diskname);
1483 goto defaults; 1447 goto defaults;
1484 } 1448 }
1485 1449
1486 if ((buffer[offset] & 0x3f) != modepage) { 1450 if ((buffer[offset] & 0x3f) != modepage) {
1487 printk(KERN_ERR "%s: got wrong page\n", diskname); 1451 sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
1488 goto defaults; 1452 goto defaults;
1489 } 1453 }
1490 1454
@@ -1498,14 +1462,13 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
1498 1462
1499 sdkp->DPOFUA = (data.device_specific & 0x10) != 0; 1463 sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
1500 if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { 1464 if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
1501 printk(KERN_NOTICE "SCSI device %s: uses " 1465 sd_printk(KERN_NOTICE, sdkp,
1502 "READ/WRITE(6), disabling FUA\n", diskname); 1466 "Uses READ/WRITE(6), disabling FUA\n");
1503 sdkp->DPOFUA = 0; 1467 sdkp->DPOFUA = 0;
1504 } 1468 }
1505 1469
1506 printk(KERN_NOTICE "SCSI device %s: " 1470 sd_printk(KERN_NOTICE, sdkp,
1507 "write cache: %s, read cache: %s, %s\n", 1471 "Write cache: %s, read cache: %s, %s\n",
1508 diskname,
1509 sdkp->WCE ? "enabled" : "disabled", 1472 sdkp->WCE ? "enabled" : "disabled",
1510 sdkp->RCD ? "disabled" : "enabled", 1473 sdkp->RCD ? "disabled" : "enabled",
1511 sdkp->DPOFUA ? "supports DPO and FUA" 1474 sdkp->DPOFUA ? "supports DPO and FUA"
@@ -1518,15 +1481,13 @@ bad_sense:
1518 if (scsi_sense_valid(&sshdr) && 1481 if (scsi_sense_valid(&sshdr) &&
1519 sshdr.sense_key == ILLEGAL_REQUEST && 1482 sshdr.sense_key == ILLEGAL_REQUEST &&
1520 sshdr.asc == 0x24 && sshdr.ascq == 0x0) 1483 sshdr.asc == 0x24 && sshdr.ascq == 0x0)
1521 printk(KERN_NOTICE "%s: cache data unavailable\n", 1484 /* Invalid field in CDB */
1522 diskname); /* Invalid field in CDB */ 1485 sd_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
1523 else 1486 else
1524 printk(KERN_ERR "%s: asking for cache data failed\n", 1487 sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n");
1525 diskname);
1526 1488
1527defaults: 1489defaults:
1528 printk(KERN_ERR "%s: assuming drive cache: write through\n", 1490 sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n");
1529 diskname);
1530 sdkp->WCE = 0; 1491 sdkp->WCE = 0;
1531 sdkp->RCD = 0; 1492 sdkp->RCD = 0;
1532 sdkp->DPOFUA = 0; 1493 sdkp->DPOFUA = 0;
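The cache messages above are derived from MODE SENSE data: DPOFUA is bit 4 of the mode-parameter header's device-specific byte, while WCE and RCD are bits 2 and 0 of byte 2 of the caching mode page per SBC. A standalone sketch of that parsing, with hand-rolled offsets for illustration only (sd locates the page via scsi_mode_sense() and the header/block-descriptor lengths):

/*
 * Standalone sketch of where the reported cache flags come from.
 */
#include <stdio.h>

int main(void)
{
	unsigned char dev_specific = 0x10;			/* DPOFUA set */
	unsigned char caching_page[20] = { 0x08, 0x12, 0x04 };	/* page 0x08, WCE set, RCD clear */

	int dpofua = (dev_specific & 0x10) != 0;
	int wce    = (caching_page[2] & 0x04) != 0;
	int rcd    = (caching_page[2] & 0x01) != 0;

	printf("Write cache: %s, read cache: %s, %s\n",
	       wce ? "enabled" : "disabled",
	       rcd ? "disabled" : "enabled",
	       dpofua ? "supports DPO and FUA" : "doesn't support DPO or FUA");
	return 0;
}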
@@ -1544,7 +1505,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
1544 unsigned char *buffer; 1505 unsigned char *buffer;
1545 unsigned ordered; 1506 unsigned ordered;
1546 1507
1547 SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name)); 1508 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
1509 "sd_revalidate_disk\n"));
1548 1510
1549 /* 1511 /*
1550 * If the device is offline, don't try and read capacity or any 1512 * If the device is offline, don't try and read capacity or any
@@ -1555,8 +1517,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
1555 1517
1556 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL | __GFP_DMA); 1518 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL | __GFP_DMA);
1557 if (!buffer) { 1519 if (!buffer) {
1558 printk(KERN_WARNING "(sd_revalidate_disk:) Memory allocation " 1520 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
1559 "failure.\n"); 1521 "allocation failure.\n");
1560 goto out; 1522 goto out;
1561 } 1523 }
1562 1524
@@ -1568,16 +1530,16 @@ static int sd_revalidate_disk(struct gendisk *disk)
1568 sdkp->WCE = 0; 1530 sdkp->WCE = 0;
1569 sdkp->RCD = 0; 1531 sdkp->RCD = 0;
1570 1532
1571 sd_spinup_disk(sdkp, disk->disk_name); 1533 sd_spinup_disk(sdkp);
1572 1534
1573 /* 1535 /*
1574 * Without media there is no reason to ask; moreover, some devices 1536 * Without media there is no reason to ask; moreover, some devices
1575 * react badly if we do. 1537 * react badly if we do.
1576 */ 1538 */
1577 if (sdkp->media_present) { 1539 if (sdkp->media_present) {
1578 sd_read_capacity(sdkp, disk->disk_name, buffer); 1540 sd_read_capacity(sdkp, buffer);
1579 sd_read_write_protect_flag(sdkp, disk->disk_name, buffer); 1541 sd_read_write_protect_flag(sdkp, buffer);
1580 sd_read_cache_type(sdkp, disk->disk_name, buffer); 1542 sd_read_cache_type(sdkp, buffer);
1581 } 1543 }
1582 1544
1583 /* 1545 /*
@@ -1709,8 +1671,8 @@ static int sd_probe(struct device *dev)
1709 dev_set_drvdata(dev, sdkp); 1671 dev_set_drvdata(dev, sdkp);
1710 add_disk(gd); 1672 add_disk(gd);
1711 1673
1712 sdev_printk(KERN_NOTICE, sdp, "Attached scsi %sdisk %s\n", 1674 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
1713 sdp->removable ? "removable " : "", gd->disk_name); 1675 sdp->removable ? "removable " : "");
1714 1676
1715 return 0; 1677 return 0;
1716 1678
@@ -1774,6 +1736,31 @@ static void scsi_disk_release(struct class_device *cdev)
1774 kfree(sdkp); 1736 kfree(sdkp);
1775} 1737}
1776 1738
1739static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
1740{
1741 unsigned char cmd[6] = { START_STOP }; /* START_VALID */
1742 struct scsi_sense_hdr sshdr;
1743 struct scsi_device *sdp = sdkp->device;
1744 int res;
1745
1746 if (start)
1747 cmd[4] |= 1; /* START */
1748
1749 if (!scsi_device_online(sdp))
1750 return -ENODEV;
1751
1752 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
1753 SD_TIMEOUT, SD_MAX_RETRIES);
1754 if (res) {
1755 sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
1756 sd_print_result(sdkp, res);
1757 if (driver_byte(res) & DRIVER_SENSE)
1758 sd_print_sense_hdr(sdkp, &sshdr);
1759 }
1760
1761 return res;
1762}
1763
1777/* 1764/*
1778 * Send a SYNCHRONIZE CACHE instruction down to the device through 1765 * Send a SYNCHRONIZE CACHE instruction down to the device through
1779 * the normal SCSI command structure. Wait for the command to 1766 * the normal SCSI command structure. Wait for the command to
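sd_start_stop_device() issues a six-byte START STOP UNIT command through scsi_execute_req(); only byte 4 bit 0 (START) is varied, so the same helper both spins the drive up and stops it. A standalone sketch of the CDB layout (the helper below is illustrative, field values per the SCSI spec):

/*
 * Standalone sketch of the 6-byte START STOP UNIT CDB built above.
 * Opcode 0x1b; byte 1 bit 0 is IMMED (left clear here, so the command
 * completes only once the drive has finished); byte 4 bit 0 is START.
 */
#include <stdio.h>

static void build_start_stop(unsigned char cdb[6], int start)
{
	cdb[0] = 0x1b;				/* START STOP UNIT */
	cdb[1] = 0;				/* IMMED = 0 */
	cdb[2] = cdb[3] = cdb[5] = 0;		/* reserved / control */
	cdb[4] = start ? 0x01 : 0x00;		/* START bit: 1 = spin up, 0 = stop */
}

int main(void)
{
	unsigned char cdb[6];
	int i;

	build_start_stop(cdb, 1);
	for (i = 0; i < 6; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}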
@@ -1781,20 +1768,62 @@ static void scsi_disk_release(struct class_device *cdev)
1781 */ 1768 */
1782static void sd_shutdown(struct device *dev) 1769static void sd_shutdown(struct device *dev)
1783{ 1770{
1784 struct scsi_device *sdp = to_scsi_device(dev);
1785 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); 1771 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
1786 1772
1787 if (!sdkp) 1773 if (!sdkp)
1788 return; /* this can happen */ 1774 return; /* this can happen */
1789 1775
1790 if (sdkp->WCE) { 1776 if (sdkp->WCE) {
1791 printk(KERN_NOTICE "Synchronizing SCSI cache for disk %s: \n", 1777 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
1792 sdkp->disk->disk_name); 1778 sd_sync_cache(sdkp);
1793 sd_sync_cache(sdp); 1779 }
1780
1781 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
1782 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
1783 sd_start_stop_device(sdkp, 0);
1794 } 1784 }
1785
1795 scsi_disk_put(sdkp); 1786 scsi_disk_put(sdkp);
1796} 1787}
1797 1788
1789static int sd_suspend(struct device *dev, pm_message_t mesg)
1790{
1791 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
1792 int ret;
1793
1794 if (!sdkp)
1795 return 0; /* this can happen */
1796
1797 if (sdkp->WCE) {
1798 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
1799 ret = sd_sync_cache(sdkp);
1800 if (ret)
1801 return ret;
1802 }
1803
1804 if (mesg.event == PM_EVENT_SUSPEND &&
1805 sdkp->device->manage_start_stop) {
1806 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
1807 ret = sd_start_stop_device(sdkp, 0);
1808 if (ret)
1809 return ret;
1810 }
1811
1812 return 0;
1813}
1814
1815static int sd_resume(struct device *dev)
1816{
1817 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
1818
1819 if (!sdkp->device->manage_start_stop)
1820 return 0;
1821
1822 sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
1823
1824 return sd_start_stop_device(sdkp, 1);
1825}
1826
1798/** 1827/**
1799 * init_sd - entry point for this driver (both when built in or when 1828 * init_sd - entry point for this driver (both when built in or when
1800 * a module). 1829 * a module).
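Taken together, sd_shutdown(), sd_suspend() and sd_resume() implement an opt-in spin-down policy: the write cache is always synchronized, but the drive is stopped only when manage_start_stop is set, and then only for a real suspend (PM_EVENT_SUSPEND) or a non-restart shutdown, presumably so reboots and hibernation freezes keep the disk spinning. An illustrative condensation of that decision, not code from the patch:

/*
 * Restates the conditions used by sd_shutdown() and sd_suspend();
 * this helper does not exist in the patch.
 */
#include <stdbool.h>

enum pm_reason { EX_SUSPEND, EX_FREEZE, EX_SHUTDOWN, EX_RESTART };

static bool should_stop_disk(bool manage_start_stop, enum pm_reason why)
{
	if (!manage_start_stop)
		return false;		/* default behaviour is unchanged */

	switch (why) {
	case EX_SUSPEND:		/* sd_suspend(): mesg.event == PM_EVENT_SUSPEND */
	case EX_SHUTDOWN:		/* sd_shutdown(): system_state != SYSTEM_RESTART */
		return true;
	default:			/* freeze and restart keep the disk spinning */
		return false;
	}
}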
@@ -1852,3 +1881,19 @@ static void __exit exit_sd(void)
1852 1881
1853module_init(init_sd); 1882module_init(init_sd);
1854module_exit(exit_sd); 1883module_exit(exit_sd);
1884
1885static void sd_print_sense_hdr(struct scsi_disk *sdkp,
1886 struct scsi_sense_hdr *sshdr)
1887{
1888 sd_printk(KERN_INFO, sdkp, "");
1889 scsi_show_sense_hdr(sshdr);
1890 sd_printk(KERN_INFO, sdkp, "");
1891 scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
1892}
1893
1894static void sd_print_result(struct scsi_disk *sdkp, int result)
1895{
1896 sd_printk(KERN_INFO, sdkp, "");
1897 scsi_show_result(result);
1898}
1899
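sd_print_sense_hdr() and sd_print_result() prefix the generic scsi_show_sense_hdr()/scsi_show_extd_sense()/scsi_show_result() output with the per-disk banner that sd_printk() produces. The sd_printk() macro itself is added to sd.h, which is outside this hunk, so the definition below is an assumed sketch of its shape rather than the patch's text:

/*
 * Assumed shape of the sd_printk() helper used throughout this patch;
 * the real definition lives in sd.h.
 */
#define sd_printk(prefix, sdsk, fmt, a...)				\
	((sdsk)->disk ?							\
	 sdev_printk(prefix, (sdsk)->device, "[%s] " fmt,		\
		     (sdsk)->disk->disk_name, ##a) :			\
	 sdev_printk(prefix, (sdsk)->device, fmt, ##a))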
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 81e3bc7b02a1..570977cf9efb 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -917,6 +917,8 @@ sg_ioctl(struct inode *inode, struct file *filp,
917 return result; 917 return result;
918 if (val < 0) 918 if (val < 0)
919 return -EINVAL; 919 return -EINVAL;
920 val = min_t(int, val,
921 sdp->device->request_queue->max_sectors * 512);
920 if (val != sfp->reserve.bufflen) { 922 if (val != sfp->reserve.bufflen) {
921 if (sg_res_in_use(sfp) || sfp->mmap_called) 923 if (sg_res_in_use(sfp) || sfp->mmap_called)
922 return -EBUSY; 924 return -EBUSY;
@@ -925,7 +927,8 @@ sg_ioctl(struct inode *inode, struct file *filp,
925 } 927 }
926 return 0; 928 return 0;
927 case SG_GET_RESERVED_SIZE: 929 case SG_GET_RESERVED_SIZE:
928 val = (int) sfp->reserve.bufflen; 930 val = min_t(int, sfp->reserve.bufflen,
931 sdp->device->request_queue->max_sectors * 512);
929 return put_user(val, ip); 932 return put_user(val, ip);
930 case SG_SET_COMMAND_Q: 933 case SG_SET_COMMAND_Q:
931 result = get_user(val, ip); 934 result = get_user(val, ip);
@@ -1061,6 +1064,9 @@ sg_ioctl(struct inode *inode, struct file *filp,
1061 if (sdp->detached) 1064 if (sdp->detached)
1062 return -ENODEV; 1065 return -ENODEV;
1063 return scsi_ioctl(sdp->device, cmd_in, p); 1066 return scsi_ioctl(sdp->device, cmd_in, p);
1067 case BLKSECTGET:
1068 return put_user(sdp->device->request_queue->max_sectors * 512,
1069 ip);
1064 default: 1070 default:
1065 if (read_only) 1071 if (read_only)
1066 return -EPERM; /* don't know so take safe approach */ 1072 return -EPERM; /* don't know so take safe approach */
@@ -2339,6 +2345,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
2339{ 2345{
2340 Sg_fd *sfp; 2346 Sg_fd *sfp;
2341 unsigned long iflags; 2347 unsigned long iflags;
2348 int bufflen;
2342 2349
2343 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); 2350 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2344 if (!sfp) 2351 if (!sfp)
@@ -2369,7 +2376,9 @@ sg_add_sfp(Sg_device * sdp, int dev)
2369 if (unlikely(sg_big_buff != def_reserved_size)) 2376 if (unlikely(sg_big_buff != def_reserved_size))
2370 sg_big_buff = def_reserved_size; 2377 sg_big_buff = def_reserved_size;
2371 2378
2372 sg_build_reserve(sfp, sg_big_buff); 2379 bufflen = min_t(int, sg_big_buff,
2380 sdp->device->request_queue->max_sectors * 512);
2381 sg_build_reserve(sfp, bufflen);
2373 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n", 2382 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2374 sfp->reserve.bufflen, sfp->reserve.k_use_sg)); 2383 sfp->reserve.bufflen, sfp->reserve.k_use_sg));
2375 return sfp; 2384 return sfp;
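All three sg.c hunks apply the same clamp: the per-fd reserved buffer, whether set through SG_SET_RESERVED_SIZE, reported through SG_GET_RESERVED_SIZE, or allocated in sg_add_sfp(), is capped at request_queue->max_sectors * 512 bytes so it never exceeds what the queue accepts in a single command, and the new BLKSECTGET case reports that same limit. A standalone sketch of the clamp (the max_sectors value is chosen purely for illustration; the driver reads it from the device's request queue):

#include <stdio.h>

static int cap_reserved(int requested, unsigned int max_sectors)
{
	int limit = max_sectors * 512;

	return requested < limit ? requested : limit;	/* min_t(int, ...) */
}

int main(void)
{
	printf("%d\n", cap_reserved(1 << 20, 256));	/* 256 sectors -> capped at 131072 bytes */
	return 0;
}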
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 1857d68e7195..f9a52af7f5b4 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -62,6 +62,8 @@
62MODULE_DESCRIPTION("SCSI cdrom (sr) driver"); 62MODULE_DESCRIPTION("SCSI cdrom (sr) driver");
63MODULE_LICENSE("GPL"); 63MODULE_LICENSE("GPL");
64MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_CDROM_MAJOR); 64MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_CDROM_MAJOR);
65MODULE_ALIAS_SCSI_DEVICE(TYPE_ROM);
66MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
65 67
66#define SR_DISKS 256 68#define SR_DISKS 256
67 69
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 98d8411bbccc..55bfeccf68a2 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -89,6 +89,7 @@ MODULE_AUTHOR("Kai Makisara");
89MODULE_DESCRIPTION("SCSI tape (st) driver"); 89MODULE_DESCRIPTION("SCSI tape (st) driver");
90MODULE_LICENSE("GPL"); 90MODULE_LICENSE("GPL");
91MODULE_ALIAS_CHARDEV_MAJOR(SCSI_TAPE_MAJOR); 91MODULE_ALIAS_CHARDEV_MAJOR(SCSI_TAPE_MAJOR);
92MODULE_ALIAS_SCSI_DEVICE(TYPE_TAPE);
92 93
93/* Set 'perm' (4th argument) to 0 to disable module_param's definition 94/* Set 'perm' (4th argument) to 0 to disable module_param's definition
94 * of sysfs parameters (which module_param doesn't yet support). 95 * of sysfs parameters (which module_param doesn't yet support).
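The new MODULE_ALIAS_SCSI_DEVICE() lines let module autoloading match the per-device modalias introduced elsewhere in this series, which encodes the SCSI peripheral device type. The macro's definition is not part of these hunks, so the expansions below are an assumption based on the "scsi:t-0xNN" modalias format (TYPE_ROM is 0x05, TYPE_WORM 0x04, TYPE_TAPE 0x01):

/* Assumed expansions, for illustration only. */
MODULE_ALIAS("scsi:t-0x05*");	/* what MODULE_ALIAS_SCSI_DEVICE(TYPE_ROM) presumably emits (sr) */
MODULE_ALIAS("scsi:t-0x04*");	/* MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM) (sr) */
MODULE_ALIAS("scsi:t-0x01*");	/* MODULE_ALIAS_SCSI_DEVICE(TYPE_TAPE) (st) */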