-rw-r--r--  Documentation/kernel-parameters.txt | 5
-rw-r--r--  Documentation/scsi/ChangeLog.megaraid_sas | 23
-rw-r--r--  Makefile | 7
-rw-r--r--  arch/i386/kernel/cpu/transmeta.c | 1
-rw-r--r--  arch/x86_64/defconfig | 42
-rw-r--r--  arch/x86_64/kernel/apic.c | 1
-rw-r--r--  arch/x86_64/kernel/entry.S | 1
-rw-r--r--  arch/x86_64/kernel/head.S | 7
-rw-r--r--  arch/x86_64/kernel/io_apic.c | 16
-rw-r--r--  arch/x86_64/kernel/nmi.c | 19
-rw-r--r--  arch/x86_64/kernel/time.c | 3
-rw-r--r--  arch/x86_64/mm/k8topology.c | 2
-rw-r--r--  arch/x86_64/mm/numa.c | 2
-rw-r--r--  arch/x86_64/mm/srat.c | 5
-rw-r--r--  drivers/ide/ide-taskfile.c | 3
-rw-r--r--  drivers/input/keyboard/Makefile | 1
-rw-r--r--  drivers/input/misc/Makefile | 1
-rw-r--r--  drivers/input/misc/ixp4xx-beeper.c | 1
-rw-r--r--  drivers/input/mouse/logips2pp.c | 1
-rw-r--r--  drivers/input/mouse/trackpoint.c | 20
-rw-r--r--  drivers/input/mouse/trackpoint.h | 4
-rw-r--r--  drivers/input/serio/Makefile | 1
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 147
-rw-r--r--  drivers/message/fusion/mptbase.c | 115
-rw-r--r--  drivers/message/fusion/mptbase.h | 2
-rw-r--r--  drivers/message/fusion/mptctl.c | 241
-rw-r--r--  drivers/message/fusion/mptctl.h | 4
-rw-r--r--  drivers/message/fusion/mptscsih.c | 2
-rw-r--r--  drivers/mmc/mmci.c | 7
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 76
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 13
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 82
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 5
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 80
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 15
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs_adapter.c | 4
-rw-r--r--  drivers/scsi/3w-9xxx.c | 7
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 217
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 18
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 22
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 12
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 50
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c | 2
-rw-r--r--  drivers/scsi/aacraid/linit.c | 50
-rw-r--r--  drivers/scsi/gdth.c | 2
-rw-r--r--  drivers/scsi/ipr.c | 49
-rw-r--r--  drivers/scsi/ipr.h | 5
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 78
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 4
-rw-r--r--  drivers/scsi/megaraid.c | 2
-rw-r--r--  drivers/scsi/megaraid.h | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 101
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 53
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 276
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 44
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 27
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 108
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 43
-rw-r--r--  drivers/scsi/qla2xxx/qla_rscn.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 963
-rw-r--r--  drivers/scsi/scsi_lib.c | 59
-rw-r--r--  drivers/scsi/scsi_scan.c | 26
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 9
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 260
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.c | 2
-rw-r--r--  drivers/video/neofb.c | 15
-rw-r--r--  fs/cifs/file.c | 14
-rw-r--r--  fs/exec.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h | 4
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.c | 12
-rw-r--r--  fs/ocfs2/dlm/dlmlock.c | 25
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 7
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 42
-rw-r--r--  fs/ocfs2/journal.c | 7
-rw-r--r--  fs/ocfs2/journal.h | 2
-rw-r--r--  include/asm-alpha/mman.h | 8
-rw-r--r--  include/asm-arm/mman.h | 31
-rw-r--r--  include/asm-arm26/mman.h | 31
-rw-r--r--  include/asm-cris/mman.h | 31
-rw-r--r--  include/asm-frv/mman.h | 31
-rw-r--r--  include/asm-generic/mman.h | 42
-rw-r--r--  include/asm-h8300/mman.h | 31
-rw-r--r--  include/asm-i386/mman.h | 31
-rw-r--r--  include/asm-ia64/mman.h | 31
-rw-r--r--  include/asm-m32r/mman.h | 33
-rw-r--r--  include/asm-m68k/mman.h | 31
-rw-r--r--  include/asm-mips/mman.h | 22
-rw-r--r--  include/asm-parisc/mman.h | 8
-rw-r--r--  include/asm-powerpc/mman.h | 32
-rw-r--r--  include/asm-s390/mman.h | 31
-rw-r--r--  include/asm-sh/mman.h | 31
-rw-r--r--  include/asm-sparc/mman.h | 31
-rw-r--r--  include/asm-sparc64/mman.h | 31
-rw-r--r--  include/asm-v850/mman.h | 30
-rw-r--r--  include/asm-x86_64/mman.h | 30
-rw-r--r--  include/asm-x86_64/proto.h | 1
-rw-r--r--  include/asm-xtensa/mman.h | 22
-rw-r--r--  include/linux/kernel.h | 6
-rw-r--r--  include/linux/ktime.h | 10
-rw-r--r--  include/linux/mm.h | 2
-rw-r--r--  include/linux/netfilter.h | 21
-rw-r--r--  include/linux/netfilter_ipv4.h | 2
-rw-r--r--  include/linux/ptrace.h | 1
-rw-r--r--  include/net/ip.h | 1
-rw-r--r--  include/net/xfrm.h | 1
-rw-r--r--  include/scsi/iscsi_if.h | 3
-rw-r--r--  include/scsi/scsi.h | 2
-rw-r--r--  include/scsi/scsi_transport_iscsi.h | 34
-rw-r--r--  include/video/neomagic.h | 1
-rw-r--r--  kernel/cpuset.c | 35
-rw-r--r--  kernel/fork.c | 9
-rw-r--r--  kernel/power/snapshot.c | 4
-rw-r--r--  kernel/ptrace.c | 25
-rw-r--r--  kernel/sysctl.c | 2
-rw-r--r--  lib/radix-tree.c | 10
-rw-r--r--  mm/memory.c | 10
-rw-r--r--  net/bridge/br_stp_if.c | 4
-rw-r--r--  net/ipv4/ip_gre.c | 3
-rw-r--r--  net/ipv4/ip_output.c | 16
-rw-r--r--  net/ipv4/ipip.c | 3
-rw-r--r--  net/ipv4/netfilter.c | 41
-rw-r--r--  net/ipv4/netfilter/ip_nat_standalone.c | 6
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 5
-rw-r--r--  net/ipv4/xfrm4_output.c | 13
-rw-r--r--  net/ipv6/icmp.c | 6
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c | 2
-rw-r--r--  net/netfilter/Kconfig | 6
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 5
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_proto_udp.c | 4
133 files changed, 2989 insertions(+), 1496 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 84370363da80..b874771385cd 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1133,6 +1133,8 @@ running once the system is up.
 			Mechanism 1.
 	conf2		[IA-32] Force use of PCI Configuration
 			Mechanism 2.
+	nommconf	[IA-32,X86_64] Disable use of MMCONFIG for PCI
+			Configuration
 	nosort		[IA-32] Don't sort PCI devices according to
 			order given by the PCI BIOS. This sorting is
 			done to get a device order compatible with
@@ -1636,6 +1638,9 @@ running once the system is up.
 			Format:
 			<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
 
+	norandmaps	Don't use address space randomization
+			Equivalent to echo 0 > /proc/sys/kernel/randomize_va_space
+
 
 ______________________________________________________________________
 Changelog:
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index f8c16cbf56ba..2dafa63bd370 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,26 @@
+1 Release Date : Wed Feb 03 14:31:44 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com>
+2 Current Version : 00.00.02.04
+3 Older Version : 00.00.02.04
+
+i.	Support for 1078 type (ppc IOP) controller, device id : 0x60 added.
+	During initialization, depending on the device id, the template members
+	are initialized with function pointers specific to the ppc or
+	xscale controllers.
+
+	-Sumant Patro <Sumant.Patro@lsil.com>
+
+1 Release Date : Fri Feb 03 14:16:25 PST 2006 - Sumant Patro
+	<Sumant.Patro@lsil.com>
+2 Current Version : 00.00.02.04
+3 Older Version : 00.00.02.02
+i.	Register 16 byte CDB capability with scsi midlayer
+
+	"Ths patch properly registers the 16 byte command length capability of the
+	megaraid_sas controlled hardware with the scsi midlayer. All megaraid_sas
+	hardware supports 16 byte CDB's."
+
+	-Joshua Giles <joshua_giles@dell.com>
+
 1 Release Date : Mon Jan 23 14:09:01 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com>
 2 Current Version : 00.00.02.02
 3 Older Version : 00.00.02.01
diff --git a/Makefile b/Makefile
index 74d67b2c35d9..48d569d46ae1 100644
--- a/Makefile
+++ b/Makefile
@@ -106,13 +106,12 @@ KBUILD_OUTPUT := $(shell cd $(KBUILD_OUTPUT) && /bin/pwd)
 $(if $(KBUILD_OUTPUT),, \
      $(error output directory "$(saved-output)" does not exist))
 
-.PHONY: $(MAKECMDGOALS) cdbuilddir
-$(MAKECMDGOALS) _all: cdbuilddir
+.PHONY: $(MAKECMDGOALS)
 
-cdbuilddir:
+$(filter-out _all,$(MAKECMDGOALS)) _all:
 	$(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \
 	KBUILD_SRC=$(CURDIR) \
-	KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile $(MAKECMDGOALS)
+	KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile $@
 
 # Leave processing to above invocation of make
 skip-makefile := 1
diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c
index bdbeb77f4e22..7214c9b577ab 100644
--- a/arch/i386/kernel/cpu/transmeta.c
+++ b/arch/i386/kernel/cpu/transmeta.c
@@ -1,4 +1,5 @@
 #include <linux/kernel.h>
+#include <linux/mm.h>
 #include <linux/init.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 56832929a543..b337136f28b6 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.16-rc1-git2
-# Thu Jan 19 10:05:21 2006
+# Linux kernel version: 2.6.16-rc3
+# Mon Feb 13 22:31:24 2006
 #
 CONFIG_X86_64=y
 CONFIG_64BIT=y
@@ -21,7 +21,6 @@ CONFIG_DMI=y
 # Code maturity level options
 #
 CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
 CONFIG_LOCK_KERNEL=y
 CONFIG_INIT_ENV_ARG_LIMIT=32
 
@@ -267,6 +266,7 @@ CONFIG_NET=y
 #
 # Networking options
 #
+# CONFIG_NETDEBUG is not set
 CONFIG_PACKET=y
 # CONFIG_PACKET_MMAP is not set
 CONFIG_UNIX=y
@@ -446,7 +446,6 @@ CONFIG_BLK_DEV_PIIX=y
 # CONFIG_BLK_DEV_NS87415 is not set
 # CONFIG_BLK_DEV_PDC202XX_OLD is not set
 CONFIG_BLK_DEV_PDC202XX_NEW=y
-# CONFIG_PDC202XX_FORCE is not set
 # CONFIG_BLK_DEV_SVWKS is not set
 # CONFIG_BLK_DEV_SIIMAGE is not set
 # CONFIG_BLK_DEV_SIS5513 is not set
@@ -573,7 +572,33 @@ CONFIG_FUSION_MAX_SGE=128
 #
 # IEEE 1394 (FireWire) support
 #
-# CONFIG_IEEE1394 is not set
+CONFIG_IEEE1394=y
+
+#
+# Subsystem Options
+#
+# CONFIG_IEEE1394_VERBOSEDEBUG is not set
+# CONFIG_IEEE1394_OUI_DB is not set
+# CONFIG_IEEE1394_EXTRA_CONFIG_ROMS is not set
+# CONFIG_IEEE1394_EXPORT_FULL_API is not set
+
+#
+# Device Drivers
+#
+
+#
+# Texas Instruments PCILynx requires I2C
+#
+CONFIG_IEEE1394_OHCI1394=y
+
+#
+# Protocol Drivers
+#
+# CONFIG_IEEE1394_VIDEO1394 is not set
+# CONFIG_IEEE1394_SBP2 is not set
+# CONFIG_IEEE1394_ETH1394 is not set
+# CONFIG_IEEE1394_DV1394 is not set
+CONFIG_IEEE1394_RAWIO=y
 
 #
 # I2O device support
@@ -772,6 +797,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4
 #
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
 CONFIG_UNIX98_PTYS=y
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
@@ -871,6 +897,7 @@ CONFIG_HPET_MMAP=y
 #
 CONFIG_HWMON=y
 # CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_F71805F is not set
 # CONFIG_SENSORS_HDAPS is not set
 # CONFIG_HWMON_DEBUG_CHIP is not set
 
@@ -1101,7 +1128,6 @@ CONFIG_USB_MON=y
 # EDAC - error detection and reporting (RAS)
 #
 # CONFIG_EDAC is not set
-# CONFIG_EDAC_POLL is not set
 
 #
 # Firmware Drivers
@@ -1291,14 +1317,12 @@ CONFIG_DETECT_SOFTLOCKUP=y
 # CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_INFO is not set
+CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
 # CONFIG_DEBUG_VM is not set
 # CONFIG_FRAME_POINTER is not set
 # CONFIG_FORCED_INLINING is not set
-# CONFIG_UNWIND_INFO is not set
 # CONFIG_RCU_TORTURE_TEST is not set
-CONFIG_INIT_DEBUG=y
 # CONFIG_DEBUG_RODATA is not set
 # CONFIG_IOMMU_DEBUG is not set
 
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 7a0a3e8d5d72..e5b14c57eaa0 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -1152,6 +1152,7 @@ __setup("noapicmaintimer", setup_noapicmaintimer);
 static __init int setup_apicpmtimer(char *s)
 {
 	apic_calibrate_pmtmr = 1;
+	notsc_setup(NULL);
 	return setup_apicmaintimer(NULL);
 }
 __setup("apicpmtimer", setup_apicpmtimer);
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index b150c87a08c6..7c10e9009d61 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -554,6 +554,7 @@ iret_label:
 	/* running with kernel gs */
 bad_iret:
 	movq $-9999,%rdi	/* better code? */
+	sti
 	jmp do_exit
 	.previous
 
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 692c737feddb..02fc7fa0ea28 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -213,6 +213,11 @@ ENTRY(early_idt_handler)
 	cmpl $2,early_recursion_flag(%rip)
 	jz  1f
 	call dump_stack
+#ifdef CONFIG_KALLSYMS
+	leaq early_idt_ripmsg(%rip),%rdi
+	movq 8(%rsp),%rsi	# get rip again
+	call __print_symbol
+#endif
 1:	hlt
 	jmp 1b
 early_recursion_flag:
@@ -220,6 +225,8 @@ early_recursion_flag:
 
 early_idt_msg:
 	.asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
+early_idt_ripmsg:
+	.asciz "RIP %s\n"
 
 .code32
 ENTRY(no_long_mode)
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 4282d72b2a26..2585c1d92b26 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -30,6 +30,9 @@
 #include <linux/mc146818rtc.h>
 #include <linux/acpi.h>
 #include <linux/sysdev.h>
+#ifdef CONFIG_ACPI
+#include <acpi/acpi_bus.h>
+#endif
 
 #include <asm/io.h>
 #include <asm/smp.h>
@@ -260,6 +263,8 @@ __setup("apic", enable_ioapic_setup);
 
    And another hack to disable the IOMMU on VIA chipsets.
 
+   ... and others. Really should move this somewhere else.
+
    Kludge-O-Rama. */
 void __init check_ioapic(void)
 {
@@ -307,6 +312,17 @@ void __init check_ioapic(void)
 		case PCI_VENDOR_ID_ATI:
 			if (apic_runs_main_timer != 0)
 				break;
+#ifdef CONFIG_ACPI
+			/* Don't do this for laptops right
+			   right now because their timer
+			   doesn't necessarily tick in C2/3 */
+			if (acpi_fadt.revision >= 3 &&
+			    (acpi_fadt.plvl2_lat + acpi_fadt.plvl3_lat) < 1100) {
+				printk(KERN_INFO
+"ATI board detected, but seems to be a laptop. Timer might be shakey, sorry\n");
+				break;
+			}
+#endif
 			printk(KERN_INFO
 			    "ATI board detected. Using APIC/PM timer.\n");
 			apic_runs_main_timer = 1;
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 8be407a1f62d..5bf17e41cd2d 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -236,6 +236,7 @@ static void enable_lapic_nmi_watchdog(void)
 {
 	if (nmi_active < 0) {
 		nmi_watchdog = NMI_LOCAL_APIC;
+		touch_nmi_watchdog();
 		setup_apic_nmi_watchdog();
 	}
 }
@@ -456,15 +457,17 @@ static DEFINE_PER_CPU(int, nmi_touch);
 
 void touch_nmi_watchdog (void)
 {
-	int i;
+	if (nmi_watchdog > 0) {
+		unsigned cpu;
 
 	/*
 	 * Tell other CPUs to reset their alert counters. We cannot
 	 * do it ourselves because the alert count increase is not
 	 * atomic.
 	 */
-	for (i = 0; i < NR_CPUS; i++)
-		per_cpu(nmi_touch, i) = 1;
+		for_each_present_cpu (cpu)
+			per_cpu(nmi_touch, cpu) = 1;
+	}
 
 	touch_softlockup_watchdog();
 }
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 3c58c30506a1..67841d11ed1f 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -1327,8 +1327,7 @@ static int __init nohpet_setup(char *s)
 
 __setup("nohpet", nohpet_setup);
 
-
-static int __init notsc_setup(char *s)
+int __init notsc_setup(char *s)
 {
 	notsc = 1;
 	return 0;
diff --git a/arch/x86_64/mm/k8topology.c b/arch/x86_64/mm/k8topology.c
index a5663e0bb01c..dd60e71fdba6 100644
--- a/arch/x86_64/mm/k8topology.c
+++ b/arch/x86_64/mm/k8topology.c
@@ -155,7 +155,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 	if (!found)
 		return -1;
 
-	memnode_shift = compute_hash_shift(nodes, numnodes);
+	memnode_shift = compute_hash_shift(nodes, 8);
 	if (memnode_shift < 0) {
 		printk(KERN_ERR "No NUMA node hash function found. Contact maintainer\n");
 		return -1;
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 6ef9f9a76235..22e51beee8d3 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -351,7 +351,7 @@ void __init init_cpu_to_node(void)
 			continue;
 		if (apicid_to_node[apicid] == NUMA_NO_NODE)
 			continue;
-		cpu_to_node[i] = apicid_to_node[apicid];
+		numa_set_node(i,apicid_to_node[apicid]);
 	}
 }
 
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index cd25300726fc..482c25767369 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -228,7 +228,8 @@ static int nodes_cover_memory(void)
 	}
 
 	e820ram = end_pfn - e820_hole_size(0, end_pfn);
-	if (pxmram < e820ram) {
+	/* We seem to lose 3 pages somewhere. Allow a bit of slack. */
+	if ((long)(e820ram - pxmram) >= 1*1024*1024) {
 		printk(KERN_ERR
 "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
 			(pxmram << PAGE_SHIFT) >> 20,
@@ -270,7 +271,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 		return -1;
 	}
 
-	memnode_shift = compute_hash_shift(nodes, nodes_weight(nodes_parsed));
+	memnode_shift = compute_hash_shift(nodes, MAX_NUMNODES);
 	if (memnode_shift < 0) {
 		printk(KERN_ERR
 		     "SRAT: No NUMA node hash function found. Contact maintainer\n");
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 9834dce4e20f..0606bd2f6020 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -34,6 +34,7 @@
 #include <linux/kernel.h>
 #include <linux/timer.h>
 #include <linux/mm.h>
+#include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/major.h>
 #include <linux/errno.h>
@@ -314,6 +315,8 @@ static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
 	if (rq->bio)	/* fs request */
 		rq->errors = 0;
 
+	touch_softlockup_watchdog();
+
 	switch (drive->hwif->data_phase) {
 	case TASKFILE_MULTI_IN:
 	case TASKFILE_MULTI_OUT:
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 6e0afbb22383..2708167ba175 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
 obj-$(CONFIG_KEYBOARD_AMIGA)	+= amikbd.o
 obj-$(CONFIG_KEYBOARD_LOCOMO)	+= locomokbd.o
 obj-$(CONFIG_KEYBOARD_NEWTON)	+= newtonkbd.o
-obj-$(CONFIG_KEYBOARD_98KBD)	+= 98kbd.o
 obj-$(CONFIG_KEYBOARD_CORGI)	+= corgikbd.o
 obj-$(CONFIG_KEYBOARD_SPITZ)	+= spitzkbd.o
 obj-$(CONFIG_KEYBOARD_HIL)	+= hil_kbd.o
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 184c4129470d..415c49178985 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -7,7 +7,6 @@
 obj-$(CONFIG_INPUT_SPARCSPKR)	+= sparcspkr.o
 obj-$(CONFIG_INPUT_PCSPKR)	+= pcspkr.o
 obj-$(CONFIG_INPUT_M68K_BEEP)	+= m68kspkr.o
-obj-$(CONFIG_INPUT_98SPKR)	+= 98spkr.o
 obj-$(CONFIG_INPUT_UINPUT)	+= uinput.o
 obj-$(CONFIG_INPUT_WISTRON_BTNS)	+= wistron_btns.o
 obj-$(CONFIG_HP_SDC_RTC)	+= hp_sdc_rtc.o
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index d448bb5e4869..3a6ae85cd69c 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -19,6 +19,7 @@
 #include <linux/input.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
+#include <linux/interrupt.h>
 #include <asm/hardware.h>
 
 MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
diff --git a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c
index c88520d3d13c..40333d61093c 100644
--- a/drivers/input/mouse/logips2pp.c
+++ b/drivers/input/mouse/logips2pp.c
@@ -232,6 +232,7 @@ static struct ps2pp_info *get_model_info(unsigned char model)
 		{ 88,	PS2PP_KIND_WHEEL,	PS2PP_WHEEL },
 		{ 96,	0,			0 },
 		{ 97,	PS2PP_KIND_TP3,		PS2PP_WHEEL | PS2PP_HWHEEL },
+		{ 99,	PS2PP_KIND_WHEEL,	PS2PP_WHEEL },
 		{ 100,	PS2PP_KIND_MX,		/* MX510 */
 			PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN |
 			PS2PP_EXTRA_BTN | PS2PP_NAV_BTN },
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index b4898d8a68e2..6d9ec9ab1b90 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -68,15 +68,19 @@ struct trackpoint_attr_data {
 	size_t field_offset;
 	unsigned char command;
 	unsigned char mask;
+	unsigned char inverted;
 };
 
 static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf)
 {
 	struct trackpoint_data *tp = psmouse->private;
 	struct trackpoint_attr_data *attr = data;
-	unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
+	unsigned char value = *(unsigned char *)((char *)tp + attr->field_offset);
+
+	if (attr->inverted)
+		value = !value;
 
-	return sprintf(buf, "%u\n", *field);
+	return sprintf(buf, "%u\n", value);
 }
 
 static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
@@ -120,6 +124,9 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
 	if (*rest || value > 1)
 		return -EINVAL;
 
+	if (attr->inverted)
+		value = !value;
+
 	if (*field != value) {
 		*field = value;
 		trackpoint_toggle_bit(&psmouse->ps2dev, attr->command, attr->mask);
@@ -129,11 +136,12 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
 }
 
 
-#define TRACKPOINT_BIT_ATTR(_name, _command, _mask)				\
+#define TRACKPOINT_BIT_ATTR(_name, _command, _mask, _inv)			\
 	static struct trackpoint_attr_data trackpoint_attr_##_name = {		\
 		.field_offset	= offsetof(struct trackpoint_data, _name),	\
 		.command	= _command,					\
 		.mask		= _mask,					\
+		.inverted	= _inv,						\
 	};									\
 	PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO,				\
 			    &trackpoint_attr_##_name,				\
@@ -150,9 +158,9 @@ TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH);
 TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME);
 TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV);
 
-TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON);
-TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK);
-TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV);
+TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0);
+TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, 0);
+TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, 1);
 
 static struct attribute *trackpoint_attrs[] = {
 	&psmouse_attr_sensitivity.dattr.attr,
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 9857d8b6ad66..050298b1a09d 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -78,7 +78,7 @@
 
 #define TP_TOGGLE_MB		0x23	/* Disable/Enable Middle Button */
 #define TP_MASK_MB		0x01
-#define TP_TOGGLE_EXT_DEV	0x23	/* Toggle external device */
+#define TP_TOGGLE_EXT_DEV	0x23	/* Disable external device */
 #define TP_MASK_EXT_DEV		0x02
 #define TP_TOGGLE_DRIFT		0x23	/* Drift Correction */
 #define TP_MASK_DRIFT		0x80
@@ -125,7 +125,7 @@
 #define TP_DEF_MB		0x00
 #define TP_DEF_PTSON		0x00
 #define TP_DEF_SKIPBACK		0x00
-#define TP_DEF_EXT_DEV		0x01
+#define TP_DEF_EXT_DEV		0x00	/* 0 means enabled */
 
 #define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd))
 
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index 678a8599f9ff..4155197867a3 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_SERIO_RPCKBD) += rpckbd.o
 obj-$(CONFIG_SERIO_SA1111)	+= sa1111ps2.o
 obj-$(CONFIG_SERIO_AMBAKMI)	+= ambakmi.o
 obj-$(CONFIG_SERIO_Q40KBD)	+= q40kbd.o
-obj-$(CONFIG_SERIO_98KBD)	+= 98kbd-io.o
 obj-$(CONFIG_SERIO_GSCPS2)	+= gscps2.o
 obj-$(CONFIG_HP_SDC)		+= hp_sdc.o
 obj-$(CONFIG_HIL_MLC)		+= hp_sdc_mlc.o hil_mlc.o
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index b45a45ca7cc9..8c12a974b411 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -48,10 +48,13 @@
 
 #define	TS_POLL_PERIOD	msecs_to_jiffies(10)
 
+/* this driver doesn't aim at the peak continuous sample rate */
+#define	SAMPLE_BITS	(8 /*cmd*/ + 16 /*sample*/ + 2 /* before, after */)
+
 struct ts_event {
 	/* For portability, we can't read 12 bit values using SPI (which
 	 * would make the controller deliver them as native byteorder u16
-	 * with msbs zeroed).  Instead, we read them as two 8-byte values,
+	 * with msbs zeroed).  Instead, we read them as two 8-bit values,
 	 * which need byteswapping then range adjustment.
 	 */
 	__be16 x;
@@ -60,7 +63,7 @@ struct ts_event {
 };
 
 struct ads7846 {
-	struct input_dev	input;
+	struct input_dev	*input;
 	char			phys[32];
 
 	struct spi_device	*spi;
@@ -68,6 +71,7 @@ struct ads7846 {
 	u16			vref_delay_usecs;
 	u16			x_plate_ohms;
 
+	u8			read_x, read_y, read_z1, read_z2;
 	struct ts_event		tc;
 
 	struct spi_transfer	xfer[8];
@@ -117,10 +121,10 @@ struct ads7846 {
 #define	READ_12BIT_DFR(x) (ADS_START | ADS_A2A1A0_d_ ## x \
 	| ADS_12_BIT | ADS_DFR)
 
-static const u8	read_y  = READ_12BIT_DFR(y)  | ADS_PD10_ADC_ON;
-static const u8	read_z1 = READ_12BIT_DFR(z1) | ADS_PD10_ADC_ON;
-static const u8	read_z2 = READ_12BIT_DFR(z2) | ADS_PD10_ADC_ON;
-static const u8	read_x  = READ_12BIT_DFR(x)  | ADS_PD10_PDOWN;	/* LAST */
+#define	READ_Y	(READ_12BIT_DFR(y)  | ADS_PD10_ADC_ON)
+#define	READ_Z1	(READ_12BIT_DFR(z1) | ADS_PD10_ADC_ON)
+#define	READ_Z2	(READ_12BIT_DFR(z2) | ADS_PD10_ADC_ON)
+#define	READ_X	(READ_12BIT_DFR(x)  | ADS_PD10_PDOWN)	/* LAST */
 
 /* single-ended samples need to first power up reference voltage;
  * we leave both ADC and VREF powered
@@ -128,8 +132,8 @@ static const u8 read_x = READ_12BIT_DFR(x) | ADS_PD10_PDOWN; /* LAST */
 #define	READ_12BIT_SER(x) (ADS_START | ADS_A2A1A0_ ## x \
 	| ADS_12_BIT | ADS_SER)
 
-static const u8	ref_on  = READ_12BIT_DFR(x) | ADS_PD10_ALL_ON;
-static const u8	ref_off = READ_12BIT_DFR(y) | ADS_PD10_PDOWN;
+#define	REF_ON	(READ_12BIT_DFR(x) | ADS_PD10_ALL_ON)
+#define	REF_OFF	(READ_12BIT_DFR(y) | ADS_PD10_PDOWN)
 
 /*--------------------------------------------------------------------------*/
 
@@ -138,7 +142,9 @@ static const u8 ref_off = READ_12BIT_DFR(y) | ADS_PD10_PDOWN;
  */
 
 struct ser_req {
+	u8			ref_on;
 	u8			command;
+	u8			ref_off;
 	u16			scratch;
 	__be16			sample;
 	struct spi_message	msg;
@@ -152,7 +158,7 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
 	struct ser_req		*req = kzalloc(sizeof *req, SLAB_KERNEL);
 	int			status;
 	int			sample;
 	int			i;
 
 	if (!req)
 		return -ENOMEM;
@@ -160,7 +166,8 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
 	INIT_LIST_HEAD(&req->msg.transfers);
 
 	/* activate reference, so it has time to settle; */
-	req->xfer[0].tx_buf = &ref_on;
+	req->ref_on = REF_ON;
+	req->xfer[0].tx_buf = &req->ref_on;
 	req->xfer[0].len = 1;
 	req->xfer[1].rx_buf = &req->scratch;
 	req->xfer[1].len = 2;
@@ -182,7 +189,8 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
 	/* REVISIT:  take a few more samples, and compare ... */
 
 	/* turn off reference */
-	req->xfer[4].tx_buf = &ref_off;
+	req->ref_off = REF_OFF;
+	req->xfer[4].tx_buf = &req->ref_off;
 	req->xfer[4].len = 1;
 	req->xfer[5].rx_buf = &req->scratch;
 	req->xfer[5].len = 2;
@@ -236,11 +244,12 @@ SHOW(vbatt)
 
 static void ads7846_rx(void *ads)
 {
 	struct ads7846		*ts = ads;
-	unsigned		Rt;
-	unsigned		sync = 0;
-	u16			x, y, z1, z2;
-	unsigned long		flags;
+	struct input_dev	*input_dev = ts->input;
+	unsigned		Rt;
+	unsigned		sync = 0;
+	u16			x, y, z1, z2;
+	unsigned long		flags;
 
 	/* adjust:  12 bit samples (left aligned), built from
 	 * two 8 bit values writen msb-first.
@@ -276,21 +285,21 @@ static void ads7846_rx(void *ads)
 	 * won't notice that, even if nPENIRQ never fires ...
 	 */
 	if (!ts->pendown && Rt != 0) {
-		input_report_key(&ts->input, BTN_TOUCH, 1);
+		input_report_key(input_dev, BTN_TOUCH, 1);
 		sync = 1;
 	} else if (ts->pendown && Rt == 0) {
-		input_report_key(&ts->input, BTN_TOUCH, 0);
+		input_report_key(input_dev, BTN_TOUCH, 0);
 		sync = 1;
 	}
 
 	if (Rt) {
-		input_report_abs(&ts->input, ABS_X, x);
-		input_report_abs(&ts->input, ABS_Y, y);
-		input_report_abs(&ts->input, ABS_PRESSURE, Rt);
+		input_report_abs(input_dev, ABS_X, x);
+		input_report_abs(input_dev, ABS_Y, y);
+		input_report_abs(input_dev, ABS_PRESSURE, Rt);
 		sync = 1;
 	}
 	if (sync)
-		input_sync(&ts->input);
+		input_sync(input_dev);
 
 #ifdef	VERBOSE
 	if (Rt || ts->pendown)
@@ -396,9 +405,10 @@ static int ads7846_resume(struct spi_device *spi)
 static int __devinit ads7846_probe(struct spi_device *spi)
 {
 	struct ads7846			*ts;
+	struct input_dev		*input_dev;
 	struct ads7846_platform_data	*pdata = spi->dev.platform_data;
 	struct spi_transfer		*x;
-	int				i;
+	int				err;
 
 	if (!spi->irq) {
 		dev_dbg(&spi->dev, "no IRQ?\n");
@@ -411,9 +421,9 @@ static int __devinit ads7846_probe(struct spi_device *spi)
 	}
 
 	/* don't exceed max specified sample rate */
-	if (spi->max_speed_hz > (125000 * 16)) {
+	if (spi->max_speed_hz > (125000 * SAMPLE_BITS)) {
 		dev_dbg(&spi->dev, "f(sample) %d KHz?\n",
-				(spi->max_speed_hz/16)/1000);
+				(spi->max_speed_hz/SAMPLE_BITS)/1000);
 		return -EINVAL;
 	}
 
@@ -423,13 +433,18 @@ static int __devinit ads7846_probe(struct spi_device *spi)
 	 * to discard the four garbage LSBs.
 	 */
 
-	if (!(ts = kzalloc(sizeof(struct ads7846), GFP_KERNEL)))
-		return -ENOMEM;
+	ts = kzalloc(sizeof(struct ads7846), GFP_KERNEL);
+	input_dev = input_allocate_device();
+	if (!ts || !input_dev) {
+		err = -ENOMEM;
+		goto err_free_mem;
+	}
 
 	dev_set_drvdata(&spi->dev, ts);
+	spi->dev.power.power_state = PMSG_ON;
 
 	ts->spi = spi;
-	spi->dev.power.power_state = PMSG_ON;
+	ts->input = input_dev;
 
 	init_timer(&ts->timer);
 	ts->timer.data = (unsigned long) ts;
@@ -439,70 +454,80 @@ static int __devinit ads7846_probe(struct spi_device *spi)
 	ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100;
 	ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
 
-	init_input_dev(&ts->input);
+	snprintf(ts->phys, sizeof(ts->phys), "%s/input0", spi->dev.bus_id);
 
-	ts->input.dev = &spi->dev;
-	ts->input.name = "ADS784x Touchscreen";
-	snprintf(ts->phys, sizeof ts->phys, "%s/input0", spi->dev.bus_id);
-	ts->input.phys = ts->phys;
+	input_dev->name = "ADS784x Touchscreen";
+	input_dev->phys = ts->phys;
+	input_dev->cdev.dev = &spi->dev;
 
-	ts->input.evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
-	ts->input.keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH);
-	input_set_abs_params(&ts->input, ABS_X,
+	input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
+	input_dev->keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH);
+	input_set_abs_params(input_dev, ABS_X,
 			pdata->x_min ? : 0,
 			pdata->x_max ? : MAX_12BIT,
 			0, 0);
-	input_set_abs_params(&ts->input, ABS_Y,
+	input_set_abs_params(input_dev, ABS_Y,
 			pdata->y_min ? : 0,
 			pdata->y_max ? : MAX_12BIT,
 			0, 0);
-	input_set_abs_params(&ts->input, ABS_PRESSURE,
+	input_set_abs_params(input_dev, ABS_PRESSURE,
 			pdata->pressure_min, pdata->pressure_max, 0, 0);
 
-	input_register_device(&ts->input);
-
 	/* set up the transfers to read touchscreen state; this assumes we
 	 * use formula #2 for pressure, not #3.
 	 */
+	INIT_LIST_HEAD(&ts->msg.transfers);
 	x = ts->xfer;
 
 	/* y- still on; turn on only y+ (and ADC) */
-	x->tx_buf = &read_y;
+	ts->read_y = READ_Y;
+	x->tx_buf = &ts->read_y;
 	x->len = 1;
+	spi_message_add_tail(x, &ts->msg);
+
 	x++;
 	x->rx_buf = &ts->tc.y;
 	x->len = 2;
-	x++;
+	spi_message_add_tail(x, &ts->msg);
 
 	/* turn y+ off, x- on; we'll use formula #2 */
 	if (ts->model == 7846) {
-		x->tx_buf = &read_z1;
+		x++;
+		ts->read_z1 = READ_Z1;
+		x->tx_buf = &ts->read_z1;
 		x->len = 1;
+		spi_message_add_tail(x, &ts->msg);
+
 		x++;
 		x->rx_buf = &ts->tc.z1;
 		x->len = 2;
-		x++;
+		spi_message_add_tail(x, &ts->msg);
 
-		x->tx_buf = &read_z2;
+		x++;
+		ts->read_z2 = READ_Z2;
+		x->tx_buf = &ts->read_z2;
 		x->len = 1;
+		spi_message_add_tail(x, &ts->msg);
+
 		x++;
 		x->rx_buf = &ts->tc.z2;
 		x->len = 2;
-		x++;
+		spi_message_add_tail(x, &ts->msg);
 	}
 
 	/* turn y- off, x+ on, then leave in lowpower */
-	x->tx_buf = &read_x;
+	x++;
+	ts->read_x = READ_X;
+	x->tx_buf = &ts->read_x;
 	x->len = 1;
+	spi_message_add_tail(x, &ts->msg);
+
 	x++;
 	x->rx_buf = &ts->tc.x;
 	x->len = 2;
-	x++;
-
-	CS_CHANGE(x[-1]);
+	CS_CHANGE(*x);
+	spi_message_add_tail(x, &ts->msg);
 
-	for (i = 0; i < x - ts->xfer; i++)
-		spi_message_add_tail(&ts->xfer[i], &ts->msg);
 	ts->msg.complete = ads7846_rx;
 	ts->msg.context = ts;
 
@@ -510,9 +535,8 @@ static int __devinit ads7846_probe(struct spi_device *spi)
 			SA_SAMPLE_RANDOM | SA_TRIGGER_FALLING,
 			spi->dev.bus_id, ts)) {
 		dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq);
-		input_unregister_device(&ts->input);
-		kfree(ts);
-		return -EBUSY;
+		err = -EBUSY;
+		goto err_free_mem;
 	}
 
 	dev_info(&spi->dev, "touchscreen, irq %d\n", spi->irq);
@@ -534,7 +558,18 @@ static int __devinit ads7846_probe(struct spi_device *spi)
 	device_create_file(&spi->dev, &dev_attr_vbatt);
 	device_create_file(&spi->dev, &dev_attr_vaux);
 
+	err = input_register_device(input_dev);
+	if (err)
+		goto err_free_irq;
+
 	return 0;
+
+ err_free_irq:
+	free_irq(spi->irq, ts);
+ err_free_mem:
+	input_free_device(input_dev);
+	kfree(ts);
+	return err;
 }
 
 static int __devexit ads7846_remove(struct spi_device *spi)
@@ -554,7 +589,7 @@ static int __devexit ads7846_remove(struct spi_device *spi)
 	device_remove_file(&spi->dev, &dev_attr_vbatt);
 	device_remove_file(&spi->dev, &dev_attr_vaux);
 
-	input_unregister_device(&ts->input);
+	input_unregister_device(ts->input);
 	kfree(ts);
 
 	dev_dbg(&spi->dev, "unregistered touchscreen\n");
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 9a2c7605d49c..642a61b6d0a4 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -452,8 +452,7 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
 	} else if (func == MPI_FUNCTION_EVENT_ACK) {
 		dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, EventAck reply received\n",
 		    ioc->name));
-	} else if (func == MPI_FUNCTION_CONFIG ||
-	    func == MPI_FUNCTION_TOOLBOX) {
+	} else if (func == MPI_FUNCTION_CONFIG) {
 		CONFIGPARMS *pCfg;
 		unsigned long flags;
 
@@ -5327,115 +5326,6 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- *	mpt_toolbox - Generic function to issue toolbox message
- *	@ioc - Pointer to an adapter structure
- *	@cfg - Pointer to a toolbox structure. Struct contains
- *		action, page address, direction, physical address
- *		and pointer to a configuration page header
- *		Page header is updated.
- *
- *	Returns 0 for success
- *	-EPERM if not allowed due to ISR context
- *	-EAGAIN if no msg frames currently available
- *	-EFAULT for non-successful reply or no reply (timeout)
- */
-int
-mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
-{
-	ToolboxIstwiReadWriteRequest_t	*pReq;
-	MPT_FRAME_HDR	*mf;
-	struct pci_dev	*pdev;
-	unsigned long	 flags;
-	int		 rc;
-	u32		 flagsLength;
-	int		 in_isr;
-
-	/*	Prevent calling wait_event() (below), if caller happens
-	 *	to be in ISR context, because that is fatal!
-	 */
-	in_isr = in_interrupt();
-	if (in_isr) {
-		dcprintk((MYIOC_s_WARN_FMT "toobox request not allowed in ISR context!\n",
-				ioc->name));
-		return -EPERM;
-	}
-
-	/* Get and Populate a free Frame
-	 */
-	if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
-		dcprintk((MYIOC_s_WARN_FMT "mpt_toolbox: no msg frames!\n",
-				ioc->name));
-		return -EAGAIN;
-	}
-	pReq = (ToolboxIstwiReadWriteRequest_t *)mf;
-	pReq->Tool = pCfg->action;
-	pReq->Reserved = 0;
-	pReq->ChainOffset = 0;
-	pReq->Function = MPI_FUNCTION_TOOLBOX;
-	pReq->Reserved1 = 0;
-	pReq->Reserved2 = 0;
-	pReq->MsgFlags = 0;
-	pReq->Flags = pCfg->dir;
-	pReq->BusNum = 0;
-	pReq->Reserved3 = 0;
-	pReq->NumAddressBytes = 0x01;
-	pReq->Reserved4 = 0;
-	pReq->DataLength = cpu_to_le16(0x04);
-	pdev = ioc->pcidev;
-	if (pdev->devfn & 1)
-		pReq->DeviceAddr = 0xB2;
-	else
-		pReq->DeviceAddr = 0xB0;
-	pReq->Addr1 = 0;
-	pReq->Addr2 = 0;
-	pReq->Addr3 = 0;
-	pReq->Reserved5 = 0;
-
-	/* Add a SGE to the config request.
-	 */
-
-	flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | 4;
-
-	mpt_add_sge((char *)&pReq->SGL, flagsLength, pCfg->physAddr);
-
-	dcprintk((MYIOC_s_INFO_FMT "Sending Toolbox request, Tool=%x\n",
-		ioc->name, pReq->Tool));
-
-	/* Append pCfg pointer to end of mf
-	 */
-	*((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) =  (void *) pCfg;
-
-	/* Initalize the timer
-	 */
-	init_timer(&pCfg->timer);
-	pCfg->timer.data = (unsigned long) ioc;
-	pCfg->timer.function = mpt_timer_expired;
-	pCfg->wait_done = 0;
-
-	/* Set the timer; ensure 10 second minimum */
-	if (pCfg->timeout < 10)
-		pCfg->timer.expires = jiffies + HZ*10;
-	else
-		pCfg->timer.expires = jiffies + HZ*pCfg->timeout;
-
-	/* Add to end of Q, set timer and then issue this command */
-	spin_lock_irqsave(&ioc->FreeQlock, flags);
-	list_add_tail(&pCfg->linkage, &ioc->configQ);
-	spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-
-	add_timer(&pCfg->timer);
-	mpt_put_msg_frame(mpt_base_index, ioc, mf);
-	wait_event(mpt_waitq, pCfg->wait_done);
-
-	/* mf has been freed - do not access */
-
-	rc = pCfg->status;
-
-	return rc;
-}
-
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
  *	mpt_timer_expired - Call back for timer process.
  *	Used only internal config functionality.
@@ -6142,7 +6032,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
 	if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
 		int idx;
 
-		idx = ioc->eventContext % ioc->eventLogSize;
+		idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE;
 
 		ioc->events[idx].event = event;
 		ioc->events[idx].eventContext = ioc->eventContext;
@@ -6540,7 +6430,6 @@ EXPORT_SYMBOL(mpt_lan_index);
 EXPORT_SYMBOL(mpt_stm_index);
 EXPORT_SYMBOL(mpt_HardResetHandler);
 EXPORT_SYMBOL(mpt_config);
-EXPORT_SYMBOL(mpt_toolbox);
 EXPORT_SYMBOL(mpt_findImVolumes);
 EXPORT_SYMBOL(mpt_read_ioc_pg_3);
 EXPORT_SYMBOL(mpt_alloc_fw_memory);
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index ea2649ecad1f..723d54300953 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -616,6 +616,7 @@ typedef struct _MPT_ADAPTER
 					 * increments by 32 bytes
 					 */
 	int			 errata_flag_1064;
+	int			 aen_event_read_flag; /* flag to indicate event log was read*/
 	u8			 FirstWhoInit;
 	u8			 upload_fw;	/* If set, do a fw upload */
 	u8			 reload_fw;	/* Force a FW Reload on next reset */
@@ -1026,7 +1027,6 @@ extern u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
 extern void	 mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan);
 extern int	 mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
 extern int	 mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
-extern int	 mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
 extern void	 mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
 extern void	 mpt_free_fw_memory(MPT_ADAPTER *ioc);
 extern int	 mpt_findImVolumes(MPT_ADAPTER *ioc);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index bdf709987982..9b64e07400da 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -136,6 +136,12 @@ static void mptctl_free_tm_flags(MPT_ADAPTER *ioc);
  */
 static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
 
+/*
+ * Event Handler function
+ */
+static int mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
+struct fasync_struct *async_queue=NULL;
+
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
  * Scatter gather list (SGL) sizes and limits...
@@ -385,18 +391,18 @@ static int mptctl_bus_reset(MPT_IOCTL *ioctl)
 	}
 
 	/* Now wait for the command to complete */
-	ii = wait_event_interruptible_timeout(mptctl_wait,
+	ii = wait_event_timeout(mptctl_wait,
 	     ioctl->wait_done == 1,
 	     HZ*5 /* 5 second timeout */);
 
 	if(ii <=0 && (ioctl->wait_done != 1 )) {
+		mpt_free_msg_frame(hd->ioc, mf);
 		ioctl->wait_done = 0;
 		retval = -1; /* return failure */
 	}
 
 mptctl_bus_reset_done:
 
-	mpt_free_msg_frame(hd->ioc, mf);
 	mptctl_free_tm_flags(ioctl->ioc);
 	return retval;
 }
@@ -472,6 +478,69 @@ mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* ASYNC Event Notification Support */
+static int
+mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
+{
+	u8 event;
+
+	event = le32_to_cpu(pEvReply->Event) & 0xFF;
+
+	dctlprintk(("%s() called\n", __FUNCTION__));
+	if(async_queue == NULL)
+		return 1;
+
+	/* Raise SIGIO for persistent events.
+	 * TODO - this define is not in MPI spec yet,
+	 * but they plan to set it to 0x21
+	 */
+	if (event == 0x21 ) {
+		ioc->aen_event_read_flag=1;
+		dctlprintk(("Raised SIGIO to application\n"));
+		devtprintk(("Raised SIGIO to application\n"));
+		kill_fasync(&async_queue, SIGIO, POLL_IN);
+		return 1;
+	}
+
+	/* This flag is set after SIGIO was raised, and
+	 * remains set until the application has read
+	 * the event log via ioctl=MPTEVENTREPORT
+	 */
+	if(ioc->aen_event_read_flag)
+		return 1;
+
+	/* Signal only for the events that are
+	 * requested for by the application
+	 */
+	if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
+		ioc->aen_event_read_flag=1;
+		dctlprintk(("Raised SIGIO to application\n"));
+		devtprintk(("Raised SIGIO to application\n"));
+		kill_fasync(&async_queue, SIGIO, POLL_IN);
+	}
+	return 1;
+}
+
+static int
+mptctl_fasync(int fd, struct file *filep, int mode)
+{
+	MPT_ADAPTER	*ioc;
+
+	list_for_each_entry(ioc, &ioc_list, list)
+		ioc->aen_event_read_flag=0;
+
+	dctlprintk(("%s() called\n", __FUNCTION__));
+	return fasync_helper(fd, filep, mode, &async_queue);
+}
+
+static int
+mptctl_release(struct inode *inode, struct file *filep)
+{
+	dctlprintk(("%s() called\n", __FUNCTION__));
+	return fasync_helper(-1, filep, 0, &async_queue);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
  *	MPT ioctl handler
  * cmd - specify the particular IOCTL command to be issued
@@ -674,22 +743,23 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
 	u16			 iocstat;
 	pFWDownloadReply_t	 ReplyMsg = NULL;
 
-	dctlprintk((KERN_INFO "mptctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id));
+	dctlprintk(("mptctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id));
 
-	dctlprintk((KERN_INFO "DbG: kfwdl.bufp = %p\n", ufwbuf));
-	dctlprintk((KERN_INFO "DbG: kfwdl.fwlen = %d\n", (int)fwlen));
-	dctlprintk((KERN_INFO "DbG: kfwdl.ioc = %04xh\n", ioc));
+	dctlprintk(("DbG: kfwdl.bufp = %p\n", ufwbuf));
+	dctlprintk(("DbG: kfwdl.fwlen = %d\n", (int)fwlen));
+	dctlprintk(("DbG: kfwdl.ioc = %04xh\n", ioc));
 
-	if ((ioc = mpt_verify_adapter(ioc, &iocp)) < 0) {
-		dctlprintk(("%s@%d::_ioctl_fwdl - ioc%d not found!\n",
-				__FILE__, __LINE__, ioc));
+	if (mpt_verify_adapter(ioc, &iocp) < 0) {
+		dctlprintk(("ioctl_fwdl - ioc%d not found!\n",
+				ioc));
 		return -ENODEV; /* (-6) No such device or address */
-	}
+	} else {
 
 	/* Valid device. Get a message frame and construct the FW download message.
 	 */
 	if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
 		return -EAGAIN;
+	}
 	dlmsg = (FWDownload_t*) mf;
 	ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
 	sgOut = (char *) (ptsge + 1);
@@ -702,7 +772,11 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
 	dlmsg->ChainOffset = 0;
 	dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD;
 	dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0;
-	dlmsg->MsgFlags = 0;
+	if (iocp->facts.MsgVersion >= MPI_VERSION_01_05)
+		dlmsg->MsgFlags = MPI_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT;
+	else
+		dlmsg->MsgFlags = 0;
+
 
 	/* Set up the Transaction SGE.
 	 */
@@ -754,7 +828,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
 		goto fwdl_out;
 	}
 
-	dctlprintk((KERN_INFO "DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags));
+	dctlprintk(("DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags));
 
 	/*
 	 * Parse SG list, copying sgl itself,
@@ -803,11 +877,11 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
 	/*
 	 * Finally, perform firmware download.
 	 */
-	iocp->ioctl->wait_done = 0;
+	ReplyMsg = NULL;
 	mpt_put_msg_frame(mptctl_id, iocp, mf);
 
 	/* Now wait for the command to complete */
-	ret = wait_event_interruptible_timeout(mptctl_wait,
+	ret = wait_event_timeout(mptctl_wait,
 	     iocp->ioctl->wait_done == 1,
 	     HZ*60);
 
@@ -1145,7 +1219,9 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
 	/* Fill in the data and return the structure to the calling
1146 * program 1220 * program
1147 */ 1221 */
1148 if (ioc->bus_type == FC) 1222 if (ioc->bus_type == SAS)
1223 karg->adapterType = MPT_IOCTL_INTERFACE_SAS;
1224 else if (ioc->bus_type == FC)
1149 karg->adapterType = MPT_IOCTL_INTERFACE_FC; 1225 karg->adapterType = MPT_IOCTL_INTERFACE_FC;
1150 else 1226 else
1151 karg->adapterType = MPT_IOCTL_INTERFACE_SCSI; 1227 karg->adapterType = MPT_IOCTL_INTERFACE_SCSI;
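The SAS branch added above extends the existing FC/SCSI mapping for the reported adapterType. The same decision written as a small helper, purely for illustration (the helper name is invented; the driver open-codes this):

static u8 bus_type_to_ioctl_interface(int bus_type)
{
	if (bus_type == SAS)
		return MPT_IOCTL_INTERFACE_SAS;	/* 0x03, added to mptctl.h below */
	else if (bus_type == FC)
		return MPT_IOCTL_INTERFACE_FC;	/* 0x01 */
	return MPT_IOCTL_INTERFACE_SCSI;	/* 0x00 */
}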
@@ -1170,12 +1246,11 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
1170 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); 1246 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn );
1171 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); 1247 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
1172 } else if (cim_rev == 2) { 1248 } else if (cim_rev == 2) {
1173 /* Get the PCI bus, device, function and segment ID numbers 1249 /* Get the PCI bus, device, function and segment ID numbers
1174 for the IOC */ 1250 for the IOC */
1175 karg->pciInfo.u.bits.busNumber = pdev->bus->number; 1251 karg->pciInfo.u.bits.busNumber = pdev->bus->number;
1176 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); 1252 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn );
1177 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); 1253 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
1178 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
1179 karg->pciInfo.segmentID = pci_domain_nr(pdev->bus); 1254 karg->pciInfo.segmentID = pci_domain_nr(pdev->bus);
1180 } 1255 }
1181 1256
@@ -1500,7 +1575,7 @@ mptctl_eventquery (unsigned long arg)
1500 return -ENODEV; 1575 return -ENODEV;
1501 } 1576 }
1502 1577
1503 karg.eventEntries = ioc->eventLogSize; 1578 karg.eventEntries = MPTCTL_EVENT_LOG_SIZE;
1504 karg.eventTypes = ioc->eventTypes; 1579 karg.eventTypes = ioc->eventTypes;
1505 1580
1506 /* Copy the data from kernel memory to user memory 1581 /* Copy the data from kernel memory to user memory
@@ -1550,7 +1625,6 @@ mptctl_eventenable (unsigned long arg)
1550 memset(ioc->events, 0, sz); 1625 memset(ioc->events, 0, sz);
1551 ioc->alloc_total += sz; 1626 ioc->alloc_total += sz;
1552 1627
1553 ioc->eventLogSize = MPTCTL_EVENT_LOG_SIZE;
1554 ioc->eventContext = 0; 1628 ioc->eventContext = 0;
1555 } 1629 }
1556 1630
@@ -1590,7 +1664,7 @@ mptctl_eventreport (unsigned long arg)
1590 maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS); 1664 maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS);
1591 1665
1592 1666
1593 max = ioc->eventLogSize < maxEvents ? ioc->eventLogSize : maxEvents; 1667 max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents;
1594 1668
1595 /* If fewer than 1 event is requested, there must have 1669 /* If fewer than 1 event is requested, there must have
1596 * been some type of error. 1670 * been some type of error.
@@ -1598,6 +1672,9 @@ mptctl_eventreport (unsigned long arg)
1598 if ((max < 1) || !ioc->events) 1672 if ((max < 1) || !ioc->events)
1599 return -ENODATA; 1673 return -ENODATA;
1600 1674
1675 /* reset this flag so SIGIO can restart */
1676 ioc->aen_event_read_flag=0;
1677
1601 /* Copy the data from kernel memory to user memory 1678 /* Copy the data from kernel memory to user memory
1602 */ 1679 */
1603 numBytes = max * sizeof(MPT_IOCTL_EVENTS); 1680 numBytes = max * sizeof(MPT_IOCTL_EVENTS);
@@ -1817,6 +1894,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1817 case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR: 1894 case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
1818 case MPI_FUNCTION_FW_DOWNLOAD: 1895 case MPI_FUNCTION_FW_DOWNLOAD:
1819 case MPI_FUNCTION_FC_PRIMITIVE_SEND: 1896 case MPI_FUNCTION_FC_PRIMITIVE_SEND:
1897 case MPI_FUNCTION_TOOLBOX:
1898 case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
1820 break; 1899 break;
1821 1900
1822 case MPI_FUNCTION_SCSI_IO_REQUEST: 1901 case MPI_FUNCTION_SCSI_IO_REQUEST:
@@ -1837,7 +1916,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1837 goto done_free_mem; 1916 goto done_free_mem;
1838 } 1917 }
1839 1918
1840 pScsiReq->MsgFlags = mpt_msg_flags(); 1919 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
1920 pScsiReq->MsgFlags |= mpt_msg_flags();
1921
1841 1922
1842 /* verify that app has not requested 1923 /* verify that app has not requested
1843 * more sense data than driver 1924 * more sense data than driver
@@ -1888,6 +1969,25 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1888 } 1969 }
1889 break; 1970 break;
1890 1971
1972 case MPI_FUNCTION_SMP_PASSTHROUGH:
1973 /* Check mf->PassthruFlags to determine if
1974 * transfer is ImmediateMode or not.
1975 * Immediate mode returns data in the ReplyFrame.
1976 * Else, we are sending request and response data
1977 * in two SGLs at the end of the mf.
1978 */
1979 break;
1980
1981 case MPI_FUNCTION_SATA_PASSTHROUGH:
1982 if (!ioc->sh) {
1983 printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
1984 "SCSI driver is not loaded. \n",
1985 __FILE__, __LINE__);
1986 rc = -EFAULT;
1987 goto done_free_mem;
1988 }
1989 break;
1990
1891 case MPI_FUNCTION_RAID_ACTION: 1991 case MPI_FUNCTION_RAID_ACTION:
1892 /* Just add a SGE 1992 /* Just add a SGE
1893 */ 1993 */
@@ -1900,7 +2000,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1900 int scsidir = MPI_SCSIIO_CONTROL_READ; 2000 int scsidir = MPI_SCSIIO_CONTROL_READ;
1901 int dataSize; 2001 int dataSize;
1902 2002
1903 pScsiReq->MsgFlags = mpt_msg_flags(); 2003 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
2004 pScsiReq->MsgFlags |= mpt_msg_flags();
2005
1904 2006
1905 /* verify that app has not requested 2007 /* verify that app has not requested
1906 * more sense data than driver 2008 * more sense data than driver
@@ -2130,7 +2232,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2130 2232
2131 /* Now wait for the command to complete */ 2233 /* Now wait for the command to complete */
2132 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; 2234 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT;
2133 timeout = wait_event_interruptible_timeout(mptctl_wait, 2235 timeout = wait_event_timeout(mptctl_wait,
2134 ioc->ioctl->wait_done == 1, 2236 ioc->ioctl->wait_done == 1,
2135 HZ*timeout); 2237 HZ*timeout);
2136 2238
@@ -2246,13 +2348,16 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2246 hp_host_info_t __user *uarg = (void __user *) arg; 2348 hp_host_info_t __user *uarg = (void __user *) arg;
2247 MPT_ADAPTER *ioc; 2349 MPT_ADAPTER *ioc;
2248 struct pci_dev *pdev; 2350 struct pci_dev *pdev;
2249 char *pbuf; 2351 char *pbuf=NULL;
2250 dma_addr_t buf_dma; 2352 dma_addr_t buf_dma;
2251 hp_host_info_t karg; 2353 hp_host_info_t karg;
2252 CONFIGPARMS cfg; 2354 CONFIGPARMS cfg;
2253 ConfigPageHeader_t hdr; 2355 ConfigPageHeader_t hdr;
2254 int iocnum; 2356 int iocnum;
2255 int rc, cim_rev; 2357 int rc, cim_rev;
2358 ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
2359 MPT_FRAME_HDR *mf = NULL;
2360 MPIHeader_t *mpi_hdr;
2256 2361
2257 dctlprintk((": mptctl_hp_hostinfo called.\n")); 2362 dctlprintk((": mptctl_hp_hostinfo called.\n"));
2258 /* Reset long to int. Should affect IA64 and SPARC only 2363 /* Reset long to int. Should affect IA64 and SPARC only
@@ -2370,7 +2475,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2370 2475
2371 karg.base_io_addr = pci_resource_start(pdev, 0); 2476 karg.base_io_addr = pci_resource_start(pdev, 0);
2372 2477
2373 if (ioc->bus_type == FC) 2478 if ((ioc->bus_type == SAS) || (ioc->bus_type == FC))
2374 karg.bus_phys_width = HP_BUS_WIDTH_UNK; 2479 karg.bus_phys_width = HP_BUS_WIDTH_UNK;
2375 else 2480 else
2376 karg.bus_phys_width = HP_BUS_WIDTH_16; 2481 karg.bus_phys_width = HP_BUS_WIDTH_16;
@@ -2388,20 +2493,67 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2388 } 2493 }
2389 } 2494 }
2390 2495
2391 cfg.pageAddr = 0; 2496 /*
2392 cfg.action = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL; 2497 * Gather ISTWI(Industry Standard Two Wire Interface) Data
2393 cfg.dir = MPI_TB_ISTWI_FLAGS_READ; 2498 */
2394 cfg.timeout = 10; 2499 if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
2500 dfailprintk((MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
2501 ioc->name,__FUNCTION__));
2502 goto out;
2503 }
2504
2505 IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf;
2506 mpi_hdr = (MPIHeader_t *) mf;
2507 memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t));
2508 IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX;
2509 IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL;
2510 IstwiRWRequest->MsgContext = mpi_hdr->MsgContext;
2511 IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ;
2512 IstwiRWRequest->NumAddressBytes = 0x01;
2513 IstwiRWRequest->DataLength = cpu_to_le16(0x04);
2514 if (pdev->devfn & 1)
2515 IstwiRWRequest->DeviceAddr = 0xB2;
2516 else
2517 IstwiRWRequest->DeviceAddr = 0xB0;
2518
2395 pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); 2519 pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
2396 if (pbuf) { 2520 if (!pbuf)
2397 cfg.physAddr = buf_dma; 2521 goto out;
2398 if ((mpt_toolbox(ioc, &cfg)) == 0) { 2522 mpt_add_sge((char *)&IstwiRWRequest->SGL,
2399 karg.rsvd = *(u32 *)pbuf; 2523 (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma);
2400 } 2524
2401 pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); 2525 ioc->ioctl->wait_done = 0;
2402 pbuf = NULL; 2526 mpt_put_msg_frame(mptctl_id, ioc, mf);
2527
2528 rc = wait_event_timeout(mptctl_wait,
2529 ioc->ioctl->wait_done == 1,
2530 HZ*MPT_IOCTL_DEFAULT_TIMEOUT /* 10 sec */);
2531
2532 if(rc <=0 && (ioc->ioctl->wait_done != 1 )) {
2533 /*
2534 * Now we need to reset the board
2535 */
2536 mpt_free_msg_frame(ioc, mf);
2537 mptctl_timeout_expired(ioc->ioctl);
2538 goto out;
2403 } 2539 }
2404 2540
2541 /*
2542 *ISTWI Data Definition
2543 * pbuf[0] = FW_VERSION = 0x4
2544 * pbuf[1] = Bay Count = 6 or 4 or 2, depending on
2545 * the config, you should be seeing one out of these three values
2546 * pbuf[2] = Drive Installed Map = bit pattern depend on which
2547 * bays have drives in them
2548 * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3)
2549 */
2550 if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID)
2551 karg.rsvd = *(u32 *)pbuf;
2552
2553 out:
2554 if (pbuf)
2555 pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);
2556
2405 /* Copy the data from kernel memory to user memory 2557 /* Copy the data from kernel memory to user memory
2406 */ 2558 */
2407 if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) { 2559 if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) {
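mptctl_hp_hostinfo() now builds the ISTWI read as an explicit Toolbox request rather than going through mpt_toolbox(), and the reply layout is spelled out in the comment above. A small illustrative decode of that 4-byte payload (struct and function names are invented; the checksum byte is left uninterpreted beyond what the comment states):

struct istwi_info {			/* illustrative only */
	u8 fw_version;			/* pbuf[0], expected 0x4 */
	u8 bay_count;			/* pbuf[1], typically 2, 4 or 6 */
	u8 drive_map;			/* pbuf[2], one bit per populated bay */
	u8 checksum;			/* pbuf[3] */
};

static void istwi_decode(const u8 *pbuf, struct istwi_info *info)
{
	info->fw_version = pbuf[0];
	info->bay_count  = pbuf[1];
	info->drive_map  = pbuf[2];
	info->checksum   = pbuf[3];
}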
@@ -2459,7 +2611,7 @@ mptctl_hp_targetinfo(unsigned long arg)
2459 2611
2460 /* There is nothing to do for FCP parts. 2612 /* There is nothing to do for FCP parts.
2461 */ 2613 */
2462 if (ioc->bus_type == FC) 2614 if ((ioc->bus_type == SAS) || (ioc->bus_type == FC))
2463 return 0; 2615 return 0;
2464 2616
2465 if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL)) 2617 if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL))
@@ -2569,6 +2721,8 @@ mptctl_hp_targetinfo(unsigned long arg)
2569static struct file_operations mptctl_fops = { 2721static struct file_operations mptctl_fops = {
2570 .owner = THIS_MODULE, 2722 .owner = THIS_MODULE,
2571 .llseek = no_llseek, 2723 .llseek = no_llseek,
2724 .release = mptctl_release,
2725 .fasync = mptctl_fasync,
2572 .unlocked_ioctl = mptctl_ioctl, 2726 .unlocked_ioctl = mptctl_ioctl,
2573#ifdef CONFIG_COMPAT 2727#ifdef CONFIG_COMPAT
2574 .compat_ioctl = compat_mpctl_ioctl, 2728 .compat_ioctl = compat_mpctl_ioctl,
@@ -2813,6 +2967,11 @@ static int __init mptctl_init(void)
2813 /* FIXME! */ 2967 /* FIXME! */
2814 } 2968 }
2815 2969
2970 if (mpt_event_register(mptctl_id, mptctl_event_process) == 0) {
2971 devtprintk((KERN_INFO MYNAM
2972 ": Registered for IOC event notifications\n"));
2973 }
2974
2816 return 0; 2975 return 0;
2817 2976
2818out_fail: 2977out_fail:
diff --git a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h
index 518996e03481..a2f8a97992e6 100644
--- a/drivers/message/fusion/mptctl.h
+++ b/drivers/message/fusion/mptctl.h
@@ -169,8 +169,10 @@ struct mpt_ioctl_pci_info2 {
169 * Read only. 169 * Read only.
170 * Data starts at offset 0xC 170 * Data starts at offset 0xC
171 */ 171 */
172#define MPT_IOCTL_INTERFACE_FC (0x01)
173#define MPT_IOCTL_INTERFACE_SCSI (0x00) 172#define MPT_IOCTL_INTERFACE_SCSI (0x00)
173#define MPT_IOCTL_INTERFACE_FC (0x01)
174#define MPT_IOCTL_INTERFACE_FC_IP (0x02)
175#define MPT_IOCTL_INTERFACE_SAS (0x03)
174#define MPT_IOCTL_VERSION_LENGTH (32) 176#define MPT_IOCTL_VERSION_LENGTH (32)
175 177
176struct mpt_ioctl_iocinfo { 178struct mpt_ioctl_iocinfo {
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 05789e505464..4fee6befc93d 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2489,7 +2489,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
2489 int idx; 2489 int idx;
2490 MPT_ADAPTER *ioc = hd->ioc; 2490 MPT_ADAPTER *ioc = hd->ioc;
2491 2491
2492 idx = ioc->eventContext % ioc->eventLogSize; 2492 idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE;
2493 ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE; 2493 ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE;
2494 ioc->events[idx].eventContext = ioc->eventContext; 2494 ioc->events[idx].eventContext = ioc->eventContext;
2495 2495
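With the per-adapter eventLogSize field removed, the event buffer is treated as a fixed-size ring indexed by the running event context, both here and in the mptctl changes above. A minimal sketch of that ring discipline with illustrative names and an illustrative size (the real bound is MPTCTL_EVENT_LOG_SIZE):

#define EVENT_LOG_SIZE 32		/* illustrative; stands in for MPTCTL_EVENT_LOG_SIZE */

struct event_entry {
	unsigned int event;
	unsigned int context;
};

static struct event_entry events[EVENT_LOG_SIZE];
static unsigned int event_context;	/* monotonically increasing */

static void log_event(unsigned int ev)
{
	unsigned int idx = event_context % EVENT_LOG_SIZE;

	events[idx].event = ev;
	events[idx].context = event_context++;
	/* once the buffer is full, the oldest entry is simply overwritten */
}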
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index 37ee7f8dc82f..9fef29d978b5 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -97,6 +97,13 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
97 if (data->flags & MMC_DATA_READ) { 97 if (data->flags & MMC_DATA_READ) {
98 datactrl |= MCI_DPSM_DIRECTION; 98 datactrl |= MCI_DPSM_DIRECTION;
99 irqmask = MCI_RXFIFOHALFFULLMASK; 99 irqmask = MCI_RXFIFOHALFFULLMASK;
100
101 /*
102 * If we have less than a FIFOSIZE of bytes to transfer,
103 * trigger a PIO interrupt as soon as any data is available.
104 */
105 if (host->size < MCI_FIFOSIZE)
106 irqmask |= MCI_RXDATAAVLBLMASK;
100 } else { 107 } else {
101 /* 108 /*
102 * We don't actually need to include "FIFO empty" here 109 * We don't actually need to include "FIFO empty" here
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 4d7d47cf2394..a5f2ba9a8fdb 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -710,10 +710,9 @@ static inline void
710_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, 710_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
711 struct zfcp_adapter *adapter, 711 struct zfcp_adapter *adapter,
712 struct scsi_cmnd *scsi_cmnd, 712 struct scsi_cmnd *scsi_cmnd,
713 struct zfcp_fsf_req *new_fsf_req) 713 struct zfcp_fsf_req *fsf_req,
714 struct zfcp_fsf_req *old_fsf_req)
714{ 715{
715 struct zfcp_fsf_req *fsf_req =
716 (struct zfcp_fsf_req *)scsi_cmnd->host_scribble;
717 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; 716 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
718 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; 717 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
719 unsigned long flags; 718 unsigned long flags;
@@ -727,19 +726,20 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
727 if (offset == 0) { 726 if (offset == 0) {
728 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); 727 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
729 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); 728 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
730 if (scsi_cmnd->device) { 729 if (scsi_cmnd != NULL) {
731 rec->scsi_id = scsi_cmnd->device->id; 730 if (scsi_cmnd->device) {
732 rec->scsi_lun = scsi_cmnd->device->lun; 731 rec->scsi_id = scsi_cmnd->device->id;
732 rec->scsi_lun = scsi_cmnd->device->lun;
733 }
734 rec->scsi_result = scsi_cmnd->result;
735 rec->scsi_cmnd = (unsigned long)scsi_cmnd;
736 rec->scsi_serial = scsi_cmnd->serial_number;
737 memcpy(rec->scsi_opcode, &scsi_cmnd->cmnd,
738 min((int)scsi_cmnd->cmd_len,
739 ZFCP_DBF_SCSI_OPCODE));
740 rec->scsi_retries = scsi_cmnd->retries;
741 rec->scsi_allowed = scsi_cmnd->allowed;
733 } 742 }
734 rec->scsi_result = scsi_cmnd->result;
735 rec->scsi_cmnd = (unsigned long)scsi_cmnd;
736 rec->scsi_serial = scsi_cmnd->serial_number;
737 memcpy(rec->scsi_opcode,
738 &scsi_cmnd->cmnd,
739 min((int)scsi_cmnd->cmd_len,
740 ZFCP_DBF_SCSI_OPCODE));
741 rec->scsi_retries = scsi_cmnd->retries;
742 rec->scsi_allowed = scsi_cmnd->allowed;
743 if (fsf_req != NULL) { 743 if (fsf_req != NULL) {
744 fcp_rsp = (struct fcp_rsp_iu *) 744 fcp_rsp = (struct fcp_rsp_iu *)
745 &(fsf_req->qtcb->bottom.io.fcp_rsp); 745 &(fsf_req->qtcb->bottom.io.fcp_rsp);
@@ -772,15 +772,8 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
772 rec->fsf_seqno = fsf_req->seq_no; 772 rec->fsf_seqno = fsf_req->seq_no;
773 rec->fsf_issued = fsf_req->issued; 773 rec->fsf_issued = fsf_req->issued;
774 } 774 }
775 if (new_fsf_req != NULL) { 775 rec->type.old_fsf_reqid =
776 rec->type.new_fsf_req.fsf_reqid = 776 (unsigned long) old_fsf_req;
777 (unsigned long)
778 new_fsf_req;
779 rec->type.new_fsf_req.fsf_seqno =
780 new_fsf_req->seq_no;
781 rec->type.new_fsf_req.fsf_issued =
782 new_fsf_req->issued;
783 }
784 } else { 777 } else {
785 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); 778 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
786 dump->total_size = buflen; 779 dump->total_size = buflen;
@@ -801,19 +794,21 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
801inline void 794inline void
802zfcp_scsi_dbf_event_result(const char *tag, int level, 795zfcp_scsi_dbf_event_result(const char *tag, int level,
803 struct zfcp_adapter *adapter, 796 struct zfcp_adapter *adapter,
804 struct scsi_cmnd *scsi_cmnd) 797 struct scsi_cmnd *scsi_cmnd,
798 struct zfcp_fsf_req *fsf_req)
805{ 799{
806 _zfcp_scsi_dbf_event_common("rslt", 800 _zfcp_scsi_dbf_event_common("rslt", tag, level,
807 tag, level, adapter, scsi_cmnd, NULL); 801 adapter, scsi_cmnd, fsf_req, NULL);
808} 802}
809 803
810inline void 804inline void
811zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, 805zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
812 struct scsi_cmnd *scsi_cmnd, 806 struct scsi_cmnd *scsi_cmnd,
813 struct zfcp_fsf_req *new_fsf_req) 807 struct zfcp_fsf_req *new_fsf_req,
808 struct zfcp_fsf_req *old_fsf_req)
814{ 809{
815 _zfcp_scsi_dbf_event_common("abrt", 810 _zfcp_scsi_dbf_event_common("abrt", tag, 1,
816 tag, 1, adapter, scsi_cmnd, new_fsf_req); 811 adapter, scsi_cmnd, new_fsf_req, old_fsf_req);
817} 812}
818 813
819inline void 814inline void
@@ -823,7 +818,7 @@ zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
823 struct zfcp_adapter *adapter = unit->port->adapter; 818 struct zfcp_adapter *adapter = unit->port->adapter;
824 819
825 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst", 820 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst",
826 tag, 1, adapter, scsi_cmnd, NULL); 821 tag, 1, adapter, scsi_cmnd, NULL, NULL);
827} 822}
828 823
829static int 824static int
@@ -856,6 +851,10 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
856 rec->scsi_retries); 851 rec->scsi_retries);
857 len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x", 852 len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x",
858 rec->scsi_allowed); 853 rec->scsi_allowed);
854 if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) {
855 len += zfcp_dbf_view(out_buf + len, "old_fsf_reqid", "0x%0Lx",
856 rec->type.old_fsf_reqid);
857 }
859 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx", 858 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
860 rec->fsf_reqid); 859 rec->fsf_reqid);
861 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x", 860 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
@@ -883,21 +882,6 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
883 min((int)rec->type.fcp.sns_info_len, 882 min((int)rec->type.fcp.sns_info_len,
884 ZFCP_DBF_SCSI_FCP_SNS_INFO), 0, 883 ZFCP_DBF_SCSI_FCP_SNS_INFO), 0,
885 rec->type.fcp.sns_info_len); 884 rec->type.fcp.sns_info_len);
886 } else if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) {
887 len += zfcp_dbf_view(out_buf + len, "fsf_reqid_abort", "0x%0Lx",
888 rec->type.new_fsf_req.fsf_reqid);
889 len += zfcp_dbf_view(out_buf + len, "fsf_seqno_abort", "0x%08x",
890 rec->type.new_fsf_req.fsf_seqno);
891 len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
892 rec->type.new_fsf_req.fsf_issued);
893 } else if ((strncmp(rec->tag, "trst", ZFCP_DBF_TAG_SIZE) == 0) ||
894 (strncmp(rec->tag, "lrst", ZFCP_DBF_TAG_SIZE) == 0)) {
895 len += zfcp_dbf_view(out_buf + len, "fsf_reqid_reset", "0x%0Lx",
896 rec->type.new_fsf_req.fsf_reqid);
897 len += zfcp_dbf_view(out_buf + len, "fsf_seqno_reset", "0x%08x",
898 rec->type.new_fsf_req.fsf_seqno);
899 len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
900 rec->type.new_fsf_req.fsf_issued);
901 } 885 }
902 886
903 len += sprintf(out_buf + len, "\n"); 887 len += sprintf(out_buf + len, "\n");
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index e260d19fa717..7f551d66f47f 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -152,11 +152,6 @@ typedef u32 scsi_lun_t;
152#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100 152#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100
153#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7 153#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7
154 154
155/* Retry 5 times every 2 second, then every minute */
156#define ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES 5
157#define ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP 200
158#define ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP 6000
159
160/* timeout value for "default timer" for fsf requests */ 155/* timeout value for "default timer" for fsf requests */
161#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ); 156#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ);
162 157
@@ -429,11 +424,7 @@ struct zfcp_scsi_dbf_record {
429 u32 fsf_seqno; 424 u32 fsf_seqno;
430 u64 fsf_issued; 425 u64 fsf_issued;
431 union { 426 union {
432 struct { 427 u64 old_fsf_reqid;
433 u64 fsf_reqid;
434 u32 fsf_seqno;
435 u64 fsf_issued;
436 } new_fsf_req;
437 struct { 428 struct {
438 u8 rsp_validity; 429 u8 rsp_validity;
439 u8 rsp_scsi_status; 430 u8 rsp_scsi_status;
@@ -915,8 +906,6 @@ struct zfcp_adapter {
915 wwn_t peer_wwnn; /* P2P peer WWNN */ 906 wwn_t peer_wwnn; /* P2P peer WWNN */
916 wwn_t peer_wwpn; /* P2P peer WWPN */ 907 wwn_t peer_wwpn; /* P2P peer WWPN */
917 u32 peer_d_id; /* P2P peer D_ID */ 908 u32 peer_d_id; /* P2P peer D_ID */
918 wwn_t physical_wwpn; /* WWPN of physical port */
919 u32 physical_s_id; /* local FC port ID */
920 struct ccw_device *ccw_device; /* S/390 ccw device */ 909 struct ccw_device *ccw_device; /* S/390 ccw device */
921 u8 fc_service_class; 910 u8 fc_service_class;
922 u32 hydra_version; /* Hydra version */ 911 u32 hydra_version; /* Hydra version */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index da947e662031..e3c4bdd29a60 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -2246,15 +2246,6 @@ zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
2246{ 2246{
2247 int retval; 2247 int retval;
2248 2248
2249 if ((atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
2250 &erp_action->adapter->status)) &&
2251 (erp_action->adapter->adapter_features &
2252 FSF_FEATURE_HBAAPI_MANAGEMENT)) {
2253 zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
2254 atomic_set(&erp_action->adapter->erp_counter, 0);
2255 return ZFCP_ERP_FAILED;
2256 }
2257
2258 retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); 2249 retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
2259 if (retval == ZFCP_ERP_FAILED) 2250 if (retval == ZFCP_ERP_FAILED)
2260 return ZFCP_ERP_FAILED; 2251 return ZFCP_ERP_FAILED;
@@ -2266,13 +2257,6 @@ zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
2266 return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action); 2257 return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
2267} 2258}
2268 2259
2269/*
2270 * function:
2271 *
2272 * purpose:
2273 *
2274 * returns:
2275 */
2276static int 2260static int
2277zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action) 2261zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2278{ 2262{
@@ -2350,48 +2334,40 @@ static int
2350zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) 2334zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2351{ 2335{
2352 int ret; 2336 int ret;
2353 int retries; 2337 struct zfcp_adapter *adapter;
2354 int sleep;
2355 struct zfcp_adapter *adapter = erp_action->adapter;
2356 2338
2339 adapter = erp_action->adapter;
2357 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2340 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2358 2341
2359 retries = 0; 2342 write_lock(&adapter->erp_lock);
2360 do { 2343 zfcp_erp_action_to_running(erp_action);
2361 write_lock(&adapter->erp_lock); 2344 write_unlock(&adapter->erp_lock);
2362 zfcp_erp_action_to_running(erp_action);
2363 write_unlock(&adapter->erp_lock);
2364 zfcp_erp_timeout_init(erp_action);
2365 ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL);
2366 if (ret == -EOPNOTSUPP) {
2367 debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp");
2368 return ZFCP_ERP_SUCCEEDED;
2369 } else if (ret) {
2370 debug_text_event(adapter->erp_dbf, 3, "a_xport_failed");
2371 return ZFCP_ERP_FAILED;
2372 }
2373 debug_text_event(adapter->erp_dbf, 6, "a_xport_ok");
2374 2345
2375 down(&adapter->erp_ready_sem); 2346 zfcp_erp_timeout_init(erp_action);
2376 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { 2347 ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL);
2377 ZFCP_LOG_INFO("error: exchange of port data " 2348 if (ret == -EOPNOTSUPP) {
2378 "for adapter %s timed out\n", 2349 debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp");
2379 zfcp_get_busid_by_adapter(adapter)); 2350 return ZFCP_ERP_SUCCEEDED;
2380 break; 2351 } else if (ret) {
2381 } 2352 debug_text_event(adapter->erp_dbf, 3, "a_xport_failed");
2382 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 2353 return ZFCP_ERP_FAILED;
2383 &adapter->status)) 2354 }
2384 break; 2355 debug_text_event(adapter->erp_dbf, 6, "a_xport_ok");
2385 2356
2386 if (retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES) { 2357 ret = ZFCP_ERP_SUCCEEDED;
2387 sleep = ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP; 2358 down(&adapter->erp_ready_sem);
2388 retries++; 2359 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
2389 } else 2360 ZFCP_LOG_INFO("error: exchange port data timed out (adapter "
2390 sleep = ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP; 2361 "%s)\n", zfcp_get_busid_by_adapter(adapter));
2391 schedule_timeout(sleep); 2362 ret = ZFCP_ERP_FAILED;
2392 } while (1); 2363 }
2364 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) {
2365 ZFCP_LOG_INFO("error: exchange port data failed (adapter "
2366 "%s\n", zfcp_get_busid_by_adapter(adapter));
2367 ret = ZFCP_ERP_FAILED;
2368 }
2393 2369
2394 return ZFCP_ERP_SUCCEEDED; 2370 return ret;
2395} 2371}
2396 2372
2397/* 2373/*
@@ -3439,6 +3415,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3439 "(adapter %s, wwpn=0x%016Lx)\n", 3415 "(adapter %s, wwpn=0x%016Lx)\n",
3440 zfcp_get_busid_by_port(port), 3416 zfcp_get_busid_by_port(port),
3441 port->wwpn); 3417 port->wwpn);
3418 else
3419 scsi_flush_work(adapter->scsi_host);
3442 } 3420 }
3443 zfcp_port_put(port); 3421 zfcp_port_put(port);
3444 break; 3422 break;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index c1ba7cf1b496..700f5402a978 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -194,9 +194,10 @@ extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *);
194extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); 194extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
195 195
196extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *, 196extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
197 struct scsi_cmnd *); 197 struct scsi_cmnd *,
198 struct zfcp_fsf_req *);
198extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, 199extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
199 struct scsi_cmnd *, 200 struct scsi_cmnd *, struct zfcp_fsf_req *,
200 struct zfcp_fsf_req *); 201 struct zfcp_fsf_req *);
201extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, 202extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
202 struct scsi_cmnd *); 203 struct scsi_cmnd *);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 9f0cb3d820c0..662ec571d73b 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -388,6 +388,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
388 case FSF_PROT_LINK_DOWN: 388 case FSF_PROT_LINK_DOWN:
389 zfcp_fsf_link_down_info_eval(adapter, 389 zfcp_fsf_link_down_info_eval(adapter,
390 &prot_status_qual->link_down_info); 390 &prot_status_qual->link_down_info);
391 zfcp_erp_adapter_reopen(adapter, 0);
391 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 392 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
392 break; 393 break;
393 394
@@ -558,10 +559,8 @@ zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter,
558 559
559 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 560 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
560 561
561 if (link_down == NULL) { 562 if (link_down == NULL)
562 zfcp_erp_adapter_reopen(adapter, 0); 563 goto out;
563 return;
564 }
565 564
566 switch (link_down->error_code) { 565 switch (link_down->error_code) {
567 case FSF_PSQ_LINK_NO_LIGHT: 566 case FSF_PSQ_LINK_NO_LIGHT:
@@ -643,16 +642,8 @@ zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter,
643 link_down->explanation_code, 642 link_down->explanation_code,
644 link_down->vendor_specific_code); 643 link_down->vendor_specific_code);
645 644
646 switch (link_down->error_code) { 645 out:
647 case FSF_PSQ_LINK_NO_LIGHT: 646 zfcp_erp_adapter_failed(adapter);
648 case FSF_PSQ_LINK_WRAP_PLUG:
649 case FSF_PSQ_LINK_NO_FCP:
650 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
651 zfcp_erp_adapter_reopen(adapter, 0);
652 break;
653 default:
654 zfcp_erp_adapter_failed(adapter);
655 }
656} 647}
657 648
658/* 649/*
@@ -2304,6 +2295,35 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2304 return retval; 2295 return retval;
2305} 2296}
2306 2297
2298/**
2299 * zfcp_fsf_exchange_port_evaluate
2300 * @fsf_req: fsf_req which belongs to xchg port data request
2301 * @xchg_ok: specifies if xchg port data was incomplete or complete (0/1)
2302 */
2303static void
2304zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
2305{
2306 struct zfcp_adapter *adapter;
2307 struct fsf_qtcb *qtcb;
2308 struct fsf_qtcb_bottom_port *bottom, *data;
2309 struct Scsi_Host *shost;
2310
2311 adapter = fsf_req->adapter;
2312 qtcb = fsf_req->qtcb;
2313 bottom = &qtcb->bottom.port;
2314 shost = adapter->scsi_host;
2315
2316 data = (struct fsf_qtcb_bottom_port*) fsf_req->data;
2317 if (data)
2318 memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port));
2319
2320 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
2321 fc_host_permanent_port_name(shost) = bottom->wwpn;
2322 else
2323 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
2324 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
2325 fc_host_supported_speeds(shost) = bottom->supported_speed;
2326}
2307 2327
2308/** 2328/**
2309 * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request 2329 * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request
@@ -2312,38 +2332,26 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2312static void 2332static void
2313zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req) 2333zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req)
2314{ 2334{
2315 struct zfcp_adapter *adapter = fsf_req->adapter; 2335 struct zfcp_adapter *adapter;
2316 struct Scsi_Host *shost = adapter->scsi_host; 2336 struct fsf_qtcb *qtcb;
2317 struct fsf_qtcb *qtcb = fsf_req->qtcb; 2337
2318 struct fsf_qtcb_bottom_port *bottom, *data; 2338 adapter = fsf_req->adapter;
2339 qtcb = fsf_req->qtcb;
2319 2340
2320 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) 2341 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2321 return; 2342 return;
2322 2343
2323 switch (qtcb->header.fsf_status) { 2344 switch (qtcb->header.fsf_status) {
2324 case FSF_GOOD: 2345 case FSF_GOOD:
2346 zfcp_fsf_exchange_port_evaluate(fsf_req, 1);
2325 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2347 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2326
2327 bottom = &qtcb->bottom.port;
2328 data = (struct fsf_qtcb_bottom_port*) fsf_req->data;
2329 if (data)
2330 memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port));
2331 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
2332 fc_host_permanent_port_name(shost) = bottom->wwpn;
2333 else
2334 fc_host_permanent_port_name(shost) =
2335 fc_host_port_name(shost);
2336 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
2337 fc_host_supported_speeds(shost) = bottom->supported_speed;
2338 break; 2348 break;
2339
2340 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 2349 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
2350 zfcp_fsf_exchange_port_evaluate(fsf_req, 0);
2341 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2351 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2342
2343 zfcp_fsf_link_down_info_eval(adapter, 2352 zfcp_fsf_link_down_info_eval(adapter,
2344 &qtcb->header.fsf_status_qual.link_down_info); 2353 &qtcb->header.fsf_status_qual.link_down_info);
2345 break; 2354 break;
2346
2347 default: 2355 default:
2348 debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng"); 2356 debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng");
2349 debug_event(adapter->erp_dbf, 0, 2357 debug_event(adapter->erp_dbf, 0,
@@ -4203,11 +4211,11 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4203 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result); 4211 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result);
4204 4212
4205 if (scpnt->result != 0) 4213 if (scpnt->result != 0)
4206 zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt); 4214 zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt, fsf_req);
4207 else if (scpnt->retries > 0) 4215 else if (scpnt->retries > 0)
4208 zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt); 4216 zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt, fsf_req);
4209 else 4217 else
4210 zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt); 4218 zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt, fsf_req);
4211 4219
4212 /* cleanup pointer (need this especially for abort) */ 4220 /* cleanup pointer (need this especially for abort) */
4213 scpnt->host_scribble = NULL; 4221 scpnt->host_scribble = NULL;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index e0803757c0fa..9f6b4d7a46f3 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -242,7 +242,7 @@ zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
242 if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) 242 if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
243 zfcp_scsi_dbf_event_result("fail", 4, 243 zfcp_scsi_dbf_event_result("fail", 4,
244 (struct zfcp_adapter*) scpnt->device->host->hostdata[0], 244 (struct zfcp_adapter*) scpnt->device->host->hostdata[0],
245 scpnt); 245 scpnt, NULL);
246 /* return directly */ 246 /* return directly */
247 scpnt->scsi_done(scpnt); 247 scpnt->scsi_done(scpnt);
248} 248}
@@ -446,7 +446,7 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
446 old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble; 446 old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble;
447 if (!old_fsf_req) { 447 if (!old_fsf_req) {
448 write_unlock_irqrestore(&adapter->abort_lock, flags); 448 write_unlock_irqrestore(&adapter->abort_lock, flags);
449 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, new_fsf_req); 449 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, NULL);
450 retval = SUCCESS; 450 retval = SUCCESS;
451 goto out; 451 goto out;
452 } 452 }
@@ -460,6 +460,8 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
460 adapter, unit, 0); 460 adapter, unit, 0);
461 if (!new_fsf_req) { 461 if (!new_fsf_req) {
462 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n"); 462 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n");
463 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
464 old_fsf_req);
463 retval = FAILED; 465 retval = FAILED;
464 goto out; 466 goto out;
465 } 467 }
@@ -470,13 +472,16 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
470 472
471 /* status should be valid since signals were not permitted */ 473 /* status should be valid since signals were not permitted */
472 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { 474 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
473 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req); 475 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req,
476 NULL);
474 retval = SUCCESS; 477 retval = SUCCESS;
475 } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { 478 } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
476 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req); 479 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req,
480 NULL);
477 retval = SUCCESS; 481 retval = SUCCESS;
478 } else { 482 } else {
479 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req); 483 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req,
484 NULL);
480 retval = FAILED; 485 retval = FAILED;
481 } 486 }
482 zfcp_fsf_req_free(new_fsf_req); 487 zfcp_fsf_req_free(new_fsf_req);
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c
index dfc07370f412..b29ac25e07f3 100644
--- a/drivers/s390/scsi/zfcp_sysfs_adapter.c
+++ b/drivers/s390/scsi/zfcp_sysfs_adapter.c
@@ -55,8 +55,6 @@ ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
55ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn); 55ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn);
56ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn); 56ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn);
57ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id); 57ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
58ZFCP_DEFINE_ADAPTER_ATTR(physical_wwpn, "0x%016llx\n", adapter->physical_wwpn);
59ZFCP_DEFINE_ADAPTER_ATTR(physical_s_id, "0x%06x\n", adapter->physical_s_id);
60ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version); 58ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
61ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version); 59ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
62ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n", 60ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n",
@@ -241,8 +239,6 @@ static struct attribute *zfcp_adapter_attrs[] = {
241 &dev_attr_peer_wwnn.attr, 239 &dev_attr_peer_wwnn.attr,
242 &dev_attr_peer_wwpn.attr, 240 &dev_attr_peer_wwpn.attr,
243 &dev_attr_peer_d_id.attr, 241 &dev_attr_peer_d_id.attr,
244 &dev_attr_physical_wwpn.attr,
245 &dev_attr_physical_s_id.attr,
246 &dev_attr_card_version.attr, 242 &dev_attr_card_version.attr,
247 &dev_attr_lic_version.attr, 243 &dev_attr_lic_version.attr,
248 &dev_attr_status.attr, 244 &dev_attr_status.attr,
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 31c497542272..d9152d02088c 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -61,6 +61,7 @@
61 Add support for embedded firmware error strings. 61 Add support for embedded firmware error strings.
62 2.26.02.003 - Correctly handle single sgl's with use_sg=1. 62 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
63 2.26.02.004 - Add support for 9550SX controllers. 63 2.26.02.004 - Add support for 9550SX controllers.
64 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
64*/ 65*/
65 66
66#include <linux/module.h> 67#include <linux/module.h>
@@ -84,7 +85,7 @@
84#include "3w-9xxx.h" 85#include "3w-9xxx.h"
85 86
86/* Globals */ 87/* Globals */
87#define TW_DRIVER_VERSION "2.26.02.004" 88#define TW_DRIVER_VERSION "2.26.02.005"
88static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; 89static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
89static unsigned int twa_device_extension_count; 90static unsigned int twa_device_extension_count;
90static int twa_major = -1; 91static int twa_major = -1;
@@ -1408,7 +1409,7 @@ static dma_addr_t twa_map_scsi_single_data(TW_Device_Extension *tw_dev, int requ
1408 dma_addr_t mapping; 1409 dma_addr_t mapping;
1409 struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 1410 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1410 struct pci_dev *pdev = tw_dev->tw_pci_dev; 1411 struct pci_dev *pdev = tw_dev->tw_pci_dev;
1411 int retval = 0; 1412 dma_addr_t retval = 0;
1412 1413
1413 if (cmd->request_bufflen == 0) { 1414 if (cmd->request_bufflen == 0) {
1414 retval = 0; 1415 retval = 0;
@@ -1798,7 +1799,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1798 int i, sg_count; 1799 int i, sg_count;
1799 struct scsi_cmnd *srb = NULL; 1800 struct scsi_cmnd *srb = NULL;
1800 struct scatterlist *sglist = NULL; 1801 struct scatterlist *sglist = NULL;
1801 u32 buffaddr = 0x0; 1802 dma_addr_t buffaddr = 0x0;
1802 int retval = 1; 1803 int retval = 1;
1803 1804
1804 if (tw_dev->srb[request_id]) { 1805 if (tw_dev->srb[request_id]) {
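The 3w-9xxx change above widens the single-buffer DMA handle from int/u32 to dma_addr_t so that bus addresses above 4 GB are not silently truncated (the stated fix for use_sg == 0 on large-memory systems). A tiny illustration of the failure mode, with a made-up address in the comment:

#include <linux/types.h>

static void dma_width_example(dma_addr_t mapping)
{
	u32 truncated = mapping;	/* e.g. 0x123456000 becomes 0x23456000 */
	dma_addr_t kept = mapping;	/* full-width handle preserved */

	(void)truncated;
	(void)kept;
}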
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 7139659dd952..a16f8ded8f1d 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -173,10 +173,10 @@ int aac_get_config_status(struct aac_dev *dev)
173 int status = 0; 173 int status = 0;
174 struct fib * fibptr; 174 struct fib * fibptr;
175 175
176 if (!(fibptr = fib_alloc(dev))) 176 if (!(fibptr = aac_fib_alloc(dev)))
177 return -ENOMEM; 177 return -ENOMEM;
178 178
179 fib_init(fibptr); 179 aac_fib_init(fibptr);
180 { 180 {
181 struct aac_get_config_status *dinfo; 181 struct aac_get_config_status *dinfo;
182 dinfo = (struct aac_get_config_status *) fib_data(fibptr); 182 dinfo = (struct aac_get_config_status *) fib_data(fibptr);
@@ -186,7 +186,7 @@ int aac_get_config_status(struct aac_dev *dev)
186 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data)); 186 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
187 } 187 }
188 188
189 status = fib_send(ContainerCommand, 189 status = aac_fib_send(ContainerCommand,
190 fibptr, 190 fibptr,
191 sizeof (struct aac_get_config_status), 191 sizeof (struct aac_get_config_status),
192 FsaNormal, 192 FsaNormal,
@@ -209,30 +209,30 @@ int aac_get_config_status(struct aac_dev *dev)
209 status = -EINVAL; 209 status = -EINVAL;
210 } 210 }
211 } 211 }
212 fib_complete(fibptr); 212 aac_fib_complete(fibptr);
213 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ 213 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
214 if (status >= 0) { 214 if (status >= 0) {
215 if (commit == 1) { 215 if (commit == 1) {
216 struct aac_commit_config * dinfo; 216 struct aac_commit_config * dinfo;
217 fib_init(fibptr); 217 aac_fib_init(fibptr);
218 dinfo = (struct aac_commit_config *) fib_data(fibptr); 218 dinfo = (struct aac_commit_config *) fib_data(fibptr);
219 219
220 dinfo->command = cpu_to_le32(VM_ContainerConfig); 220 dinfo->command = cpu_to_le32(VM_ContainerConfig);
221 dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG); 221 dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
222 222
223 status = fib_send(ContainerCommand, 223 status = aac_fib_send(ContainerCommand,
224 fibptr, 224 fibptr,
225 sizeof (struct aac_commit_config), 225 sizeof (struct aac_commit_config),
226 FsaNormal, 226 FsaNormal,
227 1, 1, 227 1, 1,
228 NULL, NULL); 228 NULL, NULL);
229 fib_complete(fibptr); 229 aac_fib_complete(fibptr);
230 } else if (commit == 0) { 230 } else if (commit == 0) {
231 printk(KERN_WARNING 231 printk(KERN_WARNING
232 "aac_get_config_status: Foreign device configurations are being ignored\n"); 232 "aac_get_config_status: Foreign device configurations are being ignored\n");
233 } 233 }
234 } 234 }
235 fib_free(fibptr); 235 aac_fib_free(fibptr);
236 return status; 236 return status;
237} 237}
238 238
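The aacraid hunks from here on are a mechanical rename of the FIB helpers into an aac_-prefixed namespace (fib_alloc becomes aac_fib_alloc, and so on); the call sequence itself is unchanged. For orientation, the typical lifecycle under the new names, condensed from the code above (error handling trimmed, payload construction elided):

static int example_send(struct aac_dev *dev)
{
	struct fib *fibptr;
	int status;

	if (!(fibptr = aac_fib_alloc(dev)))
		return -ENOMEM;

	aac_fib_init(fibptr);
	/* ... build the command in fib_data(fibptr) ... */
	status = aac_fib_send(ContainerCommand, fibptr,
			      sizeof(struct aac_get_config_status),
			      FsaNormal, 1, 1, NULL, NULL);
	/* ... read the response from fib_data(fibptr) ... */
	aac_fib_complete(fibptr);
	aac_fib_free(fibptr);
	return status;
}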
@@ -255,15 +255,15 @@ int aac_get_containers(struct aac_dev *dev)
255 255
256 instance = dev->scsi_host_ptr->unique_id; 256 instance = dev->scsi_host_ptr->unique_id;
257 257
258 if (!(fibptr = fib_alloc(dev))) 258 if (!(fibptr = aac_fib_alloc(dev)))
259 return -ENOMEM; 259 return -ENOMEM;
260 260
261 fib_init(fibptr); 261 aac_fib_init(fibptr);
262 dinfo = (struct aac_get_container_count *) fib_data(fibptr); 262 dinfo = (struct aac_get_container_count *) fib_data(fibptr);
263 dinfo->command = cpu_to_le32(VM_ContainerConfig); 263 dinfo->command = cpu_to_le32(VM_ContainerConfig);
264 dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT); 264 dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
265 265
266 status = fib_send(ContainerCommand, 266 status = aac_fib_send(ContainerCommand,
267 fibptr, 267 fibptr,
268 sizeof (struct aac_get_container_count), 268 sizeof (struct aac_get_container_count),
269 FsaNormal, 269 FsaNormal,
@@ -272,7 +272,7 @@ int aac_get_containers(struct aac_dev *dev)
272 if (status >= 0) { 272 if (status >= 0) {
273 dresp = (struct aac_get_container_count_resp *)fib_data(fibptr); 273 dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
274 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); 274 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
275 fib_complete(fibptr); 275 aac_fib_complete(fibptr);
276 } 276 }
277 277
278 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) 278 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
@@ -280,7 +280,7 @@ int aac_get_containers(struct aac_dev *dev)
280 fsa_dev_ptr = (struct fsa_dev_info *) kmalloc( 280 fsa_dev_ptr = (struct fsa_dev_info *) kmalloc(
281 sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL); 281 sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL);
282 if (!fsa_dev_ptr) { 282 if (!fsa_dev_ptr) {
283 fib_free(fibptr); 283 aac_fib_free(fibptr);
284 return -ENOMEM; 284 return -ENOMEM;
285 } 285 }
286 memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers); 286 memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);
@@ -294,14 +294,14 @@ int aac_get_containers(struct aac_dev *dev)
294 294
295 fsa_dev_ptr[index].devname[0] = '\0'; 295 fsa_dev_ptr[index].devname[0] = '\0';
296 296
297 fib_init(fibptr); 297 aac_fib_init(fibptr);
298 dinfo = (struct aac_query_mount *) fib_data(fibptr); 298 dinfo = (struct aac_query_mount *) fib_data(fibptr);
299 299
300 dinfo->command = cpu_to_le32(VM_NameServe); 300 dinfo->command = cpu_to_le32(VM_NameServe);
301 dinfo->count = cpu_to_le32(index); 301 dinfo->count = cpu_to_le32(index);
302 dinfo->type = cpu_to_le32(FT_FILESYS); 302 dinfo->type = cpu_to_le32(FT_FILESYS);
303 303
304 status = fib_send(ContainerCommand, 304 status = aac_fib_send(ContainerCommand,
305 fibptr, 305 fibptr,
306 sizeof (struct aac_query_mount), 306 sizeof (struct aac_query_mount),
307 FsaNormal, 307 FsaNormal,
@@ -319,7 +319,7 @@ int aac_get_containers(struct aac_dev *dev)
319 dinfo->count = cpu_to_le32(index); 319 dinfo->count = cpu_to_le32(index);
320 dinfo->type = cpu_to_le32(FT_FILESYS); 320 dinfo->type = cpu_to_le32(FT_FILESYS);
321 321
322 if (fib_send(ContainerCommand, 322 if (aac_fib_send(ContainerCommand,
323 fibptr, 323 fibptr,
324 sizeof(struct aac_query_mount), 324 sizeof(struct aac_query_mount),
325 FsaNormal, 325 FsaNormal,
@@ -347,7 +347,7 @@ int aac_get_containers(struct aac_dev *dev)
347 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) 347 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
348 fsa_dev_ptr[index].ro = 1; 348 fsa_dev_ptr[index].ro = 1;
349 } 349 }
350 fib_complete(fibptr); 350 aac_fib_complete(fibptr);
351 /* 351 /*
352 * If there are no more containers, then stop asking. 352 * If there are no more containers, then stop asking.
353 */ 353 */
@@ -355,7 +355,7 @@ int aac_get_containers(struct aac_dev *dev)
355 break; 355 break;
356 } 356 }
357 } 357 }
358 fib_free(fibptr); 358 aac_fib_free(fibptr);
359 return status; 359 return status;
360} 360}
361 361
@@ -413,8 +413,8 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
413 413
414 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 414 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
415 415
416 fib_complete(fibptr); 416 aac_fib_complete(fibptr);
417 fib_free(fibptr); 417 aac_fib_free(fibptr);
418 scsicmd->scsi_done(scsicmd); 418 scsicmd->scsi_done(scsicmd);
419} 419}
420 420
@@ -430,10 +430,10 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
430 430
431 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 431 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
432 432
433 if (!(cmd_fibcontext = fib_alloc(dev))) 433 if (!(cmd_fibcontext = aac_fib_alloc(dev)))
434 return -ENOMEM; 434 return -ENOMEM;
435 435
436 fib_init(cmd_fibcontext); 436 aac_fib_init(cmd_fibcontext);
437 dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); 437 dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
438 438
439 dinfo->command = cpu_to_le32(VM_ContainerConfig); 439 dinfo->command = cpu_to_le32(VM_ContainerConfig);
@@ -441,7 +441,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
441 dinfo->cid = cpu_to_le32(cid); 441 dinfo->cid = cpu_to_le32(cid);
442 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data)); 442 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
443 443
444 status = fib_send(ContainerCommand, 444 status = aac_fib_send(ContainerCommand,
445 cmd_fibcontext, 445 cmd_fibcontext,
446 sizeof (struct aac_get_name), 446 sizeof (struct aac_get_name),
447 FsaNormal, 447 FsaNormal,
@@ -455,14 +455,14 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
455 if (status == -EINPROGRESS) 455 if (status == -EINPROGRESS)
456 return 0; 456 return 0;
457 457
458 printk(KERN_WARNING "aac_get_container_name: fib_send failed with status: %d.\n", status); 458 printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
459 fib_complete(cmd_fibcontext); 459 aac_fib_complete(cmd_fibcontext);
460 fib_free(cmd_fibcontext); 460 aac_fib_free(cmd_fibcontext);
461 return -1; 461 return -1;
462} 462}
463 463
464/** 464/**
465 * probe_container - query a logical volume 465 * aac_probe_container - query a logical volume
466 * @dev: device to query 466 * @dev: device to query
467 * @cid: container identifier 467 * @cid: container identifier
468 * 468 *
@@ -470,7 +470,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
470 * is updated in the struct fsa_dev_info structure rather than returned. 470 * is updated in the struct fsa_dev_info structure rather than returned.
471 */ 471 */
472 472
473int probe_container(struct aac_dev *dev, int cid) 473int aac_probe_container(struct aac_dev *dev, int cid)
474{ 474{
475 struct fsa_dev_info *fsa_dev_ptr; 475 struct fsa_dev_info *fsa_dev_ptr;
476 int status; 476 int status;
@@ -482,10 +482,10 @@ int probe_container(struct aac_dev *dev, int cid)
482 fsa_dev_ptr = dev->fsa_dev; 482 fsa_dev_ptr = dev->fsa_dev;
483 instance = dev->scsi_host_ptr->unique_id; 483 instance = dev->scsi_host_ptr->unique_id;
484 484
485 if (!(fibptr = fib_alloc(dev))) 485 if (!(fibptr = aac_fib_alloc(dev)))
486 return -ENOMEM; 486 return -ENOMEM;
487 487
488 fib_init(fibptr); 488 aac_fib_init(fibptr);
489 489
490 dinfo = (struct aac_query_mount *)fib_data(fibptr); 490 dinfo = (struct aac_query_mount *)fib_data(fibptr);
491 491
@@ -493,14 +493,14 @@ int probe_container(struct aac_dev *dev, int cid)
493 dinfo->count = cpu_to_le32(cid); 493 dinfo->count = cpu_to_le32(cid);
494 dinfo->type = cpu_to_le32(FT_FILESYS); 494 dinfo->type = cpu_to_le32(FT_FILESYS);
495 495
496 status = fib_send(ContainerCommand, 496 status = aac_fib_send(ContainerCommand,
497 fibptr, 497 fibptr,
498 sizeof(struct aac_query_mount), 498 sizeof(struct aac_query_mount),
499 FsaNormal, 499 FsaNormal,
500 1, 1, 500 1, 1,
501 NULL, NULL); 501 NULL, NULL);
502 if (status < 0) { 502 if (status < 0) {
503 printk(KERN_WARNING "aacraid: probe_container query failed.\n"); 503 printk(KERN_WARNING "aacraid: aac_probe_container query failed.\n");
504 goto error; 504 goto error;
505 } 505 }
506 506
@@ -512,7 +512,7 @@ int probe_container(struct aac_dev *dev, int cid)
512 dinfo->count = cpu_to_le32(cid); 512 dinfo->count = cpu_to_le32(cid);
513 dinfo->type = cpu_to_le32(FT_FILESYS); 513 dinfo->type = cpu_to_le32(FT_FILESYS);
514 514
515 if (fib_send(ContainerCommand, 515 if (aac_fib_send(ContainerCommand,
516 fibptr, 516 fibptr,
517 sizeof(struct aac_query_mount), 517 sizeof(struct aac_query_mount),
518 FsaNormal, 518 FsaNormal,
@@ -535,8 +535,8 @@ int probe_container(struct aac_dev *dev, int cid)
535 } 535 }
536 536
537error: 537error:
538 fib_complete(fibptr); 538 aac_fib_complete(fibptr);
539 fib_free(fibptr); 539 aac_fib_free(fibptr);
540 540
541 return status; 541 return status;
542} 542}
@@ -700,14 +700,14 @@ int aac_get_adapter_info(struct aac_dev* dev)
700 struct aac_bus_info *command; 700 struct aac_bus_info *command;
701 struct aac_bus_info_response *bus_info; 701 struct aac_bus_info_response *bus_info;
702 702
703 if (!(fibptr = fib_alloc(dev))) 703 if (!(fibptr = aac_fib_alloc(dev)))
704 return -ENOMEM; 704 return -ENOMEM;
705 705
706 fib_init(fibptr); 706 aac_fib_init(fibptr);
707 info = (struct aac_adapter_info *) fib_data(fibptr); 707 info = (struct aac_adapter_info *) fib_data(fibptr);
708 memset(info,0,sizeof(*info)); 708 memset(info,0,sizeof(*info));
709 709
710 rcode = fib_send(RequestAdapterInfo, 710 rcode = aac_fib_send(RequestAdapterInfo,
711 fibptr, 711 fibptr,
712 sizeof(*info), 712 sizeof(*info),
713 FsaNormal, 713 FsaNormal,
@@ -716,8 +716,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
716 NULL); 716 NULL);
717 717
718 if (rcode < 0) { 718 if (rcode < 0) {
719 fib_complete(fibptr); 719 aac_fib_complete(fibptr);
720 fib_free(fibptr); 720 aac_fib_free(fibptr);
721 return rcode; 721 return rcode;
722 } 722 }
723 memcpy(&dev->adapter_info, info, sizeof(*info)); 723 memcpy(&dev->adapter_info, info, sizeof(*info));
@@ -725,13 +725,13 @@ int aac_get_adapter_info(struct aac_dev* dev)
725 if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) { 725 if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
726 struct aac_supplement_adapter_info * info; 726 struct aac_supplement_adapter_info * info;
727 727
728 fib_init(fibptr); 728 aac_fib_init(fibptr);
729 729
730 info = (struct aac_supplement_adapter_info *) fib_data(fibptr); 730 info = (struct aac_supplement_adapter_info *) fib_data(fibptr);
731 731
732 memset(info,0,sizeof(*info)); 732 memset(info,0,sizeof(*info));
733 733
734 rcode = fib_send(RequestSupplementAdapterInfo, 734 rcode = aac_fib_send(RequestSupplementAdapterInfo,
735 fibptr, 735 fibptr,
736 sizeof(*info), 736 sizeof(*info),
737 FsaNormal, 737 FsaNormal,
@@ -748,7 +748,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
748 * GetBusInfo 748 * GetBusInfo
749 */ 749 */
750 750
751 fib_init(fibptr); 751 aac_fib_init(fibptr);
752 752
753 bus_info = (struct aac_bus_info_response *) fib_data(fibptr); 753 bus_info = (struct aac_bus_info_response *) fib_data(fibptr);
754 754
@@ -761,7 +761,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
761 command->MethodId = cpu_to_le32(1); 761 command->MethodId = cpu_to_le32(1);
762 command->CtlCmd = cpu_to_le32(GetBusInfo); 762 command->CtlCmd = cpu_to_le32(GetBusInfo);
763 763
764 rcode = fib_send(ContainerCommand, 764 rcode = aac_fib_send(ContainerCommand,
765 fibptr, 765 fibptr,
766 sizeof (*bus_info), 766 sizeof (*bus_info),
767 FsaNormal, 767 FsaNormal,
@@ -891,8 +891,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
891 } 891 }
892 } 892 }
893 893
894 fib_complete(fibptr); 894 aac_fib_complete(fibptr);
895 fib_free(fibptr); 895 aac_fib_free(fibptr);
896 896
897 return rcode; 897 return rcode;
898} 898}
@@ -976,8 +976,8 @@ static void io_callback(void *context, struct fib * fibptr)
976 ? sizeof(scsicmd->sense_buffer) 976 ? sizeof(scsicmd->sense_buffer)
977 : sizeof(dev->fsa_dev[cid].sense_data)); 977 : sizeof(dev->fsa_dev[cid].sense_data));
978 } 978 }
979 fib_complete(fibptr); 979 aac_fib_complete(fibptr);
980 fib_free(fibptr); 980 aac_fib_free(fibptr);
981 981
982 scsicmd->scsi_done(scsicmd); 982 scsicmd->scsi_done(scsicmd);
983} 983}
@@ -1062,11 +1062,11 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1062 /* 1062 /*
1063 * Allocate and initialize a Fib 1063 * Allocate and initialize a Fib
1064 */ 1064 */
1065 if (!(cmd_fibcontext = fib_alloc(dev))) { 1065 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
1066 return -1; 1066 return -1;
1067 } 1067 }
1068 1068
1069 fib_init(cmd_fibcontext); 1069 aac_fib_init(cmd_fibcontext);
1070 1070
1071 if (dev->raw_io_interface) { 1071 if (dev->raw_io_interface) {
1072 struct aac_raw_io *readcmd; 1072 struct aac_raw_io *readcmd;
@@ -1086,7 +1086,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1086 /* 1086 /*
1087 * Now send the Fib to the adapter 1087 * Now send the Fib to the adapter
1088 */ 1088 */
1089 status = fib_send(ContainerRawIo, 1089 status = aac_fib_send(ContainerRawIo,
1090 cmd_fibcontext, 1090 cmd_fibcontext,
1091 fibsize, 1091 fibsize,
1092 FsaNormal, 1092 FsaNormal,
@@ -1112,7 +1112,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1112 /* 1112 /*
1113 * Now send the Fib to the adapter 1113 * Now send the Fib to the adapter
1114 */ 1114 */
1115 status = fib_send(ContainerCommand64, 1115 status = aac_fib_send(ContainerCommand64,
1116 cmd_fibcontext, 1116 cmd_fibcontext,
1117 fibsize, 1117 fibsize,
1118 FsaNormal, 1118 FsaNormal,
@@ -1136,7 +1136,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1136 /* 1136 /*
1137 * Now send the Fib to the adapter 1137 * Now send the Fib to the adapter
1138 */ 1138 */
1139 status = fib_send(ContainerCommand, 1139 status = aac_fib_send(ContainerCommand,
1140 cmd_fibcontext, 1140 cmd_fibcontext,
1141 fibsize, 1141 fibsize,
1142 FsaNormal, 1142 FsaNormal,
@@ -1153,14 +1153,14 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1153 if (status == -EINPROGRESS) 1153 if (status == -EINPROGRESS)
1154 return 0; 1154 return 0;
1155 1155
1156 printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status); 1156 printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
1157 /* 1157 /*
1158 * For some reason, the Fib didn't queue, return QUEUE_FULL 1158 * For some reason, the Fib didn't queue, return QUEUE_FULL
1159 */ 1159 */
1160 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; 1160 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
1161 scsicmd->scsi_done(scsicmd); 1161 scsicmd->scsi_done(scsicmd);
1162 fib_complete(cmd_fibcontext); 1162 aac_fib_complete(cmd_fibcontext);
1163 fib_free(cmd_fibcontext); 1163 aac_fib_free(cmd_fibcontext);
1164 return 0; 1164 return 0;
1165} 1165}
1166 1166
@@ -1228,12 +1228,12 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1228 /* 1228 /*
1229 * Allocate and initialize a Fib then setup a BlockWrite command 1229 * Allocate and initialize a Fib then setup a BlockWrite command
1230 */ 1230 */
1231 if (!(cmd_fibcontext = fib_alloc(dev))) { 1231 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
1232 scsicmd->result = DID_ERROR << 16; 1232 scsicmd->result = DID_ERROR << 16;
1233 scsicmd->scsi_done(scsicmd); 1233 scsicmd->scsi_done(scsicmd);
1234 return 0; 1234 return 0;
1235 } 1235 }
1236 fib_init(cmd_fibcontext); 1236 aac_fib_init(cmd_fibcontext);
1237 1237
1238 if (dev->raw_io_interface) { 1238 if (dev->raw_io_interface) {
1239 struct aac_raw_io *writecmd; 1239 struct aac_raw_io *writecmd;
@@ -1253,7 +1253,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1253 /* 1253 /*
1254 * Now send the Fib to the adapter 1254 * Now send the Fib to the adapter
1255 */ 1255 */
1256 status = fib_send(ContainerRawIo, 1256 status = aac_fib_send(ContainerRawIo,
1257 cmd_fibcontext, 1257 cmd_fibcontext,
1258 fibsize, 1258 fibsize,
1259 FsaNormal, 1259 FsaNormal,
@@ -1279,7 +1279,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1279 /* 1279 /*
1280 * Now send the Fib to the adapter 1280 * Now send the Fib to the adapter
1281 */ 1281 */
1282 status = fib_send(ContainerCommand64, 1282 status = aac_fib_send(ContainerCommand64,
1283 cmd_fibcontext, 1283 cmd_fibcontext,
1284 fibsize, 1284 fibsize,
1285 FsaNormal, 1285 FsaNormal,
@@ -1305,7 +1305,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1305 /* 1305 /*
1306 * Now send the Fib to the adapter 1306 * Now send the Fib to the adapter
1307 */ 1307 */
1308 status = fib_send(ContainerCommand, 1308 status = aac_fib_send(ContainerCommand,
1309 cmd_fibcontext, 1309 cmd_fibcontext,
1310 fibsize, 1310 fibsize,
1311 FsaNormal, 1311 FsaNormal,
@@ -1322,15 +1322,15 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1322 return 0; 1322 return 0;
1323 } 1323 }
1324 1324
1325 printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status); 1325 printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
1326 /* 1326 /*
1327 * For some reason, the Fib didn't queue, return QUEUE_FULL 1327 * For some reason, the Fib didn't queue, return QUEUE_FULL
1328 */ 1328 */
1329 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; 1329 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
1330 scsicmd->scsi_done(scsicmd); 1330 scsicmd->scsi_done(scsicmd);
1331 1331
1332 fib_complete(cmd_fibcontext); 1332 aac_fib_complete(cmd_fibcontext);
1333 fib_free(cmd_fibcontext); 1333 aac_fib_free(cmd_fibcontext);
1334 return 0; 1334 return 0;
1335} 1335}
1336 1336
@@ -1369,8 +1369,8 @@ static void synchronize_callback(void *context, struct fib *fibptr)
1369 sizeof(cmd->sense_buffer))); 1369 sizeof(cmd->sense_buffer)));
1370 } 1370 }
1371 1371
1372 fib_complete(fibptr); 1372 aac_fib_complete(fibptr);
1373 fib_free(fibptr); 1373 aac_fib_free(fibptr);
1374 cmd->scsi_done(cmd); 1374 cmd->scsi_done(cmd);
1375} 1375}
1376 1376
@@ -1407,10 +1407,10 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1407 * Allocate and initialize a Fib 1407 * Allocate and initialize a Fib
1408 */ 1408 */
1409 if (!(cmd_fibcontext = 1409 if (!(cmd_fibcontext =
1410 fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) 1410 aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata)))
1411 return SCSI_MLQUEUE_HOST_BUSY; 1411 return SCSI_MLQUEUE_HOST_BUSY;
1412 1412
1413 fib_init(cmd_fibcontext); 1413 aac_fib_init(cmd_fibcontext);
1414 1414
1415 synchronizecmd = fib_data(cmd_fibcontext); 1415 synchronizecmd = fib_data(cmd_fibcontext);
1416 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig); 1416 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
@@ -1422,7 +1422,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1422 /* 1422 /*
1423 * Now send the Fib to the adapter 1423 * Now send the Fib to the adapter
1424 */ 1424 */
1425 status = fib_send(ContainerCommand, 1425 status = aac_fib_send(ContainerCommand,
1426 cmd_fibcontext, 1426 cmd_fibcontext,
1427 sizeof(struct aac_synchronize), 1427 sizeof(struct aac_synchronize),
1428 FsaNormal, 1428 FsaNormal,
@@ -1437,9 +1437,9 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1437 return 0; 1437 return 0;
1438 1438
1439 printk(KERN_WARNING 1439 printk(KERN_WARNING
1440 "aac_synchronize: fib_send failed with status: %d.\n", status); 1440 "aac_synchronize: aac_fib_send failed with status: %d.\n", status);
1441 fib_complete(cmd_fibcontext); 1441 aac_fib_complete(cmd_fibcontext);
1442 fib_free(cmd_fibcontext); 1442 aac_fib_free(cmd_fibcontext);
1443 return SCSI_MLQUEUE_HOST_BUSY; 1443 return SCSI_MLQUEUE_HOST_BUSY;
1444} 1444}
1445 1445
@@ -1465,7 +1465,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1465 * itself. 1465 * itself.
1466 */ 1466 */
1467 if (scmd_id(scsicmd) != host->this_id) { 1467 if (scmd_id(scsicmd) != host->this_id) {
1468 if ((scsicmd->device->channel == 0) ){ 1468 if ((scsicmd->device->channel == CONTAINER_CHANNEL)) {
1469 if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){ 1469 if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){
1470 scsicmd->result = DID_NO_CONNECT << 16; 1470 scsicmd->result = DID_NO_CONNECT << 16;
1471 scsicmd->scsi_done(scsicmd); 1471 scsicmd->scsi_done(scsicmd);
@@ -1488,7 +1488,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1488 case READ_CAPACITY: 1488 case READ_CAPACITY:
1489 case TEST_UNIT_READY: 1489 case TEST_UNIT_READY:
1490 spin_unlock_irq(host->host_lock); 1490 spin_unlock_irq(host->host_lock);
1491 probe_container(dev, cid); 1491 aac_probe_container(dev, cid);
1492 if ((fsa_dev_ptr[cid].valid & 1) == 0) 1492 if ((fsa_dev_ptr[cid].valid & 1) == 0)
1493 fsa_dev_ptr[cid].valid = 0; 1493 fsa_dev_ptr[cid].valid = 0;
1494 spin_lock_irq(host->host_lock); 1494 spin_lock_irq(host->host_lock);
@@ -1935,33 +1935,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
1935 case SRB_STATUS_ERROR_RECOVERY: 1935 case SRB_STATUS_ERROR_RECOVERY:
1936 case SRB_STATUS_PENDING: 1936 case SRB_STATUS_PENDING:
1937 case SRB_STATUS_SUCCESS: 1937 case SRB_STATUS_SUCCESS:
1938 if(scsicmd->cmnd[0] == INQUIRY ){ 1938 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1939 u8 b;
1940 u8 b1;
1941 /* We can't expose disk devices because we can't tell whether they
1942 * are the raw container drives or stand alone drives. If they have
1943 * the removable bit set then we should expose them though.
1944 */
1945 b = (*(u8*)scsicmd->buffer)&0x1f;
1946 b1 = ((u8*)scsicmd->buffer)[1];
1947 if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
1948 || (b==TYPE_DISK && (b1&0x80)) ){
1949 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1950 /*
1951 * We will allow disk devices if in RAID/SCSI mode and
1952 * the channel is 2
1953 */
1954 } else if ((dev->raid_scsi_mode) &&
1955 (scmd_channel(scsicmd) == 2)) {
1956 scsicmd->result = DID_OK << 16 |
1957 COMMAND_COMPLETE << 8;
1958 } else {
1959 scsicmd->result = DID_NO_CONNECT << 16 |
1960 COMMAND_COMPLETE << 8;
1961 }
1962 } else {
1963 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1964 }
1965 break; 1939 break;
1966 case SRB_STATUS_DATA_OVERRUN: 1940 case SRB_STATUS_DATA_OVERRUN:
1967 switch(scsicmd->cmnd[0]){ 1941 switch(scsicmd->cmnd[0]){
@@ -1981,28 +1955,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
1981 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; 1955 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
1982 break; 1956 break;
1983 case INQUIRY: { 1957 case INQUIRY: {
1984 u8 b; 1958 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1985 u8 b1;
1986 /* We can't expose disk devices because we can't tell whether they
1987 * are the raw container drives or stand alone drives
1988 */
1989 b = (*(u8*)scsicmd->buffer)&0x0f;
1990 b1 = ((u8*)scsicmd->buffer)[1];
1991 if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
1992 || (b==TYPE_DISK && (b1&0x80)) ){
1993 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1994 /*
1995 * We will allow disk devices if in RAID/SCSI mode and
1996 * the channel is 2
1997 */
1998 } else if ((dev->raid_scsi_mode) &&
1999 (scmd_channel(scsicmd) == 2)) {
2000 scsicmd->result = DID_OK << 16 |
2001 COMMAND_COMPLETE << 8;
2002 } else {
2003 scsicmd->result = DID_NO_CONNECT << 16 |
2004 COMMAND_COMPLETE << 8;
2005 }
2006 break; 1959 break;
2007 } 1960 }
2008 default: 1961 default:
@@ -2089,8 +2042,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
2089 */ 2042 */
2090 scsicmd->result |= le32_to_cpu(srbreply->scsi_status); 2043 scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
2091 2044
2092 fib_complete(fibptr); 2045 aac_fib_complete(fibptr);
2093 fib_free(fibptr); 2046 aac_fib_free(fibptr);
2094 scsicmd->scsi_done(scsicmd); 2047 scsicmd->scsi_done(scsicmd);
2095} 2048}
2096 2049
@@ -2142,10 +2095,10 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
2142 /* 2095 /*
2143 * Allocate and initialize a Fib then setup a BlockWrite command 2096 * Allocate and initialize a Fib then setup a BlockWrite command
2144 */ 2097 */
2145 if (!(cmd_fibcontext = fib_alloc(dev))) { 2098 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
2146 return -1; 2099 return -1;
2147 } 2100 }
2148 fib_init(cmd_fibcontext); 2101 aac_fib_init(cmd_fibcontext);
2149 2102
2150 srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext); 2103 srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
2151 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); 2104 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
@@ -2179,7 +2132,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
2179 /* 2132 /*
2180 * Now send the Fib to the adapter 2133 * Now send the Fib to the adapter
2181 */ 2134 */
2182 status = fib_send(ScsiPortCommand64, cmd_fibcontext, 2135 status = aac_fib_send(ScsiPortCommand64, cmd_fibcontext,
2183 fibsize, FsaNormal, 0, 1, 2136 fibsize, FsaNormal, 0, 1,
2184 (fib_callback) aac_srb_callback, 2137 (fib_callback) aac_srb_callback,
2185 (void *) scsicmd); 2138 (void *) scsicmd);
@@ -2201,7 +2154,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
2201 /* 2154 /*
2202 * Now send the Fib to the adapter 2155 * Now send the Fib to the adapter
2203 */ 2156 */
2204 status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1, 2157 status = aac_fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
2205 (fib_callback) aac_srb_callback, (void *) scsicmd); 2158 (fib_callback) aac_srb_callback, (void *) scsicmd);
2206 } 2159 }
2207 /* 2160 /*
@@ -2211,9 +2164,9 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
2211 return 0; 2164 return 0;
2212 } 2165 }
2213 2166
2214 printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status); 2167 printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
2215 fib_complete(cmd_fibcontext); 2168 aac_fib_complete(cmd_fibcontext);
2216 fib_free(cmd_fibcontext); 2169 aac_fib_free(cmd_fibcontext);
2217 2170
2218 return -1; 2171 return -1;
2219} 2172}
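Both INQUIRY filters removed above encode the same policy: hide fixed disks that may be raw container members, except in RAID/SCSI mode on channel 2. That policy now lives in aac_slave_configure() via sdev->no_uld_attach (see the linit.c hunk later in this patch). For reference, a simplified sketch of the byte decoding the deleted code relied on; the function and parameter names are illustrative, not part of the driver:

	static int aac_example_expose(const u8 *inq, int raid_scsi_mode, int channel)
	{
		u8 type = inq[0] & 0x1f;	/* peripheral device type (TYPE_DISK, TYPE_TAPE, ...) */
		u8 rmb  = inq[1] & 0x80;	/* removable media bit */

		if (type != TYPE_DISK || rmb)
			return 1;		/* non-disk or removable devices stay visible */
		return raid_scsi_mode && channel == 2;
	}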
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 66dbb6d2c506..2d430b7e8cf4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1774,16 +1774,16 @@ static inline u32 cap_to_cyls(sector_t capacity, u32 divisor)
1774struct scsi_cmnd; 1774struct scsi_cmnd;
1775 1775
1776const char *aac_driverinfo(struct Scsi_Host *); 1776const char *aac_driverinfo(struct Scsi_Host *);
1777struct fib *fib_alloc(struct aac_dev *dev); 1777struct fib *aac_fib_alloc(struct aac_dev *dev);
1778int fib_setup(struct aac_dev *dev); 1778int aac_fib_setup(struct aac_dev *dev);
1779void fib_map_free(struct aac_dev *dev); 1779void aac_fib_map_free(struct aac_dev *dev);
1780void fib_free(struct fib * context); 1780void aac_fib_free(struct fib * context);
1781void fib_init(struct fib * context); 1781void aac_fib_init(struct fib * context);
1782void aac_printf(struct aac_dev *dev, u32 val); 1782void aac_printf(struct aac_dev *dev, u32 val);
1783int fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt); 1783int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
1784int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry); 1784int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
1785void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); 1785void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
1786int fib_complete(struct fib * context); 1786int aac_fib_complete(struct fib * context);
1787#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data) 1787#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data)
1788struct aac_dev *aac_init_adapter(struct aac_dev *dev); 1788struct aac_dev *aac_init_adapter(struct aac_dev *dev);
1789int aac_get_config_status(struct aac_dev *dev); 1789int aac_get_config_status(struct aac_dev *dev);
@@ -1799,11 +1799,11 @@ unsigned int aac_command_normal(struct aac_queue * q);
1799unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); 1799unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
1800int aac_command_thread(struct aac_dev * dev); 1800int aac_command_thread(struct aac_dev * dev);
1801int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); 1801int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
1802int fib_adapter_complete(struct fib * fibptr, unsigned short size); 1802int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size);
1803struct aac_driver_ident* aac_get_driver_ident(int devtype); 1803struct aac_driver_ident* aac_get_driver_ident(int devtype);
1804int aac_get_adapter_info(struct aac_dev* dev); 1804int aac_get_adapter_info(struct aac_dev* dev);
1805int aac_send_shutdown(struct aac_dev *dev); 1805int aac_send_shutdown(struct aac_dev *dev);
1806int probe_container(struct aac_dev *dev, int cid); 1806int aac_probe_container(struct aac_dev *dev, int cid);
1807extern int numacb; 1807extern int numacb;
1808extern int acbsize; 1808extern int acbsize;
1809extern char aac_driver_version[]; 1809extern char aac_driver_version[];
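The renamed entry points keep their calling conventions, so existing call sites only change prefix. A minimal sketch of the synchronous FIB lifecycle under the new names, modelled on the aac_send_shutdown() path in comminit.c below; the function name is illustrative and error handling is abbreviated:

	static int aac_example_close_all(struct aac_dev *dev)
	{
		struct aac_close *cmd;
		struct fib *fibptr;
		int status;

		fibptr = aac_fib_alloc(dev);		/* NULL when the FIB pool is empty */
		if (!fibptr)
			return -ENOMEM;
		aac_fib_init(fibptr);

		cmd = (struct aac_close *) fib_data(fibptr);
		cmd->command = cpu_to_le32(VM_CloseAll);
		cmd->cid = cpu_to_le32(0xffffffff);

		status = aac_fib_send(ContainerCommand, fibptr,
				      sizeof(struct aac_close), FsaNormal,
				      1, 1,			/* wait for the reply */
				      NULL, NULL);		/* no callback: synchronous */
		if (status == 0)
			aac_fib_complete(fibptr);
		aac_fib_free(fibptr);
		return status;
	}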
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 4fe79cd7c957..47fefca72695 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
63 unsigned size; 63 unsigned size;
64 int retval; 64 int retval;
65 65
66 fibptr = fib_alloc(dev); 66 fibptr = aac_fib_alloc(dev);
67 if(fibptr == NULL) { 67 if(fibptr == NULL) {
68 return -ENOMEM; 68 return -ENOMEM;
69 } 69 }
@@ -73,7 +73,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
73 * First copy in the header so that we can check the size field. 73 * First copy in the header so that we can check the size field.
74 */ 74 */
75 if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) { 75 if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
76 fib_free(fibptr); 76 aac_fib_free(fibptr);
77 return -EFAULT; 77 return -EFAULT;
78 } 78 }
79 /* 79 /*
@@ -110,13 +110,13 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
110 */ 110 */
111 kfib->header.XferState = 0; 111 kfib->header.XferState = 0;
112 } else { 112 } else {
113 retval = fib_send(le16_to_cpu(kfib->header.Command), fibptr, 113 retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
114 le16_to_cpu(kfib->header.Size) , FsaNormal, 114 le16_to_cpu(kfib->header.Size) , FsaNormal,
115 1, 1, NULL, NULL); 115 1, 1, NULL, NULL);
116 if (retval) { 116 if (retval) {
117 goto cleanup; 117 goto cleanup;
118 } 118 }
119 if (fib_complete(fibptr) != 0) { 119 if (aac_fib_complete(fibptr) != 0) {
120 retval = -EINVAL; 120 retval = -EINVAL;
121 goto cleanup; 121 goto cleanup;
122 } 122 }
@@ -138,7 +138,7 @@ cleanup:
138 fibptr->hw_fib_pa = hw_fib_pa; 138 fibptr->hw_fib_pa = hw_fib_pa;
139 fibptr->hw_fib = hw_fib; 139 fibptr->hw_fib = hw_fib;
140 } 140 }
141 fib_free(fibptr); 141 aac_fib_free(fibptr);
142 return retval; 142 return retval;
143} 143}
144 144
@@ -464,10 +464,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
464 /* 464 /*
465 * Allocate and initialize a Fib then setup a BlockWrite command 465 * Allocate and initialize a Fib then setup a BlockWrite command
466 */ 466 */
467 if (!(srbfib = fib_alloc(dev))) { 467 if (!(srbfib = aac_fib_alloc(dev))) {
468 return -ENOMEM; 468 return -ENOMEM;
469 } 469 }
470 fib_init(srbfib); 470 aac_fib_init(srbfib);
471 471
472 srbcmd = (struct aac_srb*) fib_data(srbfib); 472 srbcmd = (struct aac_srb*) fib_data(srbfib);
473 473
@@ -601,7 +601,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
601 601
602 srbcmd->count = cpu_to_le32(byte_count); 602 srbcmd->count = cpu_to_le32(byte_count);
603 psg->count = cpu_to_le32(sg_indx+1); 603 psg->count = cpu_to_le32(sg_indx+1);
604 status = fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); 604 status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
605 } else { 605 } else {
606 struct user_sgmap* upsg = &user_srbcmd->sg; 606 struct user_sgmap* upsg = &user_srbcmd->sg;
607 struct sgmap* psg = &srbcmd->sg; 607 struct sgmap* psg = &srbcmd->sg;
@@ -649,7 +649,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
649 } 649 }
650 srbcmd->count = cpu_to_le32(byte_count); 650 srbcmd->count = cpu_to_le32(byte_count);
651 psg->count = cpu_to_le32(sg_indx+1); 651 psg->count = cpu_to_le32(sg_indx+1);
652 status = fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); 652 status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
653 } 653 }
654 654
655 if (status != 0){ 655 if (status != 0){
@@ -684,8 +684,8 @@ cleanup:
684 for(i=0; i <= sg_indx; i++){ 684 for(i=0; i <= sg_indx; i++){
685 kfree(sg_list[i]); 685 kfree(sg_list[i]);
686 } 686 }
687 fib_complete(srbfib); 687 aac_fib_complete(srbfib);
688 fib_free(srbfib); 688 aac_fib_free(srbfib);
689 689
690 return rcode; 690 return rcode;
691} 691}
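ioctl_send_fib() copies only the FIB header from user space first, precisely so the size field can be checked before the rest of the request is trusted. A hedged sketch of that two-stage pattern; MAX_FIB_SIZE stands in for whatever bound the driver actually enforces (the check itself is outside this hunk), and the helper name is illustrative:

	static int aac_example_copy_user_fib(struct hw_fib *kfib, void __user *arg)
	{
		unsigned int size;

		/* Stage 1: header only, so header.Size can be validated. */
		if (copy_from_user(kfib, arg, sizeof(struct aac_fibhdr)))
			return -EFAULT;

		size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
		if (size > MAX_FIB_SIZE)	/* placeholder bound */
			return -EINVAL;

		/* Stage 2: the full, now-validated request. */
		if (copy_from_user(kfib, arg, size))
			return -EFAULT;
		return 0;
	}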
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 82821d331c07..1628d094943d 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -185,17 +185,17 @@ int aac_send_shutdown(struct aac_dev * dev)
185 struct aac_close *cmd; 185 struct aac_close *cmd;
186 int status; 186 int status;
187 187
188 fibctx = fib_alloc(dev); 188 fibctx = aac_fib_alloc(dev);
189 if (!fibctx) 189 if (!fibctx)
190 return -ENOMEM; 190 return -ENOMEM;
191 fib_init(fibctx); 191 aac_fib_init(fibctx);
192 192
193 cmd = (struct aac_close *) fib_data(fibctx); 193 cmd = (struct aac_close *) fib_data(fibctx);
194 194
195 cmd->command = cpu_to_le32(VM_CloseAll); 195 cmd->command = cpu_to_le32(VM_CloseAll);
196 cmd->cid = cpu_to_le32(0xffffffff); 196 cmd->cid = cpu_to_le32(0xffffffff);
197 197
198 status = fib_send(ContainerCommand, 198 status = aac_fib_send(ContainerCommand,
199 fibctx, 199 fibctx,
200 sizeof(struct aac_close), 200 sizeof(struct aac_close),
201 FsaNormal, 201 FsaNormal,
@@ -203,8 +203,8 @@ int aac_send_shutdown(struct aac_dev * dev)
203 NULL, NULL); 203 NULL, NULL);
204 204
205 if (status == 0) 205 if (status == 0)
206 fib_complete(fibctx); 206 aac_fib_complete(fibctx);
207 fib_free(fibctx); 207 aac_fib_free(fibctx);
208 return status; 208 return status;
209} 209}
210 210
@@ -427,7 +427,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
427 /* 427 /*
428 * Initialize the list of fibs 428 * Initialize the list of fibs
429 */ 429 */
430 if(fib_setup(dev)<0){ 430 if (aac_fib_setup(dev) < 0) {
431 kfree(dev->queues); 431 kfree(dev->queues);
432 return NULL; 432 return NULL;
433 } 433 }
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 014cc8d54a9f..609fd19b1844 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -67,27 +67,27 @@ static int fib_map_alloc(struct aac_dev *dev)
67} 67}
68 68
69/** 69/**
70 * fib_map_free - free the fib objects 70 * aac_fib_map_free - free the fib objects
71 * @dev: Adapter to free 71 * @dev: Adapter to free
72 * 72 *
73 * Free the PCI mappings and the memory allocated for FIB blocks 73 * Free the PCI mappings and the memory allocated for FIB blocks
74 * on this adapter. 74 * on this adapter.
75 */ 75 */
76 76
77void fib_map_free(struct aac_dev *dev) 77void aac_fib_map_free(struct aac_dev *dev)
78{ 78{
79 pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa); 79 pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa);
80} 80}
81 81
82/** 82/**
83 * fib_setup - setup the fibs 83 * aac_fib_setup - setup the fibs
84 * @dev: Adapter to set up 84 * @dev: Adapter to set up
85 * 85 *
86 * Allocate the PCI space for the fibs, map it and then initialise the 86 * Allocate the PCI space for the fibs, map it and then initialise the
87 * fib area, the unmapped fib data and also the free list 87 * fib area, the unmapped fib data and also the free list
88 */ 88 */
89 89
90int fib_setup(struct aac_dev * dev) 90int aac_fib_setup(struct aac_dev * dev)
91{ 91{
92 struct fib *fibptr; 92 struct fib *fibptr;
93 struct hw_fib *hw_fib_va; 93 struct hw_fib *hw_fib_va;
@@ -134,14 +134,14 @@ int fib_setup(struct aac_dev * dev)
134} 134}
135 135
136/** 136/**
137 * fib_alloc - allocate a fib 137 * aac_fib_alloc - allocate a fib
138 * @dev: Adapter to allocate the fib for 138 * @dev: Adapter to allocate the fib for
139 * 139 *
140 * Allocate a fib from the adapter fib pool. If the pool is empty we 140 * Allocate a fib from the adapter fib pool. If the pool is empty we
141 * return NULL. 141 * return NULL.
142 */ 142 */
143 143
144struct fib * fib_alloc(struct aac_dev *dev) 144struct fib *aac_fib_alloc(struct aac_dev *dev)
145{ 145{
146 struct fib * fibptr; 146 struct fib * fibptr;
147 unsigned long flags; 147 unsigned long flags;
@@ -170,14 +170,14 @@ struct fib * fib_alloc(struct aac_dev *dev)
170} 170}
171 171
172/** 172/**
173 * fib_free - free a fib 173 * aac_fib_free - free a fib
174 * @fibptr: fib to free up 174 * @fibptr: fib to free up
175 * 175 *
176 * Frees up a fib and places it on the appropriate queue 176 * Frees up a fib and places it on the appropriate queue
177 * (either free or timed out) 177 * (either free or timed out)
178 */ 178 */
179 179
180void fib_free(struct fib * fibptr) 180void aac_fib_free(struct fib *fibptr)
181{ 181{
182 unsigned long flags; 182 unsigned long flags;
183 183
@@ -188,7 +188,7 @@ void fib_free(struct fib * fibptr)
188 fibptr->dev->timeout_fib = fibptr; 188 fibptr->dev->timeout_fib = fibptr;
189 } else { 189 } else {
190 if (fibptr->hw_fib->header.XferState != 0) { 190 if (fibptr->hw_fib->header.XferState != 0) {
191 printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 191 printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
192 (void*)fibptr, 192 (void*)fibptr,
193 le32_to_cpu(fibptr->hw_fib->header.XferState)); 193 le32_to_cpu(fibptr->hw_fib->header.XferState));
194 } 194 }
@@ -199,13 +199,13 @@ void fib_free(struct fib * fibptr)
199} 199}
200 200
201/** 201/**
202 * fib_init - initialise a fib 202 * aac_fib_init - initialise a fib
203 * @fibptr: The fib to initialize 203 * @fibptr: The fib to initialize
204 * 204 *
205 * Set up the generic fib fields ready for use 205 * Set up the generic fib fields ready for use
206 */ 206 */
207 207
208void fib_init(struct fib *fibptr) 208void aac_fib_init(struct fib *fibptr)
209{ 209{
210 struct hw_fib *hw_fib = fibptr->hw_fib; 210 struct hw_fib *hw_fib = fibptr->hw_fib;
211 211
@@ -362,7 +362,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
362 */ 362 */
363 363
364/** 364/**
365 * fib_send - send a fib to the adapter 365 * aac_fib_send - send a fib to the adapter
366 * @command: Command to send 366 * @command: Command to send
367 * @fibptr: The fib 367 * @fibptr: The fib
368 * @size: Size of fib data area 368 * @size: Size of fib data area
@@ -378,7 +378,9 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
378 * response FIB is received from the adapter. 378 * response FIB is received from the adapter.
379 */ 379 */
380 380
381int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data) 381int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
382 int priority, int wait, int reply, fib_callback callback,
383 void *callback_data)
382{ 384{
383 struct aac_dev * dev = fibptr->dev; 385 struct aac_dev * dev = fibptr->dev;
384 struct hw_fib * hw_fib = fibptr->hw_fib; 386 struct hw_fib * hw_fib = fibptr->hw_fib;
@@ -493,7 +495,7 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
493 q->numpending++; 495 q->numpending++;
494 *(q->headers.producer) = cpu_to_le32(index + 1); 496 *(q->headers.producer) = cpu_to_le32(index + 1);
495 spin_unlock_irqrestore(q->lock, qflags); 497 spin_unlock_irqrestore(q->lock, qflags);
496 dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index)); 498 dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index));
497 if (!(nointr & aac_config.irq_mod)) 499 if (!(nointr & aac_config.irq_mod))
498 aac_adapter_notify(dev, AdapNormCmdQueue); 500 aac_adapter_notify(dev, AdapNormCmdQueue);
499 } 501 }
@@ -520,7 +522,7 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
520 list_del(&fibptr->queue); 522 list_del(&fibptr->queue);
521 spin_unlock_irqrestore(q->lock, qflags); 523 spin_unlock_irqrestore(q->lock, qflags);
522 if (wait == -1) { 524 if (wait == -1) {
523 printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n" 525 printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
524 "Usually a result of a PCI interrupt routing problem;\n" 526 "Usually a result of a PCI interrupt routing problem;\n"
525 "update motherboard BIOS or consider utilizing one of\n" 527 "update motherboard BIOS or consider utilizing one of\n"
526 "the SAFE mode kernel options (acpi, apic etc)\n"); 528 "the SAFE mode kernel options (acpi, apic etc)\n");
@@ -624,7 +626,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
624} 626}
625 627
626/** 628/**
627 * fib_adapter_complete - complete adapter issued fib 629 * aac_fib_adapter_complete - complete adapter issued fib
628 * @fibptr: fib to complete 630 * @fibptr: fib to complete
629 * @size: size of fib 631 * @size: size of fib
630 * 632 *
@@ -632,7 +634,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
632 * the adapter. 634 * the adapter.
633 */ 635 */
634 636
635int fib_adapter_complete(struct fib * fibptr, unsigned short size) 637int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
636{ 638{
637 struct hw_fib * hw_fib = fibptr->hw_fib; 639 struct hw_fib * hw_fib = fibptr->hw_fib;
638 struct aac_dev * dev = fibptr->dev; 640 struct aac_dev * dev = fibptr->dev;
@@ -683,20 +685,20 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
683 } 685 }
684 else 686 else
685 { 687 {
686 printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n"); 688 printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
687 BUG(); 689 BUG();
688 } 690 }
689 return 0; 691 return 0;
690} 692}
691 693
692/** 694/**
693 * fib_complete - fib completion handler 695 * aac_fib_complete - fib completion handler
694 * @fib: FIB to complete 696 * @fib: FIB to complete
695 * 697 *
696 * Will do all necessary work to complete a FIB. 698 * Will do all necessary work to complete a FIB.
697 */ 699 */
698 700
699int fib_complete(struct fib * fibptr) 701int aac_fib_complete(struct fib *fibptr)
700{ 702{
701 struct hw_fib * hw_fib = fibptr->hw_fib; 703 struct hw_fib * hw_fib = fibptr->hw_fib;
702 704
@@ -995,14 +997,14 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
995 if (!dev || !dev->scsi_host_ptr) 997 if (!dev || !dev->scsi_host_ptr)
996 return; 998 return;
997 /* 999 /*
998 * force reload of disk info via probe_container 1000 * force reload of disk info via aac_probe_container
999 */ 1001 */
1000 if ((device_config_needed == CHANGE) 1002 if ((device_config_needed == CHANGE)
1001 && (dev->fsa_dev[container].valid == 1)) 1003 && (dev->fsa_dev[container].valid == 1))
1002 dev->fsa_dev[container].valid = 2; 1004 dev->fsa_dev[container].valid = 2;
1003 if ((device_config_needed == CHANGE) || 1005 if ((device_config_needed == CHANGE) ||
1004 (device_config_needed == ADD)) 1006 (device_config_needed == ADD))
1005 probe_container(dev, container); 1007 aac_probe_container(dev, container);
1006 device = scsi_device_lookup(dev->scsi_host_ptr, 1008 device = scsi_device_lookup(dev->scsi_host_ptr,
1007 CONTAINER_TO_CHANNEL(container), 1009 CONTAINER_TO_CHANNEL(container),
1008 CONTAINER_TO_ID(container), 1010 CONTAINER_TO_ID(container),
@@ -1104,7 +1106,7 @@ int aac_command_thread(struct aac_dev * dev)
1104 /* Handle Driver Notify Events */ 1106 /* Handle Driver Notify Events */
1105 aac_handle_aif(dev, fib); 1107 aac_handle_aif(dev, fib);
1106 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1108 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1107 fib_adapter_complete(fib, (u16)sizeof(u32)); 1109 aac_fib_adapter_complete(fib, (u16)sizeof(u32));
1108 } else { 1110 } else {
1109 struct list_head *entry; 1111 struct list_head *entry;
1110 /* The u32 here is important and intended. We are using 1112 /* The u32 here is important and intended. We are using
@@ -1241,7 +1243,7 @@ int aac_command_thread(struct aac_dev * dev)
1241 * Set the status of this FIB 1243 * Set the status of this FIB
1242 */ 1244 */
1243 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1245 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1244 fib_adapter_complete(fib, sizeof(u32)); 1246 aac_fib_adapter_complete(fib, sizeof(u32));
1245 spin_unlock_irqrestore(&dev->fib_lock, flagv); 1247 spin_unlock_irqrestore(&dev->fib_lock, flagv);
1246 /* Free up the remaining resources */ 1248 /* Free up the remaining resources */
1247 hw_fib_p = hw_fib_pool; 1249 hw_fib_p = hw_fib_pool;
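aac_fib_send() also keeps its asynchronous mode: with wait set to 0 and a fib_callback supplied, -EINPROGRESS means the FIB was queued and the callback (which completes and frees it) will run later. A sketch of that pattern as used by aac_read() and aac_send_srb_fib() in aachba.c above; the wrapper name is illustrative:

	static int aac_example_send_async(struct fib *fib, struct scsi_cmnd *scsicmd,
					  u16 fibsize)
	{
		int status;

		status = aac_fib_send(ContainerCommand, fib, fibsize, FsaNormal,
				      0, 1,				/* don't wait, expect a reply */
				      (fib_callback) io_callback,	/* completes and frees the FIB */
				      (void *) scsicmd);
		if (status == -EINPROGRESS)
			return 0;		/* queued; the callback finishes it */

		/* The FIB never reached the adapter: clean up on this path. */
		aac_fib_complete(fib);
		aac_fib_free(fib);
		return -1;
	}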
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 439948ef8251..f6bcb9486f85 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -206,7 +206,7 @@ unsigned int aac_command_normal(struct aac_queue *q)
206 * Set the status of this FIB 206 * Set the status of this FIB
207 */ 207 */
208 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 208 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
209 fib_adapter_complete(fib, sizeof(u32)); 209 aac_fib_adapter_complete(fib, sizeof(u32));
210 spin_lock_irqsave(q->lock, flags); 210 spin_lock_irqsave(q->lock, flags);
211 } 211 }
212 } 212 }
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 0bf5f9a943e8..271617890562 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -385,17 +385,45 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
385 385
386static int aac_slave_configure(struct scsi_device *sdev) 386static int aac_slave_configure(struct scsi_device *sdev)
387{ 387{
388 struct Scsi_Host *host = sdev->host; 388 if (sdev_channel(sdev) == CONTAINER_CHANNEL) {
389 sdev->skip_ms_page_8 = 1;
390 sdev->skip_ms_page_3f = 1;
391 }
392 if ((sdev->type == TYPE_DISK) &&
393 (sdev_channel(sdev) != CONTAINER_CHANNEL)) {
394 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
395 if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
396 sdev->no_uld_attach = 1;
397 }
398 if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
399 (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
400 struct scsi_device * dev;
401 struct Scsi_Host *host = sdev->host;
402 unsigned num_lsu = 0;
403 unsigned num_one = 0;
404 unsigned depth;
389 405
390 if (sdev->tagged_supported) 406 __shost_for_each_device(dev, host) {
391 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 128); 407 if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
392 else 408 (sdev_channel(dev) == CONTAINER_CHANNEL))
409 ++num_lsu;
410 else
411 ++num_one;
412 }
413 if (num_lsu == 0)
414 ++num_lsu;
415 depth = (host->can_queue - num_one) / num_lsu;
416 if (depth > 256)
417 depth = 256;
418 else if (depth < 2)
419 depth = 2;
420 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
421 if (!(((struct aac_dev *)host->hostdata)->adapter_info.options &
422 AAC_OPT_NEW_COMM))
423 blk_queue_max_segment_size(sdev->request_queue, 65536);
424 } else
393 scsi_adjust_queue_depth(sdev, 0, 1); 425 scsi_adjust_queue_depth(sdev, 0, 1);
394 426
395 if (!(((struct aac_dev *)host->hostdata)->adapter_info.options
396 & AAC_OPT_NEW_COMM))
397 blk_queue_max_segment_size(sdev->request_queue, 65536);
398
399 return 0; 427 return 0;
400} 428}
401 429
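The new queue-depth policy divides host->can_queue among the array containers on CONTAINER_CHANNEL: one slot is reserved for every non-container device (num_one), the remainder is split evenly across the containers (num_lsu), and the result is clamped to the range 2..256. With hypothetical figures, not taken from the patch, of can_queue = 512, four containers and six other devices, each container gets (512 - 6) / 4 = 126 tags:

	/* Worked example with made-up numbers. */
	unsigned can_queue = 512, num_lsu = 4, num_one = 6;
	unsigned depth = (can_queue - num_one) / num_lsu;	/* 126 */

	if (depth > 256)
		depth = 256;
	else if (depth < 2)
		depth = 2;	/* still 126 in this example */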
@@ -870,7 +898,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
870 898
871 /* 899 /*
872 * max channel will be the physical channels plus 1 virtual channel 900 * max channel will be the physical channels plus 1 virtual channel
873 * all containers are on the virtual channel 0 901 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
874 * physical channels are addressed by their actual physical number+1 902 * physical channels are addressed by their actual physical number+1
875 */ 903 */
876 if (aac->nondasd_support == 1) 904 if (aac->nondasd_support == 1)
@@ -913,7 +941,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
913 aac_adapter_disable_int(aac); 941 aac_adapter_disable_int(aac);
914 free_irq(pdev->irq, aac); 942 free_irq(pdev->irq, aac);
915 out_unmap: 943 out_unmap:
916 fib_map_free(aac); 944 aac_fib_map_free(aac);
917 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); 945 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
918 kfree(aac->queues); 946 kfree(aac->queues);
919 iounmap(aac->regs.sa); 947 iounmap(aac->regs.sa);
@@ -947,7 +975,7 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
947 975
948 aac_send_shutdown(aac); 976 aac_send_shutdown(aac);
949 aac_adapter_disable_int(aac); 977 aac_adapter_disable_int(aac);
950 fib_map_free(aac); 978 aac_fib_map_free(aac);
951 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, 979 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
952 aac->comm_phys); 980 aac->comm_phys);
953 kfree(aac->queues); 981 kfree(aac->queues);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index bd3ffdf6c800..62e3cda859af 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -2816,7 +2816,7 @@ static int gdth_fill_cache_cmd(int hanum,Scsi_Cmnd *scp,ushort hdrive)
2816 } 2816 }
2817#endif 2817#endif
2818 2818
2819 } else { 2819 } else if (scp->request_bufflen) {
2820 scp->SCp.Status = GDTH_MAP_SINGLE; 2820 scp->SCp.Status = GDTH_MAP_SINGLE;
2821 scp->SCp.Message = (read_write == 1 ? 2821 scp->SCp.Message = (read_write == 1 ?
2822 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); 2822 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 27acf78cf8d8..2bba5e55d7bc 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4236,35 +4236,6 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4236} 4236}
4237 4237
4238/** 4238/**
4239 * ipr_save_ioafp_mode_select - Save adapters mode select data
4240 * @ioa_cfg: ioa config struct
4241 * @scsi_cmd: scsi command struct
4242 *
4243 * This function saves mode select data for the adapter to
4244 * use following an adapter reset.
4245 *
4246 * Return value:
4247 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
4248 **/
4249static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
4250 struct scsi_cmnd *scsi_cmd)
4251{
4252 if (!ioa_cfg->saved_mode_pages) {
4253 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
4254 GFP_ATOMIC);
4255 if (!ioa_cfg->saved_mode_pages) {
4256 dev_err(&ioa_cfg->pdev->dev,
4257 "IOA mode select buffer allocation failed\n");
4258 return SCSI_MLQUEUE_HOST_BUSY;
4259 }
4260 }
4261
4262 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
4263 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
4264 return 0;
4265}
4266
4267/**
4268 * ipr_queuecommand - Queue a mid-layer request 4239 * ipr_queuecommand - Queue a mid-layer request
4269 * @scsi_cmd: scsi command struct 4240 * @scsi_cmd: scsi command struct
4270 * @done: done function 4241 * @done: done function
@@ -4338,9 +4309,6 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4338 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) 4309 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4339 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 4310 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4340 4311
4341 if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
4342 rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
4343
4344 if (likely(rc == 0)) 4312 if (likely(rc == 0))
4345 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); 4313 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4346 4314
@@ -4829,17 +4797,11 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4829 int length; 4797 int length;
4830 4798
4831 ENTER; 4799 ENTER;
4832 if (ioa_cfg->saved_mode_pages) { 4800 ipr_scsi_bus_speed_limit(ioa_cfg);
4833 memcpy(mode_pages, ioa_cfg->saved_mode_pages, 4801 ipr_check_term_power(ioa_cfg, mode_pages);
4834 ioa_cfg->saved_mode_page_len); 4802 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4835 length = ioa_cfg->saved_mode_page_len; 4803 length = mode_pages->hdr.length + 1;
4836 } else { 4804 mode_pages->hdr.length = 0;
4837 ipr_scsi_bus_speed_limit(ioa_cfg);
4838 ipr_check_term_power(ioa_cfg, mode_pages);
4839 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4840 length = mode_pages->hdr.length + 1;
4841 mode_pages->hdr.length = 0;
4842 }
4843 4805
4844 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, 4806 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4845 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), 4807 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
@@ -5969,7 +5931,6 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5969 } 5931 }
5970 5932
5971 ipr_free_dump(ioa_cfg); 5933 ipr_free_dump(ioa_cfg);
5972 kfree(ioa_cfg->saved_mode_pages);
5973 kfree(ioa_cfg->trace); 5934 kfree(ioa_cfg->trace);
5974} 5935}
5975 5936
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b639332131f1..fd360bfe56dd 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -36,8 +36,8 @@
36/* 36/*
37 * Literals 37 * Literals
38 */ 38 */
39#define IPR_DRIVER_VERSION "2.1.1" 39#define IPR_DRIVER_VERSION "2.1.2"
40#define IPR_DRIVER_DATE "(November 15, 2005)" 40#define IPR_DRIVER_DATE "(February 8, 2006)"
41 41
42/* 42/*
43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -1000,7 +1000,6 @@ struct ipr_ioa_cfg {
1000 struct Scsi_Host *host; 1000 struct Scsi_Host *host;
1001 struct pci_dev *pdev; 1001 struct pci_dev *pdev;
1002 struct ipr_sglist *ucode_sglist; 1002 struct ipr_sglist *ucode_sglist;
1003 struct ipr_mode_pages *saved_mode_pages;
1004 u8 saved_mode_page_len; 1003 u8 saved_mode_page_len;
1005 1004
1006 struct work_struct work_q; 1005 struct work_struct work_q;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 780bfcc67096..ff79e68b347c 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -146,7 +146,7 @@ iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
146 spin_unlock_irqrestore(&session->lock, flags); 146 spin_unlock_irqrestore(&session->lock, flags);
147 set_bit(SUSPEND_BIT, &conn->suspend_tx); 147 set_bit(SUSPEND_BIT, &conn->suspend_tx);
148 set_bit(SUSPEND_BIT, &conn->suspend_rx); 148 set_bit(SUSPEND_BIT, &conn->suspend_rx);
149 iscsi_conn_error(iscsi_handle(conn), err); 149 iscsi_conn_error(conn->cls_conn, err);
150} 150}
151 151
152static inline int 152static inline int
@@ -244,12 +244,10 @@ iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
244 if (sc->sc_data_direction == DMA_TO_DEVICE) { 244 if (sc->sc_data_direction == DMA_TO_DEVICE) {
245 struct iscsi_data_task *dtask, *n; 245 struct iscsi_data_task *dtask, *n;
246 /* WRITE: cleanup Data-Out's if any */ 246 /* WRITE: cleanup Data-Out's if any */
247 spin_lock(&conn->lock);
248 list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) { 247 list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
249 list_del(&dtask->item); 248 list_del(&dtask->item);
250 mempool_free(dtask, ctask->datapool); 249 mempool_free(dtask, ctask->datapool);
251 } 250 }
252 spin_unlock(&conn->lock);
253 } 251 }
254 ctask->xmstate = XMSTATE_IDLE; 252 ctask->xmstate = XMSTATE_IDLE;
255 ctask->r2t = NULL; 253 ctask->r2t = NULL;
@@ -689,7 +687,7 @@ iscsi_hdr_recv(struct iscsi_conn *conn)
689 break; 687 break;
690 688
691 if (!conn->in.datalen) { 689 if (!conn->in.datalen) {
692 rc = iscsi_recv_pdu(iscsi_handle(conn), hdr, 690 rc = iscsi_recv_pdu(conn->cls_conn, hdr,
693 NULL, 0); 691 NULL, 0);
694 if (conn->login_mtask != mtask) { 692 if (conn->login_mtask != mtask) {
695 spin_lock(&session->lock); 693 spin_lock(&session->lock);
@@ -737,7 +735,7 @@ iscsi_hdr_recv(struct iscsi_conn *conn)
737 if (!conn->in.datalen) { 735 if (!conn->in.datalen) {
738 struct iscsi_mgmt_task *mtask; 736 struct iscsi_mgmt_task *mtask;
739 737
740 rc = iscsi_recv_pdu(iscsi_handle(conn), hdr, 738 rc = iscsi_recv_pdu(conn->cls_conn, hdr,
741 NULL, 0); 739 NULL, 0);
742 mtask = (struct iscsi_mgmt_task *) 740 mtask = (struct iscsi_mgmt_task *)
743 session->mgmt_cmds[conn->in.itt - 741 session->mgmt_cmds[conn->in.itt -
@@ -761,7 +759,7 @@ iscsi_hdr_recv(struct iscsi_conn *conn)
761 rc = iscsi_check_assign_cmdsn(session, 759 rc = iscsi_check_assign_cmdsn(session,
762 (struct iscsi_nopin*)hdr); 760 (struct iscsi_nopin*)hdr);
763 if (!rc && hdr->ttt != ISCSI_RESERVED_TAG) 761 if (!rc && hdr->ttt != ISCSI_RESERVED_TAG)
764 rc = iscsi_recv_pdu(iscsi_handle(conn), 762 rc = iscsi_recv_pdu(conn->cls_conn,
765 hdr, NULL, 0); 763 hdr, NULL, 0);
766 } else 764 } else
767 rc = ISCSI_ERR_PROTO; 765 rc = ISCSI_ERR_PROTO;
@@ -1044,7 +1042,7 @@ iscsi_data_recv(struct iscsi_conn *conn)
1044 goto exit; 1042 goto exit;
1045 } 1043 }
1046 1044
1047 rc = iscsi_recv_pdu(iscsi_handle(conn), conn->in.hdr, 1045 rc = iscsi_recv_pdu(conn->cls_conn, conn->in.hdr,
1048 conn->data, conn->in.datalen); 1046 conn->data, conn->in.datalen);
1049 1047
1050 if (!rc && conn->datadgst_en && 1048 if (!rc && conn->datadgst_en &&
@@ -2428,19 +2426,20 @@ iscsi_pool_free(struct iscsi_queue *q, void **items)
2428} 2426}
2429 2427
2430static struct iscsi_cls_conn * 2428static struct iscsi_cls_conn *
2431iscsi_conn_create(struct Scsi_Host *shost, uint32_t conn_idx) 2429iscsi_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
2432{ 2430{
2431 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
2433 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 2432 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2434 struct iscsi_conn *conn; 2433 struct iscsi_conn *conn;
2435 struct iscsi_cls_conn *cls_conn; 2434 struct iscsi_cls_conn *cls_conn;
2436 2435
2437 cls_conn = iscsi_create_conn(hostdata_session(shost->hostdata), 2436 cls_conn = iscsi_create_conn(cls_session, conn_idx);
2438 conn_idx);
2439 if (!cls_conn) 2437 if (!cls_conn)
2440 return NULL; 2438 return NULL;
2441 conn = cls_conn->dd_data; 2439 conn = cls_conn->dd_data;
2440 memset(conn, 0, sizeof(*conn));
2442 2441
2443 memset(conn, 0, sizeof(struct iscsi_conn)); 2442 conn->cls_conn = cls_conn;
2444 conn->c_stage = ISCSI_CONN_INITIAL_STAGE; 2443 conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
2445 conn->in_progress = IN_PROGRESS_WAIT_HEADER; 2444 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
2446 conn->id = conn_idx; 2445 conn->id = conn_idx;
@@ -2452,8 +2451,6 @@ iscsi_conn_create(struct Scsi_Host *shost, uint32_t conn_idx)
2452 conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; 2451 conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
2453 conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; 2452 conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
2454 2453
2455 spin_lock_init(&conn->lock);
2456
2457 /* initialize general xmit PDU commands queue */ 2454 /* initialize general xmit PDU commands queue */
2458 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*), 2455 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*),
2459 GFP_KERNEL, NULL); 2456 GFP_KERNEL, NULL);
@@ -2625,11 +2622,13 @@ iscsi_conn_destroy(struct iscsi_cls_conn *cls_conn)
2625} 2622}
2626 2623
2627static int 2624static int
2628iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh, 2625iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2629 uint32_t transport_fd, int is_leading) 2626 struct iscsi_cls_conn *cls_conn, uint32_t transport_fd,
2627 int is_leading)
2630{ 2628{
2631 struct iscsi_session *session = iscsi_ptr(sessionh); 2629 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
2632 struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = iscsi_ptr(connh); 2630 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2631 struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = cls_conn->dd_data;
2633 struct sock *sk; 2632 struct sock *sk;
2634 struct socket *sock; 2633 struct socket *sock;
2635 int err; 2634 int err;
@@ -2703,9 +2702,9 @@ iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh,
2703} 2702}
2704 2703
2705static int 2704static int
2706iscsi_conn_start(iscsi_connh_t connh) 2705iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2707{ 2706{
2708 struct iscsi_conn *conn = iscsi_ptr(connh); 2707 struct iscsi_conn *conn = cls_conn->dd_data;
2709 struct iscsi_session *session = conn->session; 2708 struct iscsi_session *session = conn->session;
2710 struct sock *sk; 2709 struct sock *sk;
2711 2710
@@ -2754,9 +2753,9 @@ iscsi_conn_start(iscsi_connh_t connh)
2754} 2753}
2755 2754
2756static void 2755static void
2757iscsi_conn_stop(iscsi_connh_t connh, int flag) 2756iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
2758{ 2757{
2759 struct iscsi_conn *conn = iscsi_ptr(connh); 2758 struct iscsi_conn *conn = cls_conn->dd_data;
2760 struct iscsi_session *session = conn->session; 2759 struct iscsi_session *session = conn->session;
2761 struct sock *sk; 2760 struct sock *sk;
2762 unsigned long flags; 2761 unsigned long flags;
@@ -3253,9 +3252,9 @@ static struct scsi_host_template iscsi_sht = {
3253 3252
3254static struct iscsi_transport iscsi_tcp_transport; 3253static struct iscsi_transport iscsi_tcp_transport;
3255 3254
3256static struct Scsi_Host * 3255static struct iscsi_cls_session *
3257iscsi_session_create(struct scsi_transport_template *scsit, 3256iscsi_session_create(struct scsi_transport_template *scsit,
3258 uint32_t initial_cmdsn) 3257 uint32_t initial_cmdsn, uint32_t *sid)
3259{ 3258{
3260 struct Scsi_Host *shost; 3259 struct Scsi_Host *shost;
3261 struct iscsi_session *session; 3260 struct iscsi_session *session;
@@ -3268,13 +3267,14 @@ iscsi_session_create(struct scsi_transport_template *scsit,
3268 session = iscsi_hostdata(shost->hostdata); 3267 session = iscsi_hostdata(shost->hostdata);
3269 memset(session, 0, sizeof(struct iscsi_session)); 3268 memset(session, 0, sizeof(struct iscsi_session));
3270 session->host = shost; 3269 session->host = shost;
3271 session->state = ISCSI_STATE_LOGGED_IN; 3270 session->state = ISCSI_STATE_FREE;
3272 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX; 3271 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
3273 session->cmds_max = ISCSI_XMIT_CMDS_MAX; 3272 session->cmds_max = ISCSI_XMIT_CMDS_MAX;
3274 session->cmdsn = initial_cmdsn; 3273 session->cmdsn = initial_cmdsn;
3275 session->exp_cmdsn = initial_cmdsn + 1; 3274 session->exp_cmdsn = initial_cmdsn + 1;
3276 session->max_cmdsn = initial_cmdsn + 1; 3275 session->max_cmdsn = initial_cmdsn + 1;
3277 session->max_r2t = 1; 3276 session->max_r2t = 1;
3277 *sid = shost->host_no;
3278 3278
3279 /* initialize SCSI PDU commands pool */ 3279 /* initialize SCSI PDU commands pool */
3280 if (iscsi_pool_init(&session->cmdpool, session->cmds_max, 3280 if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
@@ -3311,22 +3311,24 @@ iscsi_session_create(struct scsi_transport_template *scsit,
3311 if (iscsi_r2tpool_alloc(session)) 3311 if (iscsi_r2tpool_alloc(session))
3312 goto r2tpool_alloc_fail; 3312 goto r2tpool_alloc_fail;
3313 3313
3314 return shost; 3314 return hostdata_session(shost->hostdata);
3315 3315
3316r2tpool_alloc_fail: 3316r2tpool_alloc_fail:
3317 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) 3317 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++)
3318 kfree(session->mgmt_cmds[cmd_i]->data); 3318 kfree(session->mgmt_cmds[cmd_i]->data);
3319 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
3320immdata_alloc_fail: 3319immdata_alloc_fail:
3320 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
3321mgmtpool_alloc_fail: 3321mgmtpool_alloc_fail:
3322 iscsi_pool_free(&session->cmdpool, (void**)session->cmds); 3322 iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
3323cmdpool_alloc_fail: 3323cmdpool_alloc_fail:
3324 iscsi_transport_destroy_session(shost);
3324 return NULL; 3325 return NULL;
3325} 3326}
3326 3327
3327static void 3328static void
3328iscsi_session_destroy(struct Scsi_Host *shost) 3329iscsi_session_destroy(struct iscsi_cls_session *cls_session)
3329{ 3330{
3331 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
3330 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 3332 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
3331 int cmd_i; 3333 int cmd_i;
3332 struct iscsi_data_task *dtask, *n; 3334 struct iscsi_data_task *dtask, *n;
@@ -3350,10 +3352,10 @@ iscsi_session_destroy(struct Scsi_Host *shost)
3350} 3352}
3351 3353
3352static int 3354static int
3353iscsi_conn_set_param(iscsi_connh_t connh, enum iscsi_param param, 3355iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
3354 uint32_t value) 3356 uint32_t value)
3355{ 3357{
3356 struct iscsi_conn *conn = iscsi_ptr(connh); 3358 struct iscsi_conn *conn = cls_conn->dd_data;
3357 struct iscsi_session *session = conn->session; 3359 struct iscsi_session *session = conn->session;
3358 3360
3359 spin_lock_bh(&session->lock); 3361 spin_lock_bh(&session->lock);
@@ -3495,9 +3497,10 @@ iscsi_conn_set_param(iscsi_connh_t connh, enum iscsi_param param,
3495} 3497}
3496 3498
3497static int 3499static int
3498iscsi_session_get_param(struct Scsi_Host *shost, 3500iscsi_session_get_param(struct iscsi_cls_session *cls_session,
3499 enum iscsi_param param, uint32_t *value) 3501 enum iscsi_param param, uint32_t *value)
3500{ 3502{
3503 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
3501 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 3504 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
3502 3505
3503 switch(param) { 3506 switch(param) {
@@ -3539,9 +3542,10 @@ iscsi_session_get_param(struct Scsi_Host *shost,
3539} 3542}
3540 3543
3541static int 3544static int
3542iscsi_conn_get_param(void *data, enum iscsi_param param, uint32_t *value) 3545iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
3546 enum iscsi_param param, uint32_t *value)
3543{ 3547{
3544 struct iscsi_conn *conn = data; 3548 struct iscsi_conn *conn = cls_conn->dd_data;
3545 3549
3546 switch(param) { 3550 switch(param) {
3547 case ISCSI_PARAM_MAX_RECV_DLENGTH: 3551 case ISCSI_PARAM_MAX_RECV_DLENGTH:
@@ -3564,9 +3568,9 @@ iscsi_conn_get_param(void *data, enum iscsi_param param, uint32_t *value)
3564} 3568}
3565 3569
3566static void 3570static void
3567iscsi_conn_get_stats(iscsi_connh_t connh, struct iscsi_stats *stats) 3571iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
3568{ 3572{
3569 struct iscsi_conn *conn = iscsi_ptr(connh); 3573 struct iscsi_conn *conn = cls_conn->dd_data;
3570 3574
3571 stats->txdata_octets = conn->txdata_octets; 3575 stats->txdata_octets = conn->txdata_octets;
3572 stats->rxdata_octets = conn->rxdata_octets; 3576 stats->rxdata_octets = conn->rxdata_octets;
@@ -3587,10 +3591,10 @@ iscsi_conn_get_stats(iscsi_connh_t connh, struct iscsi_stats *stats)
3587} 3591}
3588 3592
3589static int 3593static int
3590iscsi_conn_send_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, char *data, 3594iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
3591 uint32_t data_size) 3595 char *data, uint32_t data_size)
3592{ 3596{
3593 struct iscsi_conn *conn = iscsi_ptr(connh); 3597 struct iscsi_conn *conn = cls_conn->dd_data;
3594 int rc; 3598 int rc;
3595 3599
3596 mutex_lock(&conn->xmitmutex); 3600 mutex_lock(&conn->xmitmutex);
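The iscsi_tcp.c changes above replace the opaque iscsi_sessionh_t/iscsi_connh_t handles (recovered with iscsi_ptr()) with typed struct iscsi_cls_session/iscsi_cls_conn objects whose driver-private state hangs off dd_data. The following is a minimal userspace sketch of that dd_data pattern, not the kernel API: the class object is allocated with the driver's private area appended, so callbacks take the typed class pointer and cast dd_data back to their own state. All names in the sketch are illustrative.

/* Minimal userspace model of the dd_data pattern: a generic "class"
 * object carries a trailing driver-private area, so callbacks receive
 * the typed class pointer instead of an opaque handle. */
#include <stdio.h>
#include <stdlib.h>

struct cls_conn {
	int active;
	char dd_data[];          /* driver-private area follows the object */
};

struct drv_conn {
	unsigned int id;
	unsigned long tx_bytes;
};

static struct cls_conn *cls_conn_alloc(size_t dd_size)
{
	return calloc(1, sizeof(struct cls_conn) + dd_size);
}

static void conn_start(struct cls_conn *cls_conn)
{
	struct drv_conn *conn = (struct drv_conn *)cls_conn->dd_data;

	cls_conn->active = 1;
	printf("starting connection %u\n", conn->id);
}

int main(void)
{
	struct cls_conn *c = cls_conn_alloc(sizeof(struct drv_conn));
	struct drv_conn *d = (struct drv_conn *)c->dd_data;

	d->id = 7;
	conn_start(c);
	free(c);
	return 0;
}

The payoff of the conversion is visible in every hunk above: each callback now begins with a one-line cast of dd_data rather than an iscsi_ptr() lookup.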
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index f95e61b76f70..ba26741ac154 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -113,7 +113,10 @@ struct iscsi_tcp_recv {
113 int datadgst; 113 int datadgst;
114}; 114};
115 115
116struct iscsi_cls_conn;
117
116struct iscsi_conn { 118struct iscsi_conn {
119 struct iscsi_cls_conn *cls_conn; /* ptr to class connection */
117 struct iscsi_hdr hdr; /* header placeholder */ 120 struct iscsi_hdr hdr; /* header placeholder */
118 char hdrext[4*sizeof(__u16) + 121 char hdrext[4*sizeof(__u16) +
119 sizeof(__u32)]; 122 sizeof(__u32)];
@@ -143,7 +146,6 @@ struct iscsi_conn {
143 struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */ 146 struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */
144 struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */ 147 struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */
145 struct iscsi_cmd_task *ctask; /* xmit ctask in progress */ 148 struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
146 spinlock_t lock; /* FIXME: to be removed */
147 149
148 /* old values for socket callbacks */ 150 /* old values for socket callbacks */
149 void (*old_data_ready)(struct sock *, int); 151 void (*old_data_ready)(struct sock *, int);
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index d101a8a6f4e8..7144674bc8e6 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -5049,7 +5049,7 @@ static struct pci_device_id megaraid_pci_tbl[] = {
5049MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl); 5049MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);
5050 5050
5051static struct pci_driver megaraid_pci_driver = { 5051static struct pci_driver megaraid_pci_driver = {
5052 .name = "megaraid", 5052 .name = "megaraid_legacy",
5053 .id_table = megaraid_pci_tbl, 5053 .id_table = megaraid_pci_tbl,
5054 .probe = megaraid_probe_one, 5054 .probe = megaraid_probe_one,
5055 .remove = __devexit_p(megaraid_remove_one), 5055 .remove = __devexit_p(megaraid_remove_one),
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 4b3e0d6e5afa..4b75fe619d9c 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -5,7 +5,7 @@
5#include <linux/mutex.h> 5#include <linux/mutex.h>
6 6
7#define MEGARAID_VERSION \ 7#define MEGARAID_VERSION \
8 "v2.00.3 (Release Date: Wed Feb 19 08:51:30 EST 2003)\n" 8 "v2.00.4 (Release Date: Thu Feb 9 08:51:30 EST 2006)\n"
9 9
10/* 10/*
11 * Driver features - change the values to enable or disable features in the 11 * Driver features - change the values to enable or disable features in the
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index a487f414960e..7de267e14458 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_sas.c 12 * FILE : megaraid_sas.c
13 * Version : v00.00.02.02 13 * Version : v00.00.02.04
14 * 14 *
15 * Authors: 15 * Authors:
16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com> 16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com>
@@ -60,6 +60,12 @@ static struct pci_device_id megasas_pci_table[] = {
60 PCI_ANY_ID, 60 PCI_ANY_ID,
61 }, 61 },
62 { 62 {
63 PCI_VENDOR_ID_LSI_LOGIC,
64 PCI_DEVICE_ID_LSI_SAS1078R, // ppc IOP
65 PCI_ANY_ID,
66 PCI_ANY_ID,
67 },
68 {
63 PCI_VENDOR_ID_DELL, 69 PCI_VENDOR_ID_DELL,
64 PCI_DEVICE_ID_DELL_PERC5, // xscale IOP 70 PCI_DEVICE_ID_DELL_PERC5, // xscale IOP
65 PCI_ANY_ID, 71 PCI_ANY_ID,
@@ -199,6 +205,86 @@ static struct megasas_instance_template megasas_instance_template_xscale = {
199*/ 205*/
200 206
201/** 207/**
208* The following functions are defined for ppc (deviceid : 0x60)
209* controllers
210*/
211
212/**
213 * megasas_enable_intr_ppc - Enables interrupts
214 * @regs: MFI register set
215 */
216static inline void
217megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs)
218{
219 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
220
221 writel(~0x80000004, &(regs)->outbound_intr_mask);
222
223 /* Dummy readl to force pci flush */
224 readl(&regs->outbound_intr_mask);
225}
226
227/**
228 * megasas_read_fw_status_reg_ppc - returns the current FW status value
229 * @regs: MFI register set
230 */
231static u32
232megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
233{
234 return readl(&(regs)->outbound_scratch_pad);
235}
236
237/**
238 * megasas_clear_interrupt_ppc - Check & clear interrupt
239 * @regs: MFI register set
240 */
241static int
242megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
243{
244 u32 status;
245 /*
246 * Check if it is our interrupt
247 */
248 status = readl(&regs->outbound_intr_status);
249
250 if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) {
251 return 1;
252 }
253
254 /*
255 * Clear the interrupt by writing back the same value
256 */
257 writel(status, &regs->outbound_doorbell_clear);
258
259 return 0;
260}
261/**
262 * megasas_fire_cmd_ppc - Sends command to the FW
263 * @frame_phys_addr : Physical address of cmd
264 * @frame_count : Number of frames for the command
265 * @regs : MFI register set
266 */
267static inline void
268megasas_fire_cmd_ppc(dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs)
269{
270 writel((frame_phys_addr | (frame_count<<1))|1,
271 &(regs)->inbound_queue_port);
272}
273
274static struct megasas_instance_template megasas_instance_template_ppc = {
275
276 .fire_cmd = megasas_fire_cmd_ppc,
277 .enable_intr = megasas_enable_intr_ppc,
278 .clear_intr = megasas_clear_intr_ppc,
279 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
280};
281
282/**
283* This is the end of set of functions & definitions
284* specific to ppc (deviceid : 0x60) controllers
285*/
286
287/**
202 * megasas_disable_intr - Disables interrupts 288 * megasas_disable_intr - Disables interrupts
203 * @regs: MFI register set 289 * @regs: MFI register set
204 */ 290 */
@@ -1607,7 +1693,17 @@ static int megasas_init_mfi(struct megasas_instance *instance)
1607 1693
1608 reg_set = instance->reg_set; 1694 reg_set = instance->reg_set;
1609 1695
1610 instance->instancet = &megasas_instance_template_xscale; 1696 switch(instance->pdev->device)
1697 {
1698 case PCI_DEVICE_ID_LSI_SAS1078R:
1699 instance->instancet = &megasas_instance_template_ppc;
1700 break;
1701 case PCI_DEVICE_ID_LSI_SAS1064R:
1702 case PCI_DEVICE_ID_DELL_PERC5:
1703 default:
1704 instance->instancet = &megasas_instance_template_xscale;
1705 break;
1706 }
1611 1707
1612 /* 1708 /*
1613 * We expect the FW state to be READY 1709 * We expect the FW state to be READY
@@ -1983,6 +2079,7 @@ static int megasas_io_attach(struct megasas_instance *instance)
1983 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 2079 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
1984 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 2080 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
1985 host->max_lun = MEGASAS_MAX_LUN; 2081 host->max_lun = MEGASAS_MAX_LUN;
2082 host->max_cmd_len = 16;
1986 2083
1987 /* 2084 /*
1988 * Notify the mid-layer about the new controller 2085 * Notify the mid-layer about the new controller
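The megaraid_sas.c changes above add a second controller family: each family supplies a megasas_instance_template of function pointers (fire_cmd, enable_intr, clear_intr, read_fw_status_reg), and megasas_init_mfi() now picks the table from the PCI device ID. The userspace model below only sketches the shape of that dispatch; the 0x60 value comes from the diff (PCI_DEVICE_ID_LSI_SAS1078R), while the xscale ID and the ops bodies are placeholders, not the driver's real register programming.

/* Illustrative ops-table dispatch keyed by PCI device ID. */
#include <stdio.h>
#include <stdint.h>

#define DEV_ID_PPC    0x0060    /* 1078R, per the diff */
#define DEV_ID_XSCALE 0x0411    /* placeholder value */

struct instance_template {
	void (*enable_intr)(void);
	uint32_t (*read_fw_status)(void);
};

static void xscale_enable_intr(void) { puts("xscale: unmask intr"); }
static uint32_t xscale_fw_status(void) { return 0xb0; }
static void ppc_enable_intr(void) { puts("ppc: clear doorbell, unmask"); }
static uint32_t ppc_fw_status(void) { return 0xb0; }

static const struct instance_template xscale_tmpl = {
	.enable_intr    = xscale_enable_intr,
	.read_fw_status = xscale_fw_status,
};

static const struct instance_template ppc_tmpl = {
	.enable_intr    = ppc_enable_intr,
	.read_fw_status = ppc_fw_status,
};

static const struct instance_template *pick_template(uint16_t device)
{
	switch (device) {
	case DEV_ID_PPC:
		return &ppc_tmpl;
	case DEV_ID_XSCALE:
	default:
		return &xscale_tmpl;
	}
}

int main(void)
{
	const struct instance_template *t = pick_template(DEV_ID_PPC);

	t->enable_intr();
	printf("fw status: 0x%x\n", t->read_fw_status());
	return 0;
}

Keeping the per-family differences behind one table means the common command path never branches on the device ID again after probe.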
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index d6d166c0663f..89639f0c38ef 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
18/** 18/**
19 * MegaRAID SAS Driver meta data 19 * MegaRAID SAS Driver meta data
20 */ 20 */
21#define MEGASAS_VERSION "00.00.02.02" 21#define MEGASAS_VERSION "00.00.02.04"
22#define MEGASAS_RELDATE "Jan 23, 2006" 22#define MEGASAS_RELDATE "Feb 03, 2006"
23#define MEGASAS_EXT_VERSION "Mon Jan 23 14:09:01 PST 2006" 23#define MEGASAS_EXT_VERSION "Fri Feb 03 14:31:44 PST 2006"
24/* 24/*
25 * ===================================== 25 * =====================================
26 * MegaRAID SAS MFI firmware definitions 26 * MegaRAID SAS MFI firmware definitions
@@ -553,31 +553,46 @@ struct megasas_ctrl_info {
553#define MFI_OB_INTR_STATUS_MASK 0x00000002 553#define MFI_OB_INTR_STATUS_MASK 0x00000002
554#define MFI_POLL_TIMEOUT_SECS 10 554#define MFI_POLL_TIMEOUT_SECS 10
555 555
556#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
557#define PCI_DEVICE_ID_LSI_SAS1078R 0x00000060
558
556struct megasas_register_set { 559struct megasas_register_set {
560 u32 reserved_0[4]; /*0000h*/
557 561
558 u32 reserved_0[4]; /*0000h */ 562 u32 inbound_msg_0; /*0010h*/
563 u32 inbound_msg_1; /*0014h*/
564 u32 outbound_msg_0; /*0018h*/
565 u32 outbound_msg_1; /*001Ch*/
559 566
560 u32 inbound_msg_0; /*0010h */ 567 u32 inbound_doorbell; /*0020h*/
561 u32 inbound_msg_1; /*0014h */ 568 u32 inbound_intr_status; /*0024h*/
562 u32 outbound_msg_0; /*0018h */ 569 u32 inbound_intr_mask; /*0028h*/
563 u32 outbound_msg_1; /*001Ch */
564 570
565 u32 inbound_doorbell; /*0020h */ 571 u32 outbound_doorbell; /*002Ch*/
566 u32 inbound_intr_status; /*0024h */ 572 u32 outbound_intr_status; /*0030h*/
567 u32 inbound_intr_mask; /*0028h */ 573 u32 outbound_intr_mask; /*0034h*/
568 574
569 u32 outbound_doorbell; /*002Ch */ 575 u32 reserved_1[2]; /*0038h*/
570 u32 outbound_intr_status; /*0030h */
571 u32 outbound_intr_mask; /*0034h */
572 576
573 u32 reserved_1[2]; /*0038h */ 577 u32 inbound_queue_port; /*0040h*/
578 u32 outbound_queue_port; /*0044h*/
574 579
575 u32 inbound_queue_port; /*0040h */ 580 u32 reserved_2[22]; /*0048h*/
576 u32 outbound_queue_port; /*0044h */
577 581
578 u32 reserved_2; /*004Ch */ 582 u32 outbound_doorbell_clear; /*00A0h*/
579 583
580 u32 index_registers[1004]; /*0050h */ 584 u32 reserved_3[3]; /*00A4h*/
585
586 u32 outbound_scratch_pad ; /*00B0h*/
587
588 u32 reserved_4[3]; /*00B4h*/
589
590 u32 inbound_low_queue_port ; /*00C0h*/
591
592 u32 inbound_high_queue_port ; /*00C4h*/
593
594 u32 reserved_5; /*00C8h*/
595 u32 index_registers[820]; /*00CCh*/
581 596
582} __attribute__ ((packed)); 597} __attribute__ ((packed));
583 598
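The register-map rewrite above extends megasas_register_set past offset 0x4C so the 1078R-specific registers — outbound_doorbell_clear at 0xA0, outbound_scratch_pad at 0xB0, and the inbound queue ports at 0xC0/0xC4 — land at the offsets given in the comments. A quick standalone check (sketch) that the packed layout really places those fields where the comments say:

/* Offset check for the new register layout; field names mirror the header. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct megasas_register_set {
	uint32_t reserved_0[4];            /* 0000h */
	uint32_t inbound_msg_0;            /* 0010h */
	uint32_t inbound_msg_1;            /* 0014h */
	uint32_t outbound_msg_0;           /* 0018h */
	uint32_t outbound_msg_1;           /* 001Ch */
	uint32_t inbound_doorbell;         /* 0020h */
	uint32_t inbound_intr_status;      /* 0024h */
	uint32_t inbound_intr_mask;        /* 0028h */
	uint32_t outbound_doorbell;        /* 002Ch */
	uint32_t outbound_intr_status;     /* 0030h */
	uint32_t outbound_intr_mask;       /* 0034h */
	uint32_t reserved_1[2];            /* 0038h */
	uint32_t inbound_queue_port;       /* 0040h */
	uint32_t outbound_queue_port;      /* 0044h */
	uint32_t reserved_2[22];           /* 0048h */
	uint32_t outbound_doorbell_clear;  /* 00A0h */
	uint32_t reserved_3[3];            /* 00A4h */
	uint32_t outbound_scratch_pad;     /* 00B0h */
	uint32_t reserved_4[3];            /* 00B4h */
	uint32_t inbound_low_queue_port;   /* 00C0h */
	uint32_t inbound_high_queue_port;  /* 00C4h */
	uint32_t reserved_5;               /* 00C8h */
	uint32_t index_registers[820];     /* 00CCh */
} __attribute__((packed));

int main(void)
{
	printf("doorbell_clear @ 0x%zx\n",
	       offsetof(struct megasas_register_set, outbound_doorbell_clear));
	printf("scratch_pad    @ 0x%zx\n",
	       offsetof(struct megasas_register_set, outbound_scratch_pad));
	printf("low_queue_port @ 0x%zx\n",
	       offsetof(struct megasas_register_set, inbound_low_queue_port));
	return 0;
}

The ppc helpers added earlier in this diff (megasas_clear_intr_ppc, megasas_read_fw_status_reg_ppc) rely on exactly these three offsets.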
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b17ee62dd1a9..92b3e13e9061 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -7,7 +7,6 @@
7#include "qla_def.h" 7#include "qla_def.h"
8 8
9#include <linux/vmalloc.h> 9#include <linux/vmalloc.h>
10#include <scsi/scsi_transport_fc.h>
11 10
12/* SYSFS attributes --------------------------------------------------------- */ 11/* SYSFS attributes --------------------------------------------------------- */
13 12
@@ -114,7 +113,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf, loff_t off,
114 struct device, kobj))); 113 struct device, kobj)));
115 unsigned long flags; 114 unsigned long flags;
116 115
117 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size) 116 if (!capable(CAP_SYS_ADMIN) || off != 0)
118 return 0; 117 return 0;
119 118
120 /* Read NVRAM. */ 119 /* Read NVRAM. */
@@ -123,7 +122,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf, loff_t off,
123 ha->nvram_size); 122 ha->nvram_size);
124 spin_unlock_irqrestore(&ha->hardware_lock, flags); 123 spin_unlock_irqrestore(&ha->hardware_lock, flags);
125 124
126 return (count); 125 return ha->nvram_size;
127} 126}
128 127
129static ssize_t 128static ssize_t
@@ -175,19 +174,150 @@ static struct bin_attribute sysfs_nvram_attr = {
175 .mode = S_IRUSR | S_IWUSR, 174 .mode = S_IRUSR | S_IWUSR,
176 .owner = THIS_MODULE, 175 .owner = THIS_MODULE,
177 }, 176 },
178 .size = 0, 177 .size = 512,
179 .read = qla2x00_sysfs_read_nvram, 178 .read = qla2x00_sysfs_read_nvram,
180 .write = qla2x00_sysfs_write_nvram, 179 .write = qla2x00_sysfs_write_nvram,
181}; 180};
182 181
182static ssize_t
183qla2x00_sysfs_read_optrom(struct kobject *kobj, char *buf, loff_t off,
184 size_t count)
185{
186 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
187 struct device, kobj)));
188
189 if (ha->optrom_state != QLA_SREADING)
190 return 0;
191 if (off > ha->optrom_size)
192 return 0;
193 if (off + count > ha->optrom_size)
194 count = ha->optrom_size - off;
195
196 memcpy(buf, &ha->optrom_buffer[off], count);
197
198 return count;
199}
200
201static ssize_t
202qla2x00_sysfs_write_optrom(struct kobject *kobj, char *buf, loff_t off,
203 size_t count)
204{
205 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
206 struct device, kobj)));
207
208 if (ha->optrom_state != QLA_SWRITING)
209 return -EINVAL;
210 if (off > ha->optrom_size)
211 return -ERANGE;
212 if (off + count > ha->optrom_size)
213 count = ha->optrom_size - off;
214
215 memcpy(&ha->optrom_buffer[off], buf, count);
216
217 return count;
218}
219
220static struct bin_attribute sysfs_optrom_attr = {
221 .attr = {
222 .name = "optrom",
223 .mode = S_IRUSR | S_IWUSR,
224 .owner = THIS_MODULE,
225 },
226 .size = OPTROM_SIZE_24XX,
227 .read = qla2x00_sysfs_read_optrom,
228 .write = qla2x00_sysfs_write_optrom,
229};
230
231static ssize_t
232qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, char *buf, loff_t off,
233 size_t count)
234{
235 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
236 struct device, kobj)));
237 int val;
238
239 if (off)
240 return 0;
241
242 if (sscanf(buf, "%d", &val) != 1)
243 return -EINVAL;
244
245 switch (val) {
246 case 0:
247 if (ha->optrom_state != QLA_SREADING &&
248 ha->optrom_state != QLA_SWRITING)
249 break;
250
251 ha->optrom_state = QLA_SWAITING;
252 vfree(ha->optrom_buffer);
253 ha->optrom_buffer = NULL;
254 break;
255 case 1:
256 if (ha->optrom_state != QLA_SWAITING)
257 break;
258
259 ha->optrom_state = QLA_SREADING;
260 ha->optrom_buffer = (uint8_t *)vmalloc(ha->optrom_size);
261 if (ha->optrom_buffer == NULL) {
262 qla_printk(KERN_WARNING, ha,
263 "Unable to allocate memory for optrom retrieval "
264 "(%x).\n", ha->optrom_size);
265
266 ha->optrom_state = QLA_SWAITING;
267 return count;
268 }
269
270 memset(ha->optrom_buffer, 0, ha->optrom_size);
271 ha->isp_ops.read_optrom(ha, ha->optrom_buffer, 0,
272 ha->optrom_size);
273 break;
274 case 2:
275 if (ha->optrom_state != QLA_SWAITING)
276 break;
277
278 ha->optrom_state = QLA_SWRITING;
279 ha->optrom_buffer = (uint8_t *)vmalloc(ha->optrom_size);
280 if (ha->optrom_buffer == NULL) {
281 qla_printk(KERN_WARNING, ha,
282 "Unable to allocate memory for optrom update "
283 "(%x).\n", ha->optrom_size);
284
285 ha->optrom_state = QLA_SWAITING;
286 return count;
287 }
288 memset(ha->optrom_buffer, 0, ha->optrom_size);
289 break;
290 case 3:
291 if (ha->optrom_state != QLA_SWRITING)
292 break;
293
294 ha->isp_ops.write_optrom(ha, ha->optrom_buffer, 0,
295 ha->optrom_size);
296 break;
297 }
298 return count;
299}
300
301static struct bin_attribute sysfs_optrom_ctl_attr = {
302 .attr = {
303 .name = "optrom_ctl",
304 .mode = S_IWUSR,
305 .owner = THIS_MODULE,
306 },
307 .size = 0,
308 .write = qla2x00_sysfs_write_optrom_ctl,
309};
310
183void 311void
184qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha) 312qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
185{ 313{
186 struct Scsi_Host *host = ha->host; 314 struct Scsi_Host *host = ha->host;
187 315
188 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr); 316 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr);
189 sysfs_nvram_attr.size = ha->nvram_size;
190 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); 317 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
318 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr);
319 sysfs_create_bin_file(&host->shost_gendev.kobj,
320 &sysfs_optrom_ctl_attr);
191} 321}
192 322
193void 323void
@@ -197,6 +327,12 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
197 327
198 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr); 328 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr);
199 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); 329 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
330 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr);
331 sysfs_remove_bin_file(&host->shost_gendev.kobj,
332 &sysfs_optrom_ctl_attr);
333
334 if (ha->beacon_blink_led == 1)
335 ha->isp_ops.beacon_off(ha);
200} 336}
201 337
202/* Scsi_Host attributes. */ 338/* Scsi_Host attributes. */
@@ -384,6 +520,50 @@ qla2x00_zio_timer_store(struct class_device *cdev, const char *buf,
384 return strlen(buf); 520 return strlen(buf);
385} 521}
386 522
523static ssize_t
524qla2x00_beacon_show(struct class_device *cdev, char *buf)
525{
526 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
527 int len = 0;
528
529 if (ha->beacon_blink_led)
530 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
531 else
532 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
533 return len;
534}
535
536static ssize_t
537qla2x00_beacon_store(struct class_device *cdev, const char *buf,
538 size_t count)
539{
540 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
541 int val = 0;
542 int rval;
543
544 if (IS_QLA2100(ha) || IS_QLA2200(ha))
545 return -EPERM;
546
547 if (test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) {
548 qla_printk(KERN_WARNING, ha,
549 "Abort ISP active -- ignoring beacon request.\n");
550 return -EBUSY;
551 }
552
553 if (sscanf(buf, "%d", &val) != 1)
554 return -EINVAL;
555
556 if (val)
557 rval = ha->isp_ops.beacon_on(ha);
558 else
559 rval = ha->isp_ops.beacon_off(ha);
560
561 if (rval != QLA_SUCCESS)
562 count = 0;
563
564 return count;
565}
566
387static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, 567static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show,
388 NULL); 568 NULL);
389static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 569static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
@@ -398,6 +578,8 @@ static CLASS_DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show,
398 qla2x00_zio_store); 578 qla2x00_zio_store);
399static CLASS_DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show, 579static CLASS_DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
400 qla2x00_zio_timer_store); 580 qla2x00_zio_timer_store);
581static CLASS_DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
582 qla2x00_beacon_store);
401 583
402struct class_device_attribute *qla2x00_host_attrs[] = { 584struct class_device_attribute *qla2x00_host_attrs[] = {
403 &class_device_attr_driver_version, 585 &class_device_attr_driver_version,
@@ -411,6 +593,7 @@ struct class_device_attribute *qla2x00_host_attrs[] = {
411 &class_device_attr_state, 593 &class_device_attr_state,
412 &class_device_attr_zio, 594 &class_device_attr_zio,
413 &class_device_attr_zio_timer, 595 &class_device_attr_zio_timer,
596 &class_device_attr_beacon,
414 NULL, 597 NULL,
415}; 598};
416 599
@@ -426,6 +609,49 @@ qla2x00_get_host_port_id(struct Scsi_Host *shost)
426} 609}
427 610
428static void 611static void
612qla2x00_get_host_speed(struct Scsi_Host *shost)
613{
614 scsi_qla_host_t *ha = to_qla_host(shost);
615 uint32_t speed = 0;
616
617 switch (ha->link_data_rate) {
618 case LDR_1GB:
619 speed = 1;
620 break;
621 case LDR_2GB:
622 speed = 2;
623 break;
624 case LDR_4GB:
625 speed = 4;
626 break;
627 }
628 fc_host_speed(shost) = speed;
629}
630
631static void
632qla2x00_get_host_port_type(struct Scsi_Host *shost)
633{
634 scsi_qla_host_t *ha = to_qla_host(shost);
635 uint32_t port_type = FC_PORTTYPE_UNKNOWN;
636
637 switch (ha->current_topology) {
638 case ISP_CFG_NL:
639 port_type = FC_PORTTYPE_LPORT;
640 break;
641 case ISP_CFG_FL:
642 port_type = FC_PORTTYPE_NLPORT;
643 break;
644 case ISP_CFG_N:
645 port_type = FC_PORTTYPE_PTP;
646 break;
647 case ISP_CFG_F:
648 port_type = FC_PORTTYPE_NPORT;
649 break;
650 }
651 fc_host_port_type(shost) = port_type;
652}
653
654static void
429qla2x00_get_starget_node_name(struct scsi_target *starget) 655qla2x00_get_starget_node_name(struct scsi_target *starget)
430{ 656{
431 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 657 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
@@ -512,6 +738,41 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
512 return 0; 738 return 0;
513} 739}
514 740
741static struct fc_host_statistics *
742qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
743{
744 scsi_qla_host_t *ha = to_qla_host(shost);
745 int rval;
746 uint16_t mb_stat[1];
747 link_stat_t stat_buf;
748 struct fc_host_statistics *pfc_host_stat;
749
750 pfc_host_stat = &ha->fc_host_stat;
751 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
752
753 if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
754 rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf,
755 sizeof(stat_buf) / 4, mb_stat);
756 } else {
757 rval = qla2x00_get_link_status(ha, ha->loop_id, &stat_buf,
758 mb_stat);
759 }
760 if (rval != 0) {
761 qla_printk(KERN_WARNING, ha,
762 "Unable to retrieve host statistics (%d).\n", mb_stat[0]);
763 return pfc_host_stat;
764 }
765
766 pfc_host_stat->link_failure_count = stat_buf.link_fail_cnt;
767 pfc_host_stat->loss_of_sync_count = stat_buf.loss_sync_cnt;
768 pfc_host_stat->loss_of_signal_count = stat_buf.loss_sig_cnt;
769 pfc_host_stat->prim_seq_protocol_err_count = stat_buf.prim_seq_err_cnt;
770 pfc_host_stat->invalid_tx_word_count = stat_buf.inval_xmit_word_cnt;
771 pfc_host_stat->invalid_crc_count = stat_buf.inval_crc_cnt;
772
773 return pfc_host_stat;
774}
775
515struct fc_function_template qla2xxx_transport_functions = { 776struct fc_function_template qla2xxx_transport_functions = {
516 777
517 .show_host_node_name = 1, 778 .show_host_node_name = 1,
@@ -520,6 +781,10 @@ struct fc_function_template qla2xxx_transport_functions = {
520 781
521 .get_host_port_id = qla2x00_get_host_port_id, 782 .get_host_port_id = qla2x00_get_host_port_id,
522 .show_host_port_id = 1, 783 .show_host_port_id = 1,
784 .get_host_speed = qla2x00_get_host_speed,
785 .show_host_speed = 1,
786 .get_host_port_type = qla2x00_get_host_port_type,
787 .show_host_port_type = 1,
523 788
524 .dd_fcrport_size = sizeof(struct fc_port *), 789 .dd_fcrport_size = sizeof(struct fc_port *),
525 .show_rport_supported_classes = 1, 790 .show_rport_supported_classes = 1,
@@ -536,6 +801,7 @@ struct fc_function_template qla2xxx_transport_functions = {
536 .show_rport_dev_loss_tmo = 1, 801 .show_rport_dev_loss_tmo = 1,
537 802
538 .issue_fc_host_lip = qla2x00_issue_lip, 803 .issue_fc_host_lip = qla2x00_issue_lip,
804 .get_fc_host_stats = qla2x00_get_fc_host_stats,
539}; 805};
540 806
541void 807void
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index bad066e5772a..b31a03bbd14f 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -29,6 +29,7 @@
29#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
30#include <scsi/scsi_device.h> 30#include <scsi/scsi_device.h>
31#include <scsi/scsi_cmnd.h> 31#include <scsi/scsi_cmnd.h>
32#include <scsi/scsi_transport_fc.h>
32 33
33#if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE) 34#if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE)
34#if defined(CONFIG_SCSI_QLA21XX) || defined(CONFIG_SCSI_QLA21XX_MODULE) 35#if defined(CONFIG_SCSI_QLA21XX) || defined(CONFIG_SCSI_QLA21XX_MODULE)
@@ -181,6 +182,13 @@
181#define WRT_REG_DWORD(addr, data) writel(data,addr) 182#define WRT_REG_DWORD(addr, data) writel(data,addr)
182 183
183/* 184/*
185 * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an
186 * 133Mhz slot.
187 */
188#define RD_REG_WORD_PIO(addr) (inw((unsigned long)addr))
189#define WRT_REG_WORD_PIO(addr, data) (outw(data,(unsigned long)addr))
190
191/*
184 * Fibre Channel device definitions. 192 * Fibre Channel device definitions.
185 */ 193 */
186#define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */ 194#define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */
@@ -432,6 +440,9 @@ struct device_reg_2xxx {
432#define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040 440#define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040
433#define GPIO_LED_GREEN_OFF_AMBER_ON 0x0080 441#define GPIO_LED_GREEN_OFF_AMBER_ON 0x0080
434#define GPIO_LED_GREEN_ON_AMBER_ON 0x00C0 442#define GPIO_LED_GREEN_ON_AMBER_ON 0x00C0
443#define GPIO_LED_ALL_OFF 0x0000
444#define GPIO_LED_RED_ON_OTHER_OFF 0x0001 /* isp2322 */
445#define GPIO_LED_RGA_ON 0x00C1 /* isp2322: red green amber */
435 446
436 union { 447 union {
437 struct { 448 struct {
@@ -2199,6 +2210,15 @@ struct isp_operations {
2199 2210
2200 void (*fw_dump) (struct scsi_qla_host *, int); 2211 void (*fw_dump) (struct scsi_qla_host *, int);
2201 void (*ascii_fw_dump) (struct scsi_qla_host *); 2212 void (*ascii_fw_dump) (struct scsi_qla_host *);
2213
2214 int (*beacon_on) (struct scsi_qla_host *);
2215 int (*beacon_off) (struct scsi_qla_host *);
2216 void (*beacon_blink) (struct scsi_qla_host *);
2217
2218 uint8_t * (*read_optrom) (struct scsi_qla_host *, uint8_t *,
2219 uint32_t, uint32_t);
2220 int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t,
2221 uint32_t);
2202}; 2222};
2203 2223
2204/* 2224/*
@@ -2331,6 +2351,10 @@ typedef struct scsi_qla_host {
2331 uint16_t min_external_loopid; /* First external loop Id */ 2351 uint16_t min_external_loopid; /* First external loop Id */
2332 2352
2333 uint16_t link_data_rate; /* F/W operating speed */ 2353 uint16_t link_data_rate; /* F/W operating speed */
2354#define LDR_1GB 0
2355#define LDR_2GB 1
2356#define LDR_4GB 3
2357#define LDR_UNKNOWN 0xFFFF
2334 2358
2335 uint8_t current_topology; 2359 uint8_t current_topology;
2336 uint8_t prev_topology; 2360 uint8_t prev_topology;
@@ -2486,12 +2510,26 @@ typedef struct scsi_qla_host {
2486 uint8_t *port_name; 2510 uint8_t *port_name;
2487 uint32_t isp_abort_cnt; 2511 uint32_t isp_abort_cnt;
2488 2512
2513 /* Option ROM information. */
2514 char *optrom_buffer;
2515 uint32_t optrom_size;
2516 int optrom_state;
2517#define QLA_SWAITING 0
2518#define QLA_SREADING 1
2519#define QLA_SWRITING 2
2520
2489 /* Needed for BEACON */ 2521 /* Needed for BEACON */
2490 uint16_t beacon_blink_led; 2522 uint16_t beacon_blink_led;
2491 uint16_t beacon_green_on; 2523 uint8_t beacon_color_state;
2524#define QLA_LED_GRN_ON 0x01
2525#define QLA_LED_YLW_ON 0x02
2526#define QLA_LED_ABR_ON 0x04
2527#define QLA_LED_ALL_ON 0x07 /* yellow, green, amber. */
2528 /* ISP2322: red, green, amber. */
2492 2529
2493 uint16_t zio_mode; 2530 uint16_t zio_mode;
2494 uint16_t zio_timer; 2531 uint16_t zio_timer;
2532 struct fc_host_statistics fc_host_stat;
2495} scsi_qla_host_t; 2533} scsi_qla_host_t;
2496 2534
2497 2535
@@ -2557,7 +2595,9 @@ struct _qla2x00stats {
2557/* 2595/*
2558 * Flash support definitions 2596 * Flash support definitions
2559 */ 2597 */
2560#define FLASH_IMAGE_SIZE 131072 2598#define OPTROM_SIZE_2300 0x20000
2599#define OPTROM_SIZE_2322 0x100000
2600#define OPTROM_SIZE_24XX 0x100000
2561 2601
2562#include "qla_gbl.h" 2602#include "qla_gbl.h"
2563#include "qla_dbg.h" 2603#include "qla_dbg.h"
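The LDR_* defines added to qla_def.h encode the firmware's reported link rate; the qla2x00_get_host_speed() hunk earlier in this diff translates them into whole gigabits for the fc transport class. A trivial standalone version of that translation, using the values from the defines above (the function name is illustrative):

/* LDR_* encoding -> link speed in Gb/s, mirroring qla2x00_get_host_speed(). */
#include <stdio.h>
#include <stdint.h>

#define LDR_1GB     0
#define LDR_2GB     1
#define LDR_4GB     3
#define LDR_UNKNOWN 0xFFFF

static unsigned int ldr_to_gbps(uint16_t link_data_rate)
{
	switch (link_data_rate) {
	case LDR_1GB: return 1;
	case LDR_2GB: return 2;
	case LDR_4GB: return 4;
	default:      return 0;  /* LDR_UNKNOWN / link down */
	}
}

int main(void)
{
	printf("rate %u -> %u Gb/s\n", LDR_2GB, ldr_to_gbps(LDR_2GB));
	return 0;
}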
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 35266bd5d538..ffdc2680f049 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -75,12 +75,12 @@ extern void qla2x00_cmd_timeout(srb_t *);
75extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); 75extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
76extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int); 76extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int);
77 77
78extern void qla2x00_blink_led(scsi_qla_host_t *);
79
80extern int qla2x00_down_timeout(struct semaphore *, unsigned long); 78extern int qla2x00_down_timeout(struct semaphore *, unsigned long);
81 79
82extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *); 80extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
83 81
82extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
83
84/* 84/*
85 * Global Function Prototypes in qla_iocb.c source file. 85 * Global Function Prototypes in qla_iocb.c source file.
86 */ 86 */
@@ -185,6 +185,13 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *,
185extern int 185extern int
186qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map); 186qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
187 187
188extern int
189qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, link_stat_t *,
190 uint16_t *);
191
192extern int
193qla24xx_get_isp_stats(scsi_qla_host_t *, uint32_t *, uint32_t, uint16_t *);
194
188extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *); 195extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *);
189extern int qla24xx_abort_target(fc_port_t *); 196extern int qla24xx_abort_target(fc_port_t *);
190 197
@@ -228,6 +235,22 @@ extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
228extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, 235extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
229 uint32_t); 236 uint32_t);
230 237
238extern int qla2x00_beacon_on(struct scsi_qla_host *);
239extern int qla2x00_beacon_off(struct scsi_qla_host *);
240extern void qla2x00_beacon_blink(struct scsi_qla_host *);
241extern int qla24xx_beacon_on(struct scsi_qla_host *);
242extern int qla24xx_beacon_off(struct scsi_qla_host *);
243extern void qla24xx_beacon_blink(struct scsi_qla_host *);
244
245extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
246 uint32_t, uint32_t);
247extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *,
248 uint32_t, uint32_t);
249extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
250 uint32_t, uint32_t);
251extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
252 uint32_t, uint32_t);
253
231/* 254/*
232 * Global Function Prototypes in qla_dbg.c source file. 255 * Global Function Prototypes in qla_dbg.c source file.
233 */ 256 */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e67bb0997818..634ee174bff2 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -8,7 +8,6 @@
8 8
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <linux/vmalloc.h> 10#include <linux/vmalloc.h>
11#include <scsi/scsi_transport_fc.h>
12 11
13#include "qla_devtbl.h" 12#include "qla_devtbl.h"
14 13
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 7ec0b8d6f07b..6544b6d0891d 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -814,6 +814,7 @@ qla24xx_start_scsi(srb_t *sp)
814 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 814 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
815 815
816 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 816 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
817 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
817 818
818 /* Load SCSI command packet. */ 819 /* Load SCSI command packet. */
819 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 820 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 71a46fcee8cc..42aa7a7c1a73 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -402,9 +402,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
402 break; 402 break;
403 403
404 case MBA_LOOP_UP: /* Loop Up Event */ 404 case MBA_LOOP_UP: /* Loop Up Event */
405 ha->link_data_rate = 0;
406 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 405 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
407 link_speed = link_speeds[0]; 406 link_speed = link_speeds[0];
407 ha->link_data_rate = LDR_1GB;
408 } else { 408 } else {
409 link_speed = link_speeds[LS_UNKNOWN]; 409 link_speed = link_speeds[LS_UNKNOWN];
410 if (mb[1] < 5) 410 if (mb[1] < 5)
@@ -436,7 +436,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
436 } 436 }
437 437
438 ha->flags.management_server_logged_in = 0; 438 ha->flags.management_server_logged_in = 0;
439 ha->link_data_rate = 0; 439 ha->link_data_rate = LDR_UNKNOWN;
440 if (ql2xfdmienable) 440 if (ql2xfdmienable)
441 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 441 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
442 442
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 3099b379de9d..363dfdd042b0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -7,7 +7,6 @@
7#include "qla_def.h" 7#include "qla_def.h"
8 8
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <scsi/scsi_transport_fc.h>
11 10
12static void 11static void
13qla2x00_mbx_sem_timeout(unsigned long data) 12qla2x00_mbx_sem_timeout(unsigned long data)
@@ -1874,7 +1873,8 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1874 mcp->mb[3] = LSW(id_list_dma); 1873 mcp->mb[3] = LSW(id_list_dma);
1875 mcp->mb[6] = MSW(MSD(id_list_dma)); 1874 mcp->mb[6] = MSW(MSD(id_list_dma));
1876 mcp->mb[7] = LSW(MSD(id_list_dma)); 1875 mcp->mb[7] = LSW(MSD(id_list_dma));
1877 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2; 1876 mcp->mb[8] = 0;
1877 mcp->out_mb |= MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
1878 } else { 1878 } else {
1879 mcp->mb[1] = MSW(id_list_dma); 1879 mcp->mb[1] = MSW(id_list_dma);
1880 mcp->mb[2] = LSW(id_list_dma); 1880 mcp->mb[2] = LSW(id_list_dma);
@@ -2017,8 +2017,109 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
2017 2017
2018 return rval; 2018 return rval;
2019} 2019}
2020#endif
2021
2022/*
2023 * qla2x00_get_link_status
2024 *
2025 * Input:
2026 * ha = adapter block pointer.
2027 * loop_id = device loop ID.
2028 * ret_buf = pointer to link status return buffer.
2029 *
2030 * Returns:
2031 * 0 = success.
2032 * BIT_0 = mem alloc error.
2033 * BIT_1 = mailbox error.
2034 */
2035int
2036qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2037 link_stat_t *ret_buf, uint16_t *status)
2038{
2039 int rval;
2040 mbx_cmd_t mc;
2041 mbx_cmd_t *mcp = &mc;
2042 link_stat_t *stat_buf;
2043 dma_addr_t stat_buf_dma;
2044
2045 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);)
2046
2047 stat_buf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &stat_buf_dma);
2048 if (stat_buf == NULL) {
2049 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
2050 __func__, ha->host_no));
2051 return BIT_0;
2052 }
2053 memset(stat_buf, 0, sizeof(link_stat_t));
2054
2055 mcp->mb[0] = MBC_GET_LINK_STATUS;
2056 mcp->mb[2] = MSW(stat_buf_dma);
2057 mcp->mb[3] = LSW(stat_buf_dma);
2058 mcp->mb[6] = MSW(MSD(stat_buf_dma));
2059 mcp->mb[7] = LSW(MSD(stat_buf_dma));
2060 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2061 mcp->in_mb = MBX_0;
2062 if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
2063 mcp->mb[1] = loop_id;
2064 mcp->mb[4] = 0;
2065 mcp->mb[10] = 0;
2066 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2067 mcp->in_mb |= MBX_1;
2068 } else if (HAS_EXTENDED_IDS(ha)) {
2069 mcp->mb[1] = loop_id;
2070 mcp->mb[10] = 0;
2071 mcp->out_mb |= MBX_10|MBX_1;
2072 } else {
2073 mcp->mb[1] = loop_id << 8;
2074 mcp->out_mb |= MBX_1;
2075 }
2076 mcp->tov = 30;
2077 mcp->flags = IOCTL_CMD;
2078 rval = qla2x00_mailbox_command(ha, mcp);
2079
2080 if (rval == QLA_SUCCESS) {
2081 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2082 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
2083 __func__, ha->host_no, mcp->mb[0]);)
2084 status[0] = mcp->mb[0];
2085 rval = BIT_1;
2086 } else {
2087 /* copy over data -- firmware data is LE. */
2088 ret_buf->link_fail_cnt =
2089 le32_to_cpu(stat_buf->link_fail_cnt);
2090 ret_buf->loss_sync_cnt =
2091 le32_to_cpu(stat_buf->loss_sync_cnt);
2092 ret_buf->loss_sig_cnt =
2093 le32_to_cpu(stat_buf->loss_sig_cnt);
2094 ret_buf->prim_seq_err_cnt =
2095 le32_to_cpu(stat_buf->prim_seq_err_cnt);
2096 ret_buf->inval_xmit_word_cnt =
2097 le32_to_cpu(stat_buf->inval_xmit_word_cnt);
2098 ret_buf->inval_crc_cnt =
2099 le32_to_cpu(stat_buf->inval_crc_cnt);
2100
2101 DEBUG11(printk("%s(%ld): stat dump: fail_cnt=%d "
2102 "loss_sync=%d loss_sig=%d seq_err=%d "
2103 "inval_xmt_word=%d inval_crc=%d.\n", __func__,
2104 ha->host_no, stat_buf->link_fail_cnt,
2105 stat_buf->loss_sync_cnt, stat_buf->loss_sig_cnt,
2106 stat_buf->prim_seq_err_cnt,
2107 stat_buf->inval_xmit_word_cnt,
2108 stat_buf->inval_crc_cnt);)
2109 }
2110 } else {
2111 /* Failed. */
2112 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2113 ha->host_no, rval);)
2114 rval = BIT_1;
2115 }
2116
2117 dma_pool_free(ha->s_dma_pool, stat_buf, stat_buf_dma);
2020 2118
2021uint8_t 2119 return rval;
2120}
2121
2122int
2022qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords, 2123qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords,
2023 uint16_t *status) 2124 uint16_t *status)
2024{ 2125{
@@ -2080,7 +2181,6 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords,
2080 2181
2081 return rval; 2182 return rval;
2082} 2183}
2083#endif
2084 2184
2085int 2185int
2086qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp) 2186qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
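The qla2x00_get_link_status() routine added above copies the firmware's little-endian counters into the caller's buffer field by field with le32_to_cpu(). The sketch below shows the same per-field conversion in userspace, with le32toh()/htole32() from glibc's <endian.h> standing in for the kernel helpers; the struct fields are a subset chosen for illustration.

/* Per-field LE -> host conversion of a firmware statistics buffer. */
#include <stdio.h>
#include <stdint.h>
#include <endian.h>

struct link_stat_le {               /* as produced by firmware (LE) */
	uint32_t link_fail_cnt;
	uint32_t loss_sync_cnt;
	uint32_t loss_sig_cnt;
};

struct link_stat {                  /* host-order copy */
	uint32_t link_fail_cnt;
	uint32_t loss_sync_cnt;
	uint32_t loss_sig_cnt;
};

static void link_stat_to_cpu(struct link_stat *dst,
			     const struct link_stat_le *src)
{
	dst->link_fail_cnt = le32toh(src->link_fail_cnt);
	dst->loss_sync_cnt = le32toh(src->loss_sync_cnt);
	dst->loss_sig_cnt  = le32toh(src->loss_sig_cnt);
}

int main(void)
{
	struct link_stat_le raw = { htole32(3), htole32(1), htole32(0) };
	struct link_stat out;

	link_stat_to_cpu(&out, &raw);
	printf("link failures: %u\n", out.link_fail_cnt);
	return 0;
}

Converting at the mailbox layer lets qla2x00_get_fc_host_stats() in qla_attr.c copy the counters straight into struct fc_host_statistics without caring about byte order.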
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5866a7c706a8..9f91f1a20542 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -366,6 +366,12 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
366 goto qc_fail_command; 366 goto qc_fail_command;
367 } 367 }
368 368
369 /* Close window on fcport/rport state-transitioning. */
370 if (!*(fc_port_t **)rport->dd_data) {
371 cmd->result = DID_IMM_RETRY << 16;
372 goto qc_fail_command;
373 }
374
369 if (atomic_read(&fcport->state) != FCS_ONLINE) { 375 if (atomic_read(&fcport->state) != FCS_ONLINE) {
370 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 376 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
371 atomic_read(&ha->loop_state) == LOOP_DEAD) { 377 atomic_read(&ha->loop_state) == LOOP_DEAD) {
@@ -421,6 +427,12 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
421 goto qc24_fail_command; 427 goto qc24_fail_command;
422 } 428 }
423 429
430 /* Close window on fcport/rport state-transitioning. */
431 if (!*(fc_port_t **)rport->dd_data) {
432 cmd->result = DID_IMM_RETRY << 16;
433 goto qc24_fail_command;
434 }
435
424 if (atomic_read(&fcport->state) != FCS_ONLINE) { 436 if (atomic_read(&fcport->state) != FCS_ONLINE) {
425 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 437 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
426 atomic_read(&ha->loop_state) == LOOP_DEAD) { 438 atomic_read(&ha->loop_state) == LOOP_DEAD) {
@@ -513,7 +525,7 @@ qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
513 * Success (Adapter is online) : 0 525 * Success (Adapter is online) : 0
514 * Failed (Adapter is offline/disabled) : 1 526 * Failed (Adapter is offline/disabled) : 1
515 */ 527 */
516static int 528int
517qla2x00_wait_for_hba_online(scsi_qla_host_t *ha) 529qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
518{ 530{
519 int return_status; 531 int return_status;
@@ -1312,6 +1324,8 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1312 ha->ports = MAX_BUSES; 1324 ha->ports = MAX_BUSES;
1313 ha->init_cb_size = sizeof(init_cb_t); 1325 ha->init_cb_size = sizeof(init_cb_t);
1314 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER; 1326 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER;
1327 ha->link_data_rate = LDR_UNKNOWN;
1328 ha->optrom_size = OPTROM_SIZE_2300;
1315 1329
1316 /* Assign ISP specific operations. */ 1330 /* Assign ISP specific operations. */
1317 ha->isp_ops.pci_config = qla2100_pci_config; 1331 ha->isp_ops.pci_config = qla2100_pci_config;
@@ -1339,6 +1353,8 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1339 ha->isp_ops.write_nvram = qla2x00_write_nvram_data; 1353 ha->isp_ops.write_nvram = qla2x00_write_nvram_data;
1340 ha->isp_ops.fw_dump = qla2100_fw_dump; 1354 ha->isp_ops.fw_dump = qla2100_fw_dump;
1341 ha->isp_ops.ascii_fw_dump = qla2100_ascii_fw_dump; 1355 ha->isp_ops.ascii_fw_dump = qla2100_ascii_fw_dump;
1356 ha->isp_ops.read_optrom = qla2x00_read_optrom_data;
1357 ha->isp_ops.write_optrom = qla2x00_write_optrom_data;
1342 if (IS_QLA2100(ha)) { 1358 if (IS_QLA2100(ha)) {
1343 host->max_id = MAX_TARGETS_2100; 1359 host->max_id = MAX_TARGETS_2100;
1344 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 1360 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
@@ -1364,7 +1380,12 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1364 ha->isp_ops.intr_handler = qla2300_intr_handler; 1380 ha->isp_ops.intr_handler = qla2300_intr_handler;
1365 ha->isp_ops.fw_dump = qla2300_fw_dump; 1381 ha->isp_ops.fw_dump = qla2300_fw_dump;
1366 ha->isp_ops.ascii_fw_dump = qla2300_ascii_fw_dump; 1382 ha->isp_ops.ascii_fw_dump = qla2300_ascii_fw_dump;
1383 ha->isp_ops.beacon_on = qla2x00_beacon_on;
1384 ha->isp_ops.beacon_off = qla2x00_beacon_off;
1385 ha->isp_ops.beacon_blink = qla2x00_beacon_blink;
1367 ha->gid_list_info_size = 6; 1386 ha->gid_list_info_size = 6;
1387 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1388 ha->optrom_size = OPTROM_SIZE_2322;
1368 } else if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) { 1389 } else if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
1369 host->max_id = MAX_TARGETS_2200; 1390 host->max_id = MAX_TARGETS_2200;
1370 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1391 ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1400,7 +1421,13 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1400 ha->isp_ops.write_nvram = qla24xx_write_nvram_data; 1421 ha->isp_ops.write_nvram = qla24xx_write_nvram_data;
1401 ha->isp_ops.fw_dump = qla24xx_fw_dump; 1422 ha->isp_ops.fw_dump = qla24xx_fw_dump;
1402 ha->isp_ops.ascii_fw_dump = qla24xx_ascii_fw_dump; 1423 ha->isp_ops.ascii_fw_dump = qla24xx_ascii_fw_dump;
1424 ha->isp_ops.read_optrom = qla24xx_read_optrom_data;
1425 ha->isp_ops.write_optrom = qla24xx_write_optrom_data;
1426 ha->isp_ops.beacon_on = qla24xx_beacon_on;
1427 ha->isp_ops.beacon_off = qla24xx_beacon_off;
1428 ha->isp_ops.beacon_blink = qla24xx_beacon_blink;
1403 ha->gid_list_info_size = 8; 1429 ha->gid_list_info_size = 8;
1430 ha->optrom_size = OPTROM_SIZE_24XX;
1404 } 1431 }
1405 host->can_queue = ha->request_q_length + 128; 1432 host->can_queue = ha->request_q_length + 128;
1406 1433
@@ -1657,11 +1684,13 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
1657 spin_lock_irqsave(&fcport->rport_lock, flags); 1684 spin_lock_irqsave(&fcport->rport_lock, flags);
1658 fcport->drport = rport; 1685 fcport->drport = rport;
1659 fcport->rport = NULL; 1686 fcport->rport = NULL;
1687 *(fc_port_t **)rport->dd_data = NULL;
1660 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1688 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1661 set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); 1689 set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
1662 } else { 1690 } else {
1663 spin_lock_irqsave(&fcport->rport_lock, flags); 1691 spin_lock_irqsave(&fcport->rport_lock, flags);
1664 fcport->rport = NULL; 1692 fcport->rport = NULL;
1693 *(fc_port_t **)rport->dd_data = NULL;
1665 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1694 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1666 fc_remote_port_delete(rport); 1695 fc_remote_port_delete(rport);
1667 } 1696 }
@@ -2066,6 +2095,8 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2066 ha->fw_dumped = 0; 2095 ha->fw_dumped = 0;
2067 ha->fw_dump_reading = 0; 2096 ha->fw_dump_reading = 0;
2068 ha->fw_dump_buffer = NULL; 2097 ha->fw_dump_buffer = NULL;
2098
2099 vfree(ha->optrom_buffer);
2069} 2100}
2070 2101
2071/* 2102/*
@@ -2314,6 +2345,9 @@ qla2x00_do_dpc(void *data)
2314 if (!ha->interrupts_on) 2345 if (!ha->interrupts_on)
2315 ha->isp_ops.enable_intrs(ha); 2346 ha->isp_ops.enable_intrs(ha);
2316 2347
2348 if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags))
2349 ha->isp_ops.beacon_blink(ha);
2350
2317 ha->dpc_active = 0; 2351 ha->dpc_active = 0;
2318 } /* End of while(1) */ 2352 } /* End of while(1) */
2319 2353
@@ -2491,6 +2525,12 @@ qla2x00_timer(scsi_qla_host_t *ha)
2491 atomic_read(&ha->loop_down_timer))); 2525 atomic_read(&ha->loop_down_timer)));
2492 } 2526 }
2493 2527
2528 /* Check if beacon LED needs to be blinked */
2529 if (ha->beacon_blink_led == 1) {
2530 set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags);
2531 start_dpc++;
2532 }
2533
2494 /* Schedule the DPC routine if needed */ 2534 /* Schedule the DPC routine if needed */
2495 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || 2535 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
2496 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || 2536 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
@@ -2499,6 +2539,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
2499 start_dpc || 2539 start_dpc ||
2500 test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) || 2540 test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) ||
2501 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || 2541 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
2542 test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) ||
2502 test_bit(RELOGIN_NEEDED, &ha->dpc_flags)) && 2543 test_bit(RELOGIN_NEEDED, &ha->dpc_flags)) &&
2503 ha->dpc_wait && !ha->dpc_active) { 2544 ha->dpc_wait && !ha->dpc_active) {
2504 2545
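The qla_os.c hunks above close a window during rport teardown: qla2x00_schedule_rport_del() now clears the fc_port back-pointer stored in rport->dd_data under rport_lock, and both queuecommand paths return DID_IMM_RETRY when they find that pointer NULL. A minimal userspace model of the pattern follows (a pthread mutex in place of the spinlock, plain enums in place of SCSI result codes); it shows the check-then-retry idea, not the driver's actual I/O path.

/* Clear-under-lock teardown plus a NULL check in the submit path. */
#include <stdio.h>
#include <pthread.h>

struct fcport;

struct rport {
	struct fcport *dd_data;      /* back-pointer, NULL while transitioning */
};

struct fcport {
	struct rport *rport;
	pthread_mutex_t rport_lock;
};

enum { IO_QUEUED, IO_IMM_RETRY };

static int queue_io(struct rport *rport)
{
	if (!rport->dd_data)         /* port mid-removal: ask midlayer to retry */
		return IO_IMM_RETRY;
	/* ... build and issue the command against rport->dd_data ... */
	return IO_QUEUED;
}

static void schedule_rport_del(struct fcport *fcport)
{
	pthread_mutex_lock(&fcport->rport_lock);
	fcport->rport->dd_data = NULL;  /* close the window before deleting */
	fcport->rport = NULL;
	pthread_mutex_unlock(&fcport->rport_lock);
}

int main(void)
{
	struct rport rp;
	struct fcport fp = { .rport = &rp,
			     .rport_lock = PTHREAD_MUTEX_INITIALIZER };

	rp.dd_data = &fp;
	printf("before teardown: %d\n", queue_io(&rp));
	schedule_rport_del(&fp);
	printf("after teardown:  %d\n", queue_io(&rp));
	return 0;
}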
diff --git a/drivers/scsi/qla2xxx/qla_rscn.c b/drivers/scsi/qla2xxx/qla_rscn.c
index 2c3342108dd8..b70bebe18c01 100644
--- a/drivers/scsi/qla2xxx/qla_rscn.c
+++ b/drivers/scsi/qla2xxx/qla_rscn.c
@@ -6,8 +6,6 @@
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8 8
9#include <scsi/scsi_transport_fc.h>
10
11/** 9/**
12 * IO descriptor handle definitions. 10 * IO descriptor handle definitions.
13 * 11 *
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index f4d755a643e4..3866a5760f15 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -695,3 +695,966 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
695 695
696 return ret; 696 return ret;
697} 697}
698
699
700static inline void
701qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
702{
703 if (IS_QLA2322(ha)) {
704 /* Flip all colors. */
705 if (ha->beacon_color_state == QLA_LED_ALL_ON) {
706 /* Turn off. */
707 ha->beacon_color_state = 0;
708 *pflags = GPIO_LED_ALL_OFF;
709 } else {
710 /* Turn on. */
711 ha->beacon_color_state = QLA_LED_ALL_ON;
712 *pflags = GPIO_LED_RGA_ON;
713 }
714 } else {
715 /* Flip green led only. */
716 if (ha->beacon_color_state == QLA_LED_GRN_ON) {
717 /* Turn off. */
718 ha->beacon_color_state = 0;
719 *pflags = GPIO_LED_GREEN_OFF_AMBER_OFF;
720 } else {
721 /* Turn on. */
722 ha->beacon_color_state = QLA_LED_GRN_ON;
723 *pflags = GPIO_LED_GREEN_ON_AMBER_OFF;
724 }
725 }
726}
727
728void
729qla2x00_beacon_blink(struct scsi_qla_host *ha)
730{
731 uint16_t gpio_enable;
732 uint16_t gpio_data;
733 uint16_t led_color = 0;
734 unsigned long flags;
735 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
736
737 if (ha->pio_address)
738 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
739
740 spin_lock_irqsave(&ha->hardware_lock, flags);
741
742 /* Save the Original GPIOE. */
743 if (ha->pio_address) {
744 gpio_enable = RD_REG_WORD_PIO(&reg->gpioe);
745 gpio_data = RD_REG_WORD_PIO(&reg->gpiod);
746 } else {
747 gpio_enable = RD_REG_WORD(&reg->gpioe);
748 gpio_data = RD_REG_WORD(&reg->gpiod);
749 }
750
751 /* Set the modified gpio_enable values */
752 gpio_enable |= GPIO_LED_MASK;
753
754 if (ha->pio_address) {
755 WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable);
756 } else {
757 WRT_REG_WORD(&reg->gpioe, gpio_enable);
758 RD_REG_WORD(&reg->gpioe);
759 }
760
761 qla2x00_flip_colors(ha, &led_color);
762
763 /* Clear out any previously set LED color. */
764 gpio_data &= ~GPIO_LED_MASK;
765
766 /* Set the new input LED color to GPIOD. */
767 gpio_data |= led_color;
768
769 /* Set the modified gpio_data values */
770 if (ha->pio_address) {
771 WRT_REG_WORD_PIO(&reg->gpiod, gpio_data);
772 } else {
773 WRT_REG_WORD(&reg->gpiod, gpio_data);
774 RD_REG_WORD(&reg->gpiod);
775 }
776
777 spin_unlock_irqrestore(&ha->hardware_lock, flags);
778}
779
780int
781qla2x00_beacon_on(struct scsi_qla_host *ha)
782{
783 uint16_t gpio_enable;
784 uint16_t gpio_data;
785 unsigned long flags;
786 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
787
788 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
789 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
790
791 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) {
792 qla_printk(KERN_WARNING, ha,
793 "Unable to update fw options (beacon on).\n");
794 return QLA_FUNCTION_FAILED;
795 }
796
797 if (ha->pio_address)
798 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
799
800 /* Turn off LEDs. */
801 spin_lock_irqsave(&ha->hardware_lock, flags);
802 if (ha->pio_address) {
803 gpio_enable = RD_REG_WORD_PIO(&reg->gpioe);
804 gpio_data = RD_REG_WORD_PIO(&reg->gpiod);
805 } else {
806 gpio_enable = RD_REG_WORD(&reg->gpioe);
807 gpio_data = RD_REG_WORD(&reg->gpiod);
808 }
809 gpio_enable |= GPIO_LED_MASK;
810
811 /* Set the modified gpio_enable values. */
812 if (ha->pio_address) {
813 WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable);
814 } else {
815 WRT_REG_WORD(&reg->gpioe, gpio_enable);
816 RD_REG_WORD(&reg->gpioe);
817 }
818
819 /* Clear out previously set LED colour. */
820 gpio_data &= ~GPIO_LED_MASK;
821 if (ha->pio_address) {
822 WRT_REG_WORD_PIO(&reg->gpiod, gpio_data);
823 } else {
824 WRT_REG_WORD(&reg->gpiod, gpio_data);
825 RD_REG_WORD(&reg->gpiod);
826 }
827 spin_unlock_irqrestore(&ha->hardware_lock, flags);
828
829 /*
830 * Let the per HBA timer kick off the blinking process based on
831 * the following flags. No need to do anything else now.
832 */
833 ha->beacon_blink_led = 1;
834 ha->beacon_color_state = 0;
835
836 return QLA_SUCCESS;
837}
838
839int
840qla2x00_beacon_off(struct scsi_qla_host *ha)
841{
842 int rval = QLA_SUCCESS;
843
844 ha->beacon_blink_led = 0;
845
846 /* Set the on flag so when it gets flipped it will be off. */
847 if (IS_QLA2322(ha))
848 ha->beacon_color_state = QLA_LED_ALL_ON;
849 else
850 ha->beacon_color_state = QLA_LED_GRN_ON;
851
852 ha->isp_ops.beacon_blink(ha); /* This turns green LED off */
853
854 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
855 ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7;
856
857 rval = qla2x00_set_fw_options(ha, ha->fw_options);
858 if (rval != QLA_SUCCESS)
859 qla_printk(KERN_WARNING, ha,
860 "Unable to update fw options (beacon off).\n");
861 return rval;
862}
863
864
865static inline void
866qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
867{
868 /* Flip all colors. */
869 if (ha->beacon_color_state == QLA_LED_ALL_ON) {
870 /* Turn off. */
871 ha->beacon_color_state = 0;
872 *pflags = 0;
873 } else {
874 /* Turn on. */
875 ha->beacon_color_state = QLA_LED_ALL_ON;
876 *pflags = GPDX_LED_YELLOW_ON | GPDX_LED_AMBER_ON;
877 }
878}
879
880void
881qla24xx_beacon_blink(struct scsi_qla_host *ha)
882{
883 uint16_t led_color = 0;
884 uint32_t gpio_data;
885 unsigned long flags;
886 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
887
888 /* Save the Original GPIOD. */
889 spin_lock_irqsave(&ha->hardware_lock, flags);
890 gpio_data = RD_REG_DWORD(&reg->gpiod);
891
892 /* Enable the gpio_data reg for update. */
893 gpio_data |= GPDX_LED_UPDATE_MASK;
894
895 WRT_REG_DWORD(&reg->gpiod, gpio_data);
896 gpio_data = RD_REG_DWORD(&reg->gpiod);
897
898 /* Set the color bits. */
899 qla24xx_flip_colors(ha, &led_color);
900
901 /* Clear out any previously set LED color. */
902 gpio_data &= ~GPDX_LED_COLOR_MASK;
903
904 /* Set the new input LED color to GPIOD. */
905 gpio_data |= led_color;
906
907 /* Set the modified gpio_data values. */
908 WRT_REG_DWORD(&reg->gpiod, gpio_data);
909 gpio_data = RD_REG_DWORD(&reg->gpiod);
910 spin_unlock_irqrestore(&ha->hardware_lock, flags);
911}
912
913int
914qla24xx_beacon_on(struct scsi_qla_host *ha)
915{
916 uint32_t gpio_data;
917 unsigned long flags;
918 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
919
920 if (ha->beacon_blink_led == 0) {
921 /* Enable firmware for update */
922 ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
923
924 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS)
925 return QLA_FUNCTION_FAILED;
926
927 if (qla2x00_get_fw_options(ha, ha->fw_options) !=
928 QLA_SUCCESS) {
929 qla_printk(KERN_WARNING, ha,
930 "Unable to update fw options (beacon on).\n");
931 return QLA_FUNCTION_FAILED;
932 }
933
934 spin_lock_irqsave(&ha->hardware_lock, flags);
935 gpio_data = RD_REG_DWORD(&reg->gpiod);
936
937 /* Enable the gpio_data reg for update. */
938 gpio_data |= GPDX_LED_UPDATE_MASK;
939 WRT_REG_DWORD(&reg->gpiod, gpio_data);
940 RD_REG_DWORD(&reg->gpiod);
941
942 spin_unlock_irqrestore(&ha->hardware_lock, flags);
943 }
944
945 /* So all colors blink together. */
946 ha->beacon_color_state = 0;
947
948 /* Let the per HBA timer kick off the blinking process. */
949 ha->beacon_blink_led = 1;
950
951 return QLA_SUCCESS;
952}
953
954int
955qla24xx_beacon_off(struct scsi_qla_host *ha)
956{
957 uint32_t gpio_data;
958 unsigned long flags;
959 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
960
961 ha->beacon_blink_led = 0;
962 ha->beacon_color_state = QLA_LED_ALL_ON;
963
964 ha->isp_ops.beacon_blink(ha); /* Will flip to all off. */
965
966 /* Give control back to firmware. */
967 spin_lock_irqsave(&ha->hardware_lock, flags);
968 gpio_data = RD_REG_DWORD(&reg->gpiod);
969
970 /* Disable the gpio_data reg for update. */
971 gpio_data &= ~GPDX_LED_UPDATE_MASK;
972 WRT_REG_DWORD(&reg->gpiod, gpio_data);
973 RD_REG_DWORD(&reg->gpiod);
974 spin_unlock_irqrestore(&ha->hardware_lock, flags);
975
976 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
977
978 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) {
979 qla_printk(KERN_WARNING, ha,
980 "Unable to update fw options (beacon off).\n");
981 return QLA_FUNCTION_FAILED;
982 }
983
984 if (qla2x00_get_fw_options(ha, ha->fw_options) != QLA_SUCCESS) {
985 qla_printk(KERN_WARNING, ha,
986 "Unable to get fw options (beacon off).\n");
987 return QLA_FUNCTION_FAILED;
988 }
989
990 return QLA_SUCCESS;
991}
992
993
994/*
995 * Flash support routines
996 */
997
998/**
999 * qla2x00_flash_enable() - Setup flash for reading and writing.
1000 * @ha: HA context
1001 */
1002static void
1003qla2x00_flash_enable(scsi_qla_host_t *ha)
1004{
1005 uint16_t data;
1006 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1007
1008 data = RD_REG_WORD(&reg->ctrl_status);
1009 data |= CSR_FLASH_ENABLE;
1010 WRT_REG_WORD(&reg->ctrl_status, data);
1011 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1012}
1013
1014/**
1015 * qla2x00_flash_disable() - Disable flash and allow RISC to run.
1016 * @ha: HA context
1017 */
1018static void
1019qla2x00_flash_disable(scsi_qla_host_t *ha)
1020{
1021 uint16_t data;
1022 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1023
1024 data = RD_REG_WORD(&reg->ctrl_status);
1025 data &= ~(CSR_FLASH_ENABLE);
1026 WRT_REG_WORD(&reg->ctrl_status, data);
1027 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1028}
1029
1030/**
1031 * qla2x00_read_flash_byte() - Reads a byte from flash
1032 * @ha: HA context
1033 * @addr: Address in flash to read
1034 *
1035 * A word is read from the chip, but only the lower byte is valid.
1036 *
1037 * Returns the byte read from flash @addr.
1038 */
1039static uint8_t
1040qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr)
1041{
1042 uint16_t data;
1043 uint16_t bank_select;
1044 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1045
1046 bank_select = RD_REG_WORD(&reg->ctrl_status);
1047
1048 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
1049 /* Specify 64K address range: */
1050 /* clear out Module Select and Flash Address bits [19:16]. */
1051 bank_select &= ~0xf8;
1052 bank_select |= addr >> 12 & 0xf0;
1053 bank_select |= CSR_FLASH_64K_BANK;
1054 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1055 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1056
1057 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
1058 data = RD_REG_WORD(&reg->flash_data);
1059
1060 return (uint8_t)data;
1061 }
1062
1063 /* Setup bit 16 of flash address. */
1064 if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
1065 bank_select |= CSR_FLASH_64K_BANK;
1066 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1067 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1068 } else if (((addr & BIT_16) == 0) &&
1069 (bank_select & CSR_FLASH_64K_BANK)) {
1070 bank_select &= ~(CSR_FLASH_64K_BANK);
1071 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1072 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1073 }
1074
1075 /* Always perform IO mapped accesses to the FLASH registers. */
1076 if (ha->pio_address) {
1077 uint16_t data2;
1078
1079 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
1080 WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr);
1081 do {
1082 data = RD_REG_WORD_PIO(&reg->flash_data);
1083 barrier();
1084 cpu_relax();
1085 data2 = RD_REG_WORD_PIO(&reg->flash_data);
1086 } while (data != data2);
1087 } else {
1088 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
1089 data = qla2x00_debounce_register(&reg->flash_data);
1090 }
1091
1092 return (uint8_t)data;
1093}
1094
1095/**
1096 * qla2x00_write_flash_byte() - Write a byte to flash
1097 * @ha: HA context
1098 * @addr: Address in flash to write
1099 * @data: Data to write
1100 */
1101static void
1102qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data)
1103{
1104 uint16_t bank_select;
1105 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1106
1107 bank_select = RD_REG_WORD(&reg->ctrl_status);
1108 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
1109 /* Specify 64K address range: */
1110 /* clear out Module Select and Flash Address bits [19:16]. */
1111 bank_select &= ~0xf8;
1112 bank_select |= addr >> 12 & 0xf0;
1113 bank_select |= CSR_FLASH_64K_BANK;
1114 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1115 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1116
1117 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
1118 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1119 WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
1120 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1121
1122 return;
1123 }
1124
1125 /* Setup bit 16 of flash address. */
1126 if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
1127 bank_select |= CSR_FLASH_64K_BANK;
1128 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1129 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1130 } else if (((addr & BIT_16) == 0) &&
1131 (bank_select & CSR_FLASH_64K_BANK)) {
1132 bank_select &= ~(CSR_FLASH_64K_BANK);
1133 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1134 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1135 }
1136
1137 /* Always perform IO mapped accesses to the FLASH registers. */
1138 if (ha->pio_address) {
1139 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
1140 WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr);
1141 WRT_REG_WORD_PIO(&reg->flash_data, (uint16_t)data);
1142 } else {
1143 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
1144 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1145 WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
1146 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1147 }
1148}
1149
1150/**
1151 * qla2x00_poll_flash() - Polls flash for completion.
1152 * @ha: HA context
1153 * @addr: Address in flash to poll
1154 * @poll_data: Data to be polled
1155 * @man_id: Flash manufacturer ID
1156 * @flash_id: Flash ID
1157 *
1158 * This function polls the device until bit 7 of what is read matches data
1159 * bit 7 or until data bit 5 becomes a 1. If that happens, the flash ROM timed
1160 * out (a fatal error). The flash book recommends reading bit 7 again after
1161 * reading bit 5 as a 1.
1162 *
1163 * Returns 0 on success, else non-zero.
1164 */
1165static int
1166qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data,
1167 uint8_t man_id, uint8_t flash_id)
1168{
1169 int status;
1170 uint8_t flash_data;
1171 uint32_t cnt;
1172
1173 status = 1;
1174
1175 /* Wait for 30 seconds for command to finish. */
1176 poll_data &= BIT_7;
1177 for (cnt = 3000000; cnt; cnt--) {
1178 flash_data = qla2x00_read_flash_byte(ha, addr);
1179 if ((flash_data & BIT_7) == poll_data) {
1180 status = 0;
1181 break;
1182 }
1183
1184 if (man_id != 0x40 && man_id != 0xda) {
1185 if ((flash_data & BIT_5) && cnt > 2)
1186 cnt = 2;
1187 }
1188 udelay(10);
1189 barrier();
1190 }
1191 return status;
1192}
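/*
 * For reference, the "flash book" sequence described in the comment above,
 * written out directly (a sketch, not driver code): poll DQ7 until it matches
 * the expected data bit 7; if DQ5 reports a timeout first, read DQ7 one more
 * time before declaring failure. The driver's loop above gets a similar
 * effect by capping the remaining retry count once bit 5 is seen.
 */
static int qla2x00_dq7_poll_sketch(scsi_qla_host_t *ha, uint32_t addr,
    uint8_t expected)
{
	uint32_t cnt;
	uint8_t byte;

	for (cnt = 3000000; cnt; cnt--) {
		byte = qla2x00_read_flash_byte(ha, addr);
		if ((byte & BIT_7) == (expected & BIT_7))
			return 0;			/* operation completed */
		if (byte & BIT_5) {			/* device signalled timeout */
			byte = qla2x00_read_flash_byte(ha, addr);
			return (byte & BIT_7) == (expected & BIT_7) ? 0 : 1;
		}
		udelay(10);
	}
	return 1;					/* never completed */
}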
1193
1194#define IS_OEM_001(ha) \
1195 ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2322 && \
1196 (ha)->pdev->subsystem_vendor == 0x1028 && \
1197 (ha)->pdev->subsystem_device == 0x0170)
1198
1199/**
1200 * qla2x00_program_flash_address() - Programs a flash address
1201 * @ha: HA context
1202 * @addr: Address in flash to program
1203 * @data: Data to be written in flash
1204 * @man_id: Flash manufacturer ID
1205 * @flash_id: Flash ID
1206 *
1207 * Returns 0 on success, else non-zero.
1208 */
1209static int
1210qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data,
1211 uint8_t man_id, uint8_t flash_id)
1212{
1213 /* Write Program Command Sequence. */
1214 if (IS_OEM_001(ha)) {
1215 qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
1216 qla2x00_write_flash_byte(ha, 0x555, 0x55);
1217 qla2x00_write_flash_byte(ha, 0xaaa, 0xa0);
1218 qla2x00_write_flash_byte(ha, addr, data);
1219 } else {
1220 if (man_id == 0xda && flash_id == 0xc1) {
1221 qla2x00_write_flash_byte(ha, addr, data);
1222 if (addr & 0x7e)
1223 return 0;
1224 } else {
1225 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1226 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1227 qla2x00_write_flash_byte(ha, 0x5555, 0xa0);
1228 qla2x00_write_flash_byte(ha, addr, data);
1229 }
1230 }
1231
1232 udelay(150);
1233
1234 /* Wait for write to complete. */
1235 return qla2x00_poll_flash(ha, addr, data, man_id, flash_id);
1236}
1237
1238/**
1239 * qla2x00_erase_flash() - Erase the flash.
1240 * @ha: HA context
1241 * @man_id: Flash manufacturer ID
1242 * @flash_id: Flash ID
1243 *
1244 * Returns 0 on success, else non-zero.
1245 */
1246static int
1247qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id)
1248{
1249 /* Individual Sector Erase Command Sequence */
1250 if (IS_OEM_001(ha)) {
1251 qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
1252 qla2x00_write_flash_byte(ha, 0x555, 0x55);
1253 qla2x00_write_flash_byte(ha, 0xaaa, 0x80);
1254 qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
1255 qla2x00_write_flash_byte(ha, 0x555, 0x55);
1256 qla2x00_write_flash_byte(ha, 0xaaa, 0x10);
1257 } else {
1258 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1259 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1260 qla2x00_write_flash_byte(ha, 0x5555, 0x80);
1261 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1262 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1263 qla2x00_write_flash_byte(ha, 0x5555, 0x10);
1264 }
1265
1266 udelay(150);
1267
1268 /* Wait for erase to complete. */
1269 return qla2x00_poll_flash(ha, 0x00, 0x80, man_id, flash_id);
1270}
1271
1272/**
1273 * qla2x00_erase_flash_sector() - Erase a flash sector.
1274 * @ha: HA context
1275 * @addr: Flash sector to erase
1276 * @sec_mask: Sector address mask
1277 * @man_id: Flash manufacturer ID
1278 * @flash_id: Flash ID
1279 *
1280 * Returns 0 on success, else non-zero.
1281 */
1282static int
1283qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr,
1284 uint32_t sec_mask, uint8_t man_id, uint8_t flash_id)
1285{
1286 /* Individual Sector Erase Command Sequence */
1287 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1288 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1289 qla2x00_write_flash_byte(ha, 0x5555, 0x80);
1290 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1291 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1292 if (man_id == 0x1f && flash_id == 0x13)
1293 qla2x00_write_flash_byte(ha, addr & sec_mask, 0x10);
1294 else
1295 qla2x00_write_flash_byte(ha, addr & sec_mask, 0x30);
1296
1297 udelay(150);
1298
1299 /* Wait for erase to complete. */
1300 return qla2x00_poll_flash(ha, addr, 0x80, man_id, flash_id);
1301}
1302
1303/**
1304 * qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip.
1305 * @man_id: Flash manufacturer ID
1306 * @flash_id: Flash ID
1307 */
1308static void
1309qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
1310 uint8_t *flash_id)
1311{
1312 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1313 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1314 qla2x00_write_flash_byte(ha, 0x5555, 0x90);
1315 *man_id = qla2x00_read_flash_byte(ha, 0x0000);
1316 *flash_id = qla2x00_read_flash_byte(ha, 0x0001);
1317 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1318 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1319 qla2x00_write_flash_byte(ha, 0x5555, 0xf0);
1320}
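/*
 * Minimal usage sketch (a hypothetical helper, not part of the driver):
 * read and log the JEDEC IDs. Assumes the same context as the caller in
 * qla2x00_write_optrom_data() below -- flash enabled via
 * qla2x00_flash_enable() and hardware_lock held.
 */
static void qla2x00_report_flash_ids_sketch(scsi_qla_host_t *ha)
{
	uint8_t man_id, flash_id;

	qla2x00_get_flash_manufacturer(ha, &man_id, &flash_id);
	qla_printk(KERN_INFO, ha, "Flash IDs: man_id=0x%02x flash_id=0x%02x.\n",
	    man_id, flash_id);
}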
1321
1322
1323static inline void
1324qla2x00_suspend_hba(struct scsi_qla_host *ha)
1325{
1326 int cnt;
1327 unsigned long flags;
1328 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1329
1330 /* Suspend HBA. */
1331 scsi_block_requests(ha->host);
1332 ha->isp_ops.disable_intrs(ha);
1333 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1334
1335 /* Pause RISC. */
1336 spin_lock_irqsave(&ha->hardware_lock, flags);
1337 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
1338 RD_REG_WORD(&reg->hccr);
1339 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1340 for (cnt = 0; cnt < 30000; cnt++) {
1341 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
1342 break;
1343 udelay(100);
1344 }
1345 } else {
1346 udelay(10);
1347 }
1348 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1349}
1350
1351static inline void
1352qla2x00_resume_hba(struct scsi_qla_host *ha)
1353{
1354 /* Resume HBA. */
1355 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1356 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1357 up(ha->dpc_wait);
1358 qla2x00_wait_for_hba_online(ha);
1359 scsi_unblock_requests(ha->host);
1360}
1361
1362uint8_t *
1363qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1364 uint32_t offset, uint32_t length)
1365{
1366 unsigned long flags;
1367 uint32_t addr, midpoint;
1368 uint8_t *data;
1369 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1370
1371 /* Suspend HBA. */
1372 qla2x00_suspend_hba(ha);
1373
1374 /* Go with read. */
1375 spin_lock_irqsave(&ha->hardware_lock, flags);
1376 midpoint = ha->optrom_size / 2;
1377
1378 qla2x00_flash_enable(ha);
1379 WRT_REG_WORD(&reg->nvram, 0);
1380 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
1381 for (addr = offset, data = buf; addr < length; addr++, data++) {
1382 if (addr == midpoint) {
1383 WRT_REG_WORD(&reg->nvram, NVR_SELECT);
1384 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
1385 }
1386
1387 *data = qla2x00_read_flash_byte(ha, addr);
1388 }
1389 qla2x00_flash_disable(ha);
1390 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1391
1392 /* Resume HBA. */
1393 qla2x00_resume_hba(ha);
1394
1395 return buf;
1396}
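/*
 * Hypothetical caller sketch (an assumption, not driver code): dump the
 * whole option ROM into a freshly allocated buffer, which the caller owns
 * and must free. vmalloc() would need <linux/vmalloc.h> at the top of the
 * file.
 */
static uint8_t *qla2x00_dump_optrom_sketch(struct scsi_qla_host *ha)
{
	uint8_t *buf;

	buf = vmalloc(ha->optrom_size);
	if (!buf)
		return NULL;
	return qla2x00_read_optrom_data(ha, buf, 0, ha->optrom_size);
}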
1397
1398int
1399qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1400 uint32_t offset, uint32_t length)
1401{
1402
1403 int rval;
1404 unsigned long flags;
1405 uint8_t man_id, flash_id, sec_number, data;
1406 uint16_t wd;
1407 uint32_t addr, liter, sec_mask, rest_addr;
1408 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1409
1410 /* Suspend HBA. */
1411 qla2x00_suspend_hba(ha);
1412
1413 rval = QLA_SUCCESS;
1414 sec_number = 0;
1415
1416 /* Reset ISP chip. */
1417 spin_lock_irqsave(&ha->hardware_lock, flags);
1418 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1419 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
1420
1421 /* Go with write. */
1422 qla2x00_flash_enable(ha);
1423 do { /* Loop once to provide quick error exit */
1424 /* Structure of flash memory based on manufacturer */
1425 if (IS_OEM_001(ha)) {
1426 /* OEM variant with special flash part. */
1427 man_id = flash_id = 0;
1428 rest_addr = 0xffff;
1429 sec_mask = 0x10000;
1430 goto update_flash;
1431 }
1432 qla2x00_get_flash_manufacturer(ha, &man_id, &flash_id);
1433 switch (man_id) {
1434 case 0x20: /* ST flash. */
1435 if (flash_id == 0xd2 || flash_id == 0xe3) {
1436 /*
1437 * ST m29w008at part - 64kb sector size with
1438 * 32kb,8kb,8kb,16kb sectors at memory address
1439 * 0xf0000.
1440 */
1441 rest_addr = 0xffff;
1442 sec_mask = 0x10000;
1443 break;
1444 }
1445 /*
1446 * ST m29w010b part - 16kb sector size
1447 * Default to 16kb sectors
1448 */
1449 rest_addr = 0x3fff;
1450 sec_mask = 0x1c000;
1451 break;
1452 case 0x40: /* Mostel flash. */
1453 /* Mostel v29c51001 part - 512 byte sector size. */
1454 rest_addr = 0x1ff;
1455 sec_mask = 0x1fe00;
1456 break;
1457 case 0xbf: /* SST flash. */
1458 /* SST39sf10 part - 4kb sector size. */
1459 rest_addr = 0xfff;
1460 sec_mask = 0x1f000;
1461 break;
1462 case 0xda: /* Winbond flash. */
1463 /* Winbond W29EE011 part - 256 byte sector size. */
1464 rest_addr = 0x7f;
1465 sec_mask = 0x1ff80;
1466 break;
1467 case 0xc2: /* Macronix flash. */
1468 /* 64k sector size. */
1469 if (flash_id == 0x38 || flash_id == 0x4f) {
1470 rest_addr = 0xffff;
1471 sec_mask = 0x10000;
1472 break;
1473 }
1474 /* Fall through... */
1475
1476 case 0x1f: /* Atmel flash. */
1477 /* 512k sector size. */
1478 if (flash_id == 0x13) {
1479 rest_addr = 0x7fffffff;
1480 sec_mask = 0x80000000;
1481 break;
1482 }
1483 /* Fall through... */
1484
1485 case 0x01: /* AMD flash. */
1486 if (flash_id == 0x38 || flash_id == 0x40 ||
1487 flash_id == 0x4f) {
1488 /* Am29LV081 part - 64kb sector size. */
1489 /* Am29LV002BT part - 64kb sector size. */
1490 rest_addr = 0xffff;
1491 sec_mask = 0x10000;
1492 break;
1493 } else if (flash_id == 0x3e) {
1494 /*
1495 * Am29LV008b part - 64kb sector size with
1496                                 * 32kb,8kb,8kb,16kb sectors at memory address
1497                                 * 0xf0000.
1498 */
1499 rest_addr = 0xffff;
1500 sec_mask = 0x10000;
1501 break;
1502 } else if (flash_id == 0x20 || flash_id == 0x6e) {
1503 /*
1504 * Am29LV010 part or AM29f010 - 16kb sector
1505 * size.
1506 */
1507 rest_addr = 0x3fff;
1508 sec_mask = 0x1c000;
1509 break;
1510 } else if (flash_id == 0x6d) {
1511 /* Am29LV001 part - 8kb sector size. */
1512 rest_addr = 0x1fff;
1513 sec_mask = 0x1e000;
1514 break;
1515 }
1516 default:
1517 /* Default to 16 kb sector size. */
1518 rest_addr = 0x3fff;
1519 sec_mask = 0x1c000;
1520 break;
1521 }
1522
1523update_flash:
1524 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
1525 if (qla2x00_erase_flash(ha, man_id, flash_id)) {
1526 rval = QLA_FUNCTION_FAILED;
1527 break;
1528 }
1529 }
1530
1531 for (addr = offset, liter = 0; liter < length; liter++,
1532 addr++) {
1533 data = buf[liter];
1534 /* Are we at the beginning of a sector? */
1535 if ((addr & rest_addr) == 0) {
1536 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
1537 if (addr >= 0x10000UL) {
1538 if (((addr >> 12) & 0xf0) &&
1539 ((man_id == 0x01 &&
1540 flash_id == 0x3e) ||
1541 (man_id == 0x20 &&
1542 flash_id == 0xd2))) {
1543 sec_number++;
1544 if (sec_number == 1) {
1545 rest_addr =
1546 0x7fff;
1547 sec_mask =
1548 0x18000;
1549 } else if (
1550 sec_number == 2 ||
1551 sec_number == 3) {
1552 rest_addr =
1553 0x1fff;
1554 sec_mask =
1555 0x1e000;
1556 } else if (
1557 sec_number == 4) {
1558 rest_addr =
1559 0x3fff;
1560 sec_mask =
1561 0x1c000;
1562 }
1563 }
1564 }
1565 } else if (addr == ha->optrom_size / 2) {
1566 WRT_REG_WORD(&reg->nvram, NVR_SELECT);
1567 RD_REG_WORD(&reg->nvram);
1568 }
1569
1570 if (flash_id == 0xda && man_id == 0xc1) {
1571 qla2x00_write_flash_byte(ha, 0x5555,
1572 0xaa);
1573 qla2x00_write_flash_byte(ha, 0x2aaa,
1574 0x55);
1575 qla2x00_write_flash_byte(ha, 0x5555,
1576 0xa0);
1577 } else if (!IS_QLA2322(ha) && !IS_QLA6322(ha)) {
1578 /* Then erase it */
1579 if (qla2x00_erase_flash_sector(ha,
1580 addr, sec_mask, man_id,
1581 flash_id)) {
1582 rval = QLA_FUNCTION_FAILED;
1583 break;
1584 }
1585 if (man_id == 0x01 && flash_id == 0x6d)
1586 sec_number++;
1587 }
1588 }
1589
1590 if (man_id == 0x01 && flash_id == 0x6d) {
1591 if (sec_number == 1 &&
1592 addr == (rest_addr - 1)) {
1593 rest_addr = 0x0fff;
1594 sec_mask = 0x1f000;
1595 } else if (sec_number == 3 && (addr & 0x7ffe)) {
1596 rest_addr = 0x3fff;
1597 sec_mask = 0x1c000;
1598 }
1599 }
1600
1601 if (qla2x00_program_flash_address(ha, addr, data,
1602 man_id, flash_id)) {
1603 rval = QLA_FUNCTION_FAILED;
1604 break;
1605 }
1606 }
1607 } while (0);
1608 qla2x00_flash_disable(ha);
1609 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1610
1611 /* Resume HBA. */
1612 qla2x00_resume_hba(ha);
1613
1614 return rval;
1615}
1616
1617uint8_t *
1618qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1619 uint32_t offset, uint32_t length)
1620{
1621 /* Suspend HBA. */
1622 scsi_block_requests(ha->host);
1623 ha->isp_ops.disable_intrs(ha);
1624 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1625
1626 /* Go with read. */
1627 qla24xx_read_flash_data(ha, (uint32_t *)buf, offset >> 2, length >> 2);
1628
1629 /* Resume HBA. */
1630 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1631 ha->isp_ops.enable_intrs(ha);
1632 scsi_unblock_requests(ha->host);
1633
1634 return buf;
1635}
1636
1637int
1638qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1639 uint32_t offset, uint32_t length)
1640{
1641 int rval;
1642
1643 /* Suspend HBA. */
1644 scsi_block_requests(ha->host);
1645 ha->isp_ops.disable_intrs(ha);
1646 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1647
1648 /* Go with write. */
1649 rval = qla24xx_write_flash_data(ha, (uint32_t *)buf, offset >> 2,
1650 length >> 2);
1651
1652 /* Resume HBA -- RISC reset needed. */
1653 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1654 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1655 up(ha->dpc_wait);
1656 qla2x00_wait_for_hba_online(ha);
1657 scsi_unblock_requests(ha->host);
1658
1659 return rval;
1660}
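/*
 * Usage note as a sketch (an assumption about callers, not part of this
 * file): the 24xx helpers above convert byte offsets and lengths into
 * 32-bit word counts (offset >> 2, length >> 2), so anything not 4-byte
 * aligned is silently truncated. A cautious wrapper might reject such
 * requests up front.
 */
static int qla24xx_write_region_sketch(struct scsi_qla_host *ha, uint8_t *buf,
    uint32_t offset, uint32_t length)
{
	if ((offset | length) & 3)
		return QLA_FUNCTION_FAILED;	/* not word aligned */
	return qla24xx_write_optrom_data(ha, buf, offset, length);
}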
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4a602853a98e..4362dcde74af 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/pci.h> 17#include <linux/pci.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/hardirq.h>
19 20
20#include <scsi/scsi.h> 21#include <scsi/scsi.h>
21#include <scsi/scsi_dbg.h> 22#include <scsi/scsi_dbg.h>
@@ -2248,3 +2249,61 @@ scsi_target_unblock(struct device *dev)
2248 device_for_each_child(dev, NULL, target_unblock); 2249 device_for_each_child(dev, NULL, target_unblock);
2249} 2250}
2250EXPORT_SYMBOL_GPL(scsi_target_unblock); 2251EXPORT_SYMBOL_GPL(scsi_target_unblock);
2252
2253
2254struct work_queue_work {
2255 struct work_struct work;
2256 void (*fn)(void *);
2257 void *data;
2258};
2259
2260static void execute_in_process_context_work(void *data)
2261{
2262 void (*fn)(void *data);
2263 struct work_queue_work *wqw = data;
2264
2265 fn = wqw->fn;
2266 data = wqw->data;
2267
2268 kfree(wqw);
2269
2270 fn(data);
2271}
2272
2273/**
2274 * scsi_execute_in_process_context - reliably execute the routine with user context
2275 * @fn: the function to execute
2276 * @data: data to pass to the function
2277 *
2278 * Executes the function immediately if process context is available,
2279 * otherwise schedules the function for delayed execution.
2280 *
2281 * Returns: 0 - function was executed
2282 * 1 - function was scheduled for execution
2283 * <0 - error
2284 */
2285int scsi_execute_in_process_context(void (*fn)(void *data), void *data)
2286{
2287 struct work_queue_work *wqw;
2288
2289 if (!in_interrupt()) {
2290 fn(data);
2291 return 0;
2292 }
2293
2294 wqw = kmalloc(sizeof(struct work_queue_work), GFP_ATOMIC);
2295
2296 if (unlikely(!wqw)) {
2297 printk(KERN_ERR "Failed to allocate memory\n");
2298 WARN_ON(1);
2299 return -ENOMEM;
2300 }
2301
2302 INIT_WORK(&wqw->work, execute_in_process_context_work, wqw);
2303 wqw->fn = fn;
2304 wqw->data = data;
2305 schedule_work(&wqw->work);
2306
2307 return 1;
2308}
2309EXPORT_SYMBOL_GPL(scsi_execute_in_process_context);
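/*
 * Usage sketch (hypothetical caller, not part of this patch): release work
 * that needs process context, triggered from a path that may run in
 * interrupt context.
 */
static void example_release(void *data)
{
	/* anything that must not run in interrupt context goes here */
	kfree(data);
}

static void example_put(void *obj)
{
	/* runs immediately if possible, otherwise deferred via schedule_work() */
	scsi_execute_in_process_context(example_release, obj);
}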
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 752fb5da3de4..5acb83ca5ae5 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -387,19 +387,12 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
387 return found_target; 387 return found_target;
388} 388}
389 389
390struct work_queue_wrapper { 390static void scsi_target_reap_usercontext(void *data)
391 struct work_struct work; 391{
392 struct scsi_target *starget; 392 struct scsi_target *starget = data;
393};
394
395static void scsi_target_reap_work(void *data) {
396 struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
397 struct scsi_target *starget = wqw->starget;
398 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 393 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
399 unsigned long flags; 394 unsigned long flags;
400 395
401 kfree(wqw);
402
403 spin_lock_irqsave(shost->host_lock, flags); 396 spin_lock_irqsave(shost->host_lock, flags);
404 397
405 if (--starget->reap_ref == 0 && list_empty(&starget->devices)) { 398 if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
@@ -428,18 +421,7 @@ static void scsi_target_reap_work(void *data) {
428 */ 421 */
429void scsi_target_reap(struct scsi_target *starget) 422void scsi_target_reap(struct scsi_target *starget)
430{ 423{
431 struct work_queue_wrapper *wqw = 424 scsi_execute_in_process_context(scsi_target_reap_usercontext, starget);
432 kzalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC);
433
434 if (!wqw) {
435 starget_printk(KERN_ERR, starget,
436 "Failed to allocate memory in scsi_reap_target()\n");
437 return;
438 }
439
440 INIT_WORK(&wqw->work, scsi_target_reap_work, wqw);
441 wqw->starget = starget;
442 schedule_work(&wqw->work);
443} 425}
444 426
445/** 427/**
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index a77b32deaf8f..902a5def8e62 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -217,8 +217,9 @@ static void scsi_device_cls_release(struct class_device *class_dev)
217 put_device(&sdev->sdev_gendev); 217 put_device(&sdev->sdev_gendev);
218} 218}
219 219
220static void scsi_device_dev_release(struct device *dev) 220static void scsi_device_dev_release_usercontext(void *data)
221{ 221{
222 struct device *dev = data;
222 struct scsi_device *sdev; 223 struct scsi_device *sdev;
223 struct device *parent; 224 struct device *parent;
224 struct scsi_target *starget; 225 struct scsi_target *starget;
@@ -237,6 +238,7 @@ static void scsi_device_dev_release(struct device *dev)
237 238
238 if (sdev->request_queue) { 239 if (sdev->request_queue) {
239 sdev->request_queue->queuedata = NULL; 240 sdev->request_queue->queuedata = NULL;
241 /* user context needed to free queue */
240 scsi_free_queue(sdev->request_queue); 242 scsi_free_queue(sdev->request_queue);
241 /* temporary expedient, try to catch use of queue lock 243 /* temporary expedient, try to catch use of queue lock
242 * after free of sdev */ 244 * after free of sdev */
@@ -252,6 +254,11 @@ static void scsi_device_dev_release(struct device *dev)
252 put_device(parent); 254 put_device(parent);
253} 255}
254 256
257static void scsi_device_dev_release(struct device *dev)
258{
259 scsi_execute_in_process_context(scsi_device_dev_release_usercontext, dev);
260}
261
255static struct class sdev_class = { 262static struct class sdev_class = {
256 .name = "scsi_device", 263 .name = "scsi_device",
257 .release = scsi_device_cls_release, 264 .release = scsi_device_cls_release,
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 723f7acbeb12..71e54a64adca 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -39,10 +39,6 @@ struct iscsi_internal {
39 struct iscsi_transport *iscsi_transport; 39 struct iscsi_transport *iscsi_transport;
40 struct list_head list; 40 struct list_head list;
41 /* 41 /*
42 * List of sessions for this transport
43 */
44 struct list_head sessions;
45 /*
46 * based on transport capabilities, at register time we set these 42 * based on transport capabilities, at register time we set these
47 * bits to tell the transport class it wants attributes displayed 43 * bits to tell the transport class it wants attributes displayed
48 * in sysfs or that it can support different iSCSI Data-Path 44 * in sysfs or that it can support different iSCSI Data-Path
@@ -164,9 +160,43 @@ static struct mempool_zone *z_reply;
164#define Z_MAX_ERROR 16 160#define Z_MAX_ERROR 16
165#define Z_HIWAT_ERROR 12 161#define Z_HIWAT_ERROR 12
166 162
163static LIST_HEAD(sesslist);
164static DEFINE_SPINLOCK(sesslock);
167static LIST_HEAD(connlist); 165static LIST_HEAD(connlist);
168static DEFINE_SPINLOCK(connlock); 166static DEFINE_SPINLOCK(connlock);
169 167
168static struct iscsi_cls_session *iscsi_session_lookup(uint64_t handle)
169{
170 unsigned long flags;
171 struct iscsi_cls_session *sess;
172
173 spin_lock_irqsave(&sesslock, flags);
174 list_for_each_entry(sess, &sesslist, sess_list) {
175 if (sess == iscsi_ptr(handle)) {
176 spin_unlock_irqrestore(&sesslock, flags);
177 return sess;
178 }
179 }
180 spin_unlock_irqrestore(&sesslock, flags);
181 return NULL;
182}
183
184static struct iscsi_cls_conn *iscsi_conn_lookup(uint64_t handle)
185{
186 unsigned long flags;
187 struct iscsi_cls_conn *conn;
188
189 spin_lock_irqsave(&connlock, flags);
190 list_for_each_entry(conn, &connlist, conn_list) {
191 if (conn == iscsi_ptr(handle)) {
192 spin_unlock_irqrestore(&connlock, flags);
193 return conn;
194 }
195 }
196 spin_unlock_irqrestore(&connlock, flags);
197 return NULL;
198}
199
170/* 200/*
171 * The following functions can be used by LLDs that allocate 201 * The following functions can be used by LLDs that allocate
172 * their own scsi_hosts or by software iscsi LLDs 202 * their own scsi_hosts or by software iscsi LLDs
@@ -365,6 +395,7 @@ iscsi_transport_create_session(struct scsi_transport_template *scsit,
365{ 395{
366 struct iscsi_cls_session *session; 396 struct iscsi_cls_session *session;
367 struct Scsi_Host *shost; 397 struct Scsi_Host *shost;
398 unsigned long flags;
368 399
369 shost = scsi_host_alloc(transport->host_template, 400 shost = scsi_host_alloc(transport->host_template,
370 hostdata_privsize(transport)); 401 hostdata_privsize(transport));
@@ -389,6 +420,9 @@ iscsi_transport_create_session(struct scsi_transport_template *scsit,
389 goto remove_host; 420 goto remove_host;
390 421
391 *(unsigned long*)shost->hostdata = (unsigned long)session; 422 *(unsigned long*)shost->hostdata = (unsigned long)session;
423 spin_lock_irqsave(&sesslock, flags);
424 list_add(&session->sess_list, &sesslist);
425 spin_unlock_irqrestore(&sesslock, flags);
392 return shost; 426 return shost;
393 427
394remove_host: 428remove_host:
@@ -410,9 +444,13 @@ EXPORT_SYMBOL_GPL(iscsi_transport_create_session);
410int iscsi_transport_destroy_session(struct Scsi_Host *shost) 444int iscsi_transport_destroy_session(struct Scsi_Host *shost)
411{ 445{
412 struct iscsi_cls_session *session; 446 struct iscsi_cls_session *session;
447 unsigned long flags;
413 448
414 scsi_remove_host(shost); 449 scsi_remove_host(shost);
415 session = hostdata_session(shost->hostdata); 450 session = hostdata_session(shost->hostdata);
451 spin_lock_irqsave(&sesslock, flags);
452 list_del(&session->sess_list);
453 spin_unlock_irqrestore(&sesslock, flags);
416 iscsi_destroy_session(session); 454 iscsi_destroy_session(session);
417 /* ref from host alloc */ 455 /* ref from host alloc */
418 scsi_host_put(shost); 456 scsi_host_put(shost);
@@ -424,22 +462,6 @@ EXPORT_SYMBOL_GPL(iscsi_transport_destroy_session);
424/* 462/*
425 * iscsi interface functions 463 * iscsi interface functions
426 */ 464 */
427static struct iscsi_cls_conn*
428iscsi_if_find_conn(uint64_t key)
429{
430 unsigned long flags;
431 struct iscsi_cls_conn *conn;
432
433 spin_lock_irqsave(&connlock, flags);
434 list_for_each_entry(conn, &connlist, conn_list)
435 if (conn->connh == key) {
436 spin_unlock_irqrestore(&connlock, flags);
437 return conn;
438 }
439 spin_unlock_irqrestore(&connlock, flags);
440 return NULL;
441}
442
443static struct iscsi_internal * 465static struct iscsi_internal *
444iscsi_if_transport_lookup(struct iscsi_transport *tt) 466iscsi_if_transport_lookup(struct iscsi_transport *tt)
445{ 467{
@@ -504,6 +526,12 @@ mempool_zone_init(unsigned max, unsigned size, unsigned hiwat)
504 if (!zp) 526 if (!zp)
505 return NULL; 527 return NULL;
506 528
529 zp->size = size;
530 zp->hiwat = hiwat;
531 INIT_LIST_HEAD(&zp->freequeue);
532 spin_lock_init(&zp->freelock);
533 atomic_set(&zp->allocated, 0);
534
507 zp->pool = mempool_create(max, mempool_zone_alloc_skb, 535 zp->pool = mempool_create(max, mempool_zone_alloc_skb,
508 mempool_zone_free_skb, zp); 536 mempool_zone_free_skb, zp);
509 if (!zp->pool) { 537 if (!zp->pool) {
@@ -511,13 +539,6 @@ mempool_zone_init(unsigned max, unsigned size, unsigned hiwat)
511 return NULL; 539 return NULL;
512 } 540 }
513 541
514 zp->size = size;
515 zp->hiwat = hiwat;
516
517 INIT_LIST_HEAD(&zp->freequeue);
518 spin_lock_init(&zp->freelock);
519 atomic_set(&zp->allocated, 0);
520
521 return zp; 542 return zp;
522} 543}
523 544
@@ -559,25 +580,21 @@ iscsi_unicast_skb(struct mempool_zone *zone, struct sk_buff *skb)
559 return 0; 580 return 0;
560} 581}
561 582
562int iscsi_recv_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, 583int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
563 char *data, uint32_t data_size) 584 char *data, uint32_t data_size)
564{ 585{
565 struct nlmsghdr *nlh; 586 struct nlmsghdr *nlh;
566 struct sk_buff *skb; 587 struct sk_buff *skb;
567 struct iscsi_uevent *ev; 588 struct iscsi_uevent *ev;
568 struct iscsi_cls_conn *conn;
569 char *pdu; 589 char *pdu;
570 int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) + 590 int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) +
571 data_size); 591 data_size);
572 592
573 conn = iscsi_if_find_conn(connh);
574 BUG_ON(!conn);
575
576 mempool_zone_complete(conn->z_pdu); 593 mempool_zone_complete(conn->z_pdu);
577 594
578 skb = mempool_zone_get_skb(conn->z_pdu); 595 skb = mempool_zone_get_skb(conn->z_pdu);
579 if (!skb) { 596 if (!skb) {
580 iscsi_conn_error(connh, ISCSI_ERR_CONN_FAILED); 597 iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED);
581 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver " 598 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver "
582 "control PDU: OOM\n"); 599 "control PDU: OOM\n");
583 return -ENOMEM; 600 return -ENOMEM;
@@ -590,7 +607,7 @@ int iscsi_recv_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr,
590 ev->type = ISCSI_KEVENT_RECV_PDU; 607 ev->type = ISCSI_KEVENT_RECV_PDU;
591 if (atomic_read(&conn->z_pdu->allocated) >= conn->z_pdu->hiwat) 608 if (atomic_read(&conn->z_pdu->allocated) >= conn->z_pdu->hiwat)
592 ev->iferror = -ENOMEM; 609 ev->iferror = -ENOMEM;
593 ev->r.recv_req.conn_handle = connh; 610 ev->r.recv_req.conn_handle = iscsi_handle(conn);
594 pdu = (char*)ev + sizeof(*ev); 611 pdu = (char*)ev + sizeof(*ev);
595 memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); 612 memcpy(pdu, hdr, sizeof(struct iscsi_hdr));
596 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); 613 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size);
@@ -599,17 +616,13 @@ int iscsi_recv_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr,
599} 616}
600EXPORT_SYMBOL_GPL(iscsi_recv_pdu); 617EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
601 618
602void iscsi_conn_error(iscsi_connh_t connh, enum iscsi_err error) 619void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
603{ 620{
604 struct nlmsghdr *nlh; 621 struct nlmsghdr *nlh;
605 struct sk_buff *skb; 622 struct sk_buff *skb;
606 struct iscsi_uevent *ev; 623 struct iscsi_uevent *ev;
607 struct iscsi_cls_conn *conn;
608 int len = NLMSG_SPACE(sizeof(*ev)); 624 int len = NLMSG_SPACE(sizeof(*ev));
609 625
610 conn = iscsi_if_find_conn(connh);
611 BUG_ON(!conn);
612
613 mempool_zone_complete(conn->z_error); 626 mempool_zone_complete(conn->z_error);
614 627
615 skb = mempool_zone_get_skb(conn->z_error); 628 skb = mempool_zone_get_skb(conn->z_error);
@@ -626,7 +639,7 @@ void iscsi_conn_error(iscsi_connh_t connh, enum iscsi_err error)
626 if (atomic_read(&conn->z_error->allocated) >= conn->z_error->hiwat) 639 if (atomic_read(&conn->z_error->allocated) >= conn->z_error->hiwat)
627 ev->iferror = -ENOMEM; 640 ev->iferror = -ENOMEM;
628 ev->r.connerror.error = error; 641 ev->r.connerror.error = error;
629 ev->r.connerror.conn_handle = connh; 642 ev->r.connerror.conn_handle = iscsi_handle(conn);
630 643
631 iscsi_unicast_skb(conn->z_error, skb); 644 iscsi_unicast_skb(conn->z_error, skb);
632 645
@@ -662,8 +675,7 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
662} 675}
663 676
664static int 677static int
665iscsi_if_get_stats(struct iscsi_transport *transport, struct sk_buff *skb, 678iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
666 struct nlmsghdr *nlh)
667{ 679{
668 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 680 struct iscsi_uevent *ev = NLMSG_DATA(nlh);
669 struct iscsi_stats *stats; 681 struct iscsi_stats *stats;
@@ -677,7 +689,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct sk_buff *skb,
677 ISCSI_STATS_CUSTOM_MAX); 689 ISCSI_STATS_CUSTOM_MAX);
678 int err = 0; 690 int err = 0;
679 691
680 conn = iscsi_if_find_conn(ev->u.get_stats.conn_handle); 692 conn = iscsi_conn_lookup(ev->u.get_stats.conn_handle);
681 if (!conn) 693 if (!conn)
682 return -EEXIST; 694 return -EEXIST;
683 695
@@ -707,14 +719,14 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct sk_buff *skb,
707 ((char*)evstat + sizeof(*evstat)); 719 ((char*)evstat + sizeof(*evstat));
708 memset(stats, 0, sizeof(*stats)); 720 memset(stats, 0, sizeof(*stats));
709 721
710 transport->get_stats(ev->u.get_stats.conn_handle, stats); 722 transport->get_stats(conn, stats);
711 actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) + 723 actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) +
712 sizeof(struct iscsi_stats) + 724 sizeof(struct iscsi_stats) +
713 sizeof(struct iscsi_stats_custom) * 725 sizeof(struct iscsi_stats_custom) *
714 stats->custom_length); 726 stats->custom_length);
715 actual_size -= sizeof(*nlhstat); 727 actual_size -= sizeof(*nlhstat);
716 actual_size = NLMSG_LENGTH(actual_size); 728 actual_size = NLMSG_LENGTH(actual_size);
717 skb_trim(skb, NLMSG_ALIGN(actual_size)); 729 skb_trim(skbstat, NLMSG_ALIGN(actual_size));
718 nlhstat->nlmsg_len = actual_size; 730 nlhstat->nlmsg_len = actual_size;
719 731
720 err = iscsi_unicast_skb(conn->z_pdu, skbstat); 732 err = iscsi_unicast_skb(conn->z_pdu, skbstat);
@@ -727,58 +739,34 @@ static int
727iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) 739iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
728{ 740{
729 struct iscsi_transport *transport = priv->iscsi_transport; 741 struct iscsi_transport *transport = priv->iscsi_transport;
730 struct Scsi_Host *shost; 742 struct iscsi_cls_session *session;
731 743 uint32_t sid;
732 if (!transport->create_session)
733 return -EINVAL;
734 744
735 shost = transport->create_session(&priv->t, 745 session = transport->create_session(&priv->t,
736 ev->u.c_session.initial_cmdsn); 746 ev->u.c_session.initial_cmdsn,
737 if (!shost) 747 &sid);
748 if (!session)
738 return -ENOMEM; 749 return -ENOMEM;
739 750
740 ev->r.c_session_ret.session_handle = iscsi_handle(iscsi_hostdata(shost->hostdata)); 751 ev->r.c_session_ret.session_handle = iscsi_handle(session);
741 ev->r.c_session_ret.sid = shost->host_no; 752 ev->r.c_session_ret.sid = sid;
742 return 0; 753 return 0;
743} 754}
744 755
745static int 756static int
746iscsi_if_destroy_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) 757iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
747{ 758{
748 struct iscsi_transport *transport = priv->iscsi_transport;
749
750 struct Scsi_Host *shost;
751
752 if (!transport->destroy_session)
753 return -EINVAL;
754
755 shost = scsi_host_lookup(ev->u.d_session.sid);
756 if (shost == ERR_PTR(-ENXIO))
757 return -EEXIST;
758
759 if (transport->destroy_session)
760 transport->destroy_session(shost);
761 /* ref from host lookup */
762 scsi_host_put(shost);
763 return 0;
764}
765
766static int
767iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev){
768 struct Scsi_Host *shost;
769 struct iscsi_cls_conn *conn; 759 struct iscsi_cls_conn *conn;
760 struct iscsi_cls_session *session;
770 unsigned long flags; 761 unsigned long flags;
771 762
772 if (!transport->create_conn) 763 session = iscsi_session_lookup(ev->u.c_conn.session_handle);
764 if (!session)
773 return -EINVAL; 765 return -EINVAL;
774 766
775 shost = scsi_host_lookup(ev->u.c_conn.sid); 767 conn = transport->create_conn(session, ev->u.c_conn.cid);
776 if (shost == ERR_PTR(-ENXIO))
777 return -EEXIST;
778
779 conn = transport->create_conn(shost, ev->u.c_conn.cid);
780 if (!conn) 768 if (!conn)
781 goto release_ref; 769 return -ENOMEM;
782 770
783 conn->z_pdu = mempool_zone_init(Z_MAX_PDU, 771 conn->z_pdu = mempool_zone_init(Z_MAX_PDU,
784 NLMSG_SPACE(sizeof(struct iscsi_uevent) + 772 NLMSG_SPACE(sizeof(struct iscsi_uevent) +
@@ -800,14 +788,13 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
800 goto free_pdu_pool; 788 goto free_pdu_pool;
801 } 789 }
802 790
803 ev->r.handle = conn->connh = iscsi_handle(conn->dd_data); 791 ev->r.handle = iscsi_handle(conn);
804 792
805 spin_lock_irqsave(&connlock, flags); 793 spin_lock_irqsave(&connlock, flags);
806 list_add(&conn->conn_list, &connlist); 794 list_add(&conn->conn_list, &connlist);
807 conn->active = 1; 795 conn->active = 1;
808 spin_unlock_irqrestore(&connlock, flags); 796 spin_unlock_irqrestore(&connlock, flags);
809 797
810 scsi_host_put(shost);
811 return 0; 798 return 0;
812 799
813free_pdu_pool: 800free_pdu_pool:
@@ -815,8 +802,6 @@ free_pdu_pool:
815destroy_conn: 802destroy_conn:
816 if (transport->destroy_conn) 803 if (transport->destroy_conn)
817 transport->destroy_conn(conn->dd_data); 804 transport->destroy_conn(conn->dd_data);
818release_ref:
819 scsi_host_put(shost);
820 return -ENOMEM; 805 return -ENOMEM;
821} 806}
822 807
@@ -827,13 +812,9 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
827 struct iscsi_cls_conn *conn; 812 struct iscsi_cls_conn *conn;
828 struct mempool_zone *z_error, *z_pdu; 813 struct mempool_zone *z_error, *z_pdu;
829 814
830 conn = iscsi_if_find_conn(ev->u.d_conn.conn_handle); 815 conn = iscsi_conn_lookup(ev->u.d_conn.conn_handle);
831 if (!conn) 816 if (!conn)
832 return -EEXIST;
833
834 if (!transport->destroy_conn)
835 return -EINVAL; 817 return -EINVAL;
836
837 spin_lock_irqsave(&connlock, flags); 818 spin_lock_irqsave(&connlock, flags);
838 conn->active = 0; 819 conn->active = 0;
839 list_del(&conn->conn_list); 820 list_del(&conn->conn_list);
@@ -858,23 +839,27 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
858 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 839 struct iscsi_uevent *ev = NLMSG_DATA(nlh);
859 struct iscsi_transport *transport = NULL; 840 struct iscsi_transport *transport = NULL;
860 struct iscsi_internal *priv; 841 struct iscsi_internal *priv;
861 842 struct iscsi_cls_session *session;
862 if (NETLINK_CREDS(skb)->uid) 843 struct iscsi_cls_conn *conn;
863 return -EPERM;
864 844
865 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); 845 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
866 if (!priv) 846 if (!priv)
867 return -EINVAL; 847 return -EINVAL;
868 transport = priv->iscsi_transport; 848 transport = priv->iscsi_transport;
869 849
870 daemon_pid = NETLINK_CREDS(skb)->pid; 850 if (!try_module_get(transport->owner))
851 return -EINVAL;
871 852
872 switch (nlh->nlmsg_type) { 853 switch (nlh->nlmsg_type) {
873 case ISCSI_UEVENT_CREATE_SESSION: 854 case ISCSI_UEVENT_CREATE_SESSION:
874 err = iscsi_if_create_session(priv, ev); 855 err = iscsi_if_create_session(priv, ev);
875 break; 856 break;
876 case ISCSI_UEVENT_DESTROY_SESSION: 857 case ISCSI_UEVENT_DESTROY_SESSION:
877 err = iscsi_if_destroy_session(priv, ev); 858 session = iscsi_session_lookup(ev->u.d_session.session_handle);
859 if (session)
860 transport->destroy_session(session);
861 else
862 err = -EINVAL;
878 break; 863 break;
879 case ISCSI_UEVENT_CREATE_CONN: 864 case ISCSI_UEVENT_CREATE_CONN:
880 err = iscsi_if_create_conn(transport, ev); 865 err = iscsi_if_create_conn(transport, ev);
@@ -883,56 +868,64 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
883 err = iscsi_if_destroy_conn(transport, ev); 868 err = iscsi_if_destroy_conn(transport, ev);
884 break; 869 break;
885 case ISCSI_UEVENT_BIND_CONN: 870 case ISCSI_UEVENT_BIND_CONN:
886 if (!iscsi_if_find_conn(ev->u.b_conn.conn_handle)) 871 session = iscsi_session_lookup(ev->u.b_conn.session_handle);
887 return -EEXIST; 872 conn = iscsi_conn_lookup(ev->u.b_conn.conn_handle);
888 ev->r.retcode = transport->bind_conn( 873
889 ev->u.b_conn.session_handle, 874 if (session && conn)
890 ev->u.b_conn.conn_handle, 875 ev->r.retcode = transport->bind_conn(session, conn,
891 ev->u.b_conn.transport_fd, 876 ev->u.b_conn.transport_fd,
892 ev->u.b_conn.is_leading); 877 ev->u.b_conn.is_leading);
878 else
879 err = -EINVAL;
893 break; 880 break;
894 case ISCSI_UEVENT_SET_PARAM: 881 case ISCSI_UEVENT_SET_PARAM:
895 if (!iscsi_if_find_conn(ev->u.set_param.conn_handle)) 882 conn = iscsi_conn_lookup(ev->u.set_param.conn_handle);
896 return -EEXIST; 883 if (conn)
897 ev->r.retcode = transport->set_param( 884 ev->r.retcode = transport->set_param(conn,
898 ev->u.set_param.conn_handle, 885 ev->u.set_param.param, ev->u.set_param.value);
899 ev->u.set_param.param, ev->u.set_param.value); 886 else
887 err = -EINVAL;
900 break; 888 break;
901 case ISCSI_UEVENT_START_CONN: 889 case ISCSI_UEVENT_START_CONN:
902 if (!iscsi_if_find_conn(ev->u.start_conn.conn_handle)) 890 conn = iscsi_conn_lookup(ev->u.start_conn.conn_handle);
903 return -EEXIST; 891 if (conn)
904 ev->r.retcode = transport->start_conn( 892 ev->r.retcode = transport->start_conn(conn);
905 ev->u.start_conn.conn_handle); 893 else
894 err = -EINVAL;
895
906 break; 896 break;
907 case ISCSI_UEVENT_STOP_CONN: 897 case ISCSI_UEVENT_STOP_CONN:
908 if (!iscsi_if_find_conn(ev->u.stop_conn.conn_handle)) 898 conn = iscsi_conn_lookup(ev->u.stop_conn.conn_handle);
909 return -EEXIST; 899 if (conn)
910 transport->stop_conn(ev->u.stop_conn.conn_handle, 900 transport->stop_conn(conn, ev->u.stop_conn.flag);
911 ev->u.stop_conn.flag); 901 else
902 err = -EINVAL;
912 break; 903 break;
913 case ISCSI_UEVENT_SEND_PDU: 904 case ISCSI_UEVENT_SEND_PDU:
914 if (!iscsi_if_find_conn(ev->u.send_pdu.conn_handle)) 905 conn = iscsi_conn_lookup(ev->u.send_pdu.conn_handle);
915 return -EEXIST; 906 if (conn)
916 ev->r.retcode = transport->send_pdu( 907 ev->r.retcode = transport->send_pdu(conn,
917 ev->u.send_pdu.conn_handle, 908 (struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
918 (struct iscsi_hdr*)((char*)ev + sizeof(*ev)), 909 (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
919 (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, 910 ev->u.send_pdu.data_size);
920 ev->u.send_pdu.data_size); 911 else
912 err = -EINVAL;
921 break; 913 break;
922 case ISCSI_UEVENT_GET_STATS: 914 case ISCSI_UEVENT_GET_STATS:
923 err = iscsi_if_get_stats(transport, skb, nlh); 915 err = iscsi_if_get_stats(transport, nlh);
924 break; 916 break;
925 default: 917 default:
926 err = -EINVAL; 918 err = -EINVAL;
927 break; 919 break;
928 } 920 }
929 921
922 module_put(transport->owner);
930 return err; 923 return err;
931} 924}
932 925
933/* Get message from skb (based on rtnetlink_rcv_skb). Each message is 926/* Get message from skb (based on rtnetlink_rcv_skb). Each message is
934 * processed by iscsi_if_recv_msg. Malformed skbs with wrong length are 927 * processed by iscsi_if_recv_msg. Malformed skbs with wrong length are
935 * discarded silently. */ 928 * or invalid creds discarded silently. */
936static void 929static void
937iscsi_if_rx(struct sock *sk, int len) 930iscsi_if_rx(struct sock *sk, int len)
938{ 931{
@@ -940,6 +933,12 @@ iscsi_if_rx(struct sock *sk, int len)
940 933
941 mutex_lock(&rx_queue_mutex); 934 mutex_lock(&rx_queue_mutex);
942 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 935 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
936 if (NETLINK_CREDS(skb)->uid) {
937 skb_pull(skb, skb->len);
938 goto free_skb;
939 }
940 daemon_pid = NETLINK_CREDS(skb)->pid;
941
943 while (skb->len >= NLMSG_SPACE(0)) { 942 while (skb->len >= NLMSG_SPACE(0)) {
944 int err; 943 int err;
945 uint32_t rlen; 944 uint32_t rlen;
@@ -951,10 +950,12 @@ iscsi_if_rx(struct sock *sk, int len)
951 skb->len < nlh->nlmsg_len) { 950 skb->len < nlh->nlmsg_len) {
952 break; 951 break;
953 } 952 }
953
954 ev = NLMSG_DATA(nlh); 954 ev = NLMSG_DATA(nlh);
955 rlen = NLMSG_ALIGN(nlh->nlmsg_len); 955 rlen = NLMSG_ALIGN(nlh->nlmsg_len);
956 if (rlen > skb->len) 956 if (rlen > skb->len)
957 rlen = skb->len; 957 rlen = skb->len;
958
958 err = iscsi_if_recv_msg(skb, nlh); 959 err = iscsi_if_recv_msg(skb, nlh);
959 if (err) { 960 if (err) {
960 ev->type = ISCSI_KEVENT_IF_ERROR; 961 ev->type = ISCSI_KEVENT_IF_ERROR;
@@ -978,6 +979,7 @@ iscsi_if_rx(struct sock *sk, int len)
978 } while (err < 0 && err != -ECONNREFUSED); 979 } while (err < 0 && err != -ECONNREFUSED);
979 skb_pull(skb, rlen); 980 skb_pull(skb, rlen);
980 } 981 }
982free_skb:
981 kfree_skb(skb); 983 kfree_skb(skb);
982 } 984 }
983 mutex_unlock(&rx_queue_mutex); 985 mutex_unlock(&rx_queue_mutex);
@@ -997,7 +999,7 @@ show_conn_int_param_##param(struct class_device *cdev, char *buf) \
997 struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \ 999 struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
998 struct iscsi_transport *t = conn->transport; \ 1000 struct iscsi_transport *t = conn->transport; \
999 \ 1001 \
1000 t->get_conn_param(conn->dd_data, param, &value); \ 1002 t->get_conn_param(conn, param, &value); \
1001 return snprintf(buf, 20, format"\n", value); \ 1003 return snprintf(buf, 20, format"\n", value); \
1002} 1004}
1003 1005
@@ -1024,10 +1026,9 @@ show_session_int_param_##param(struct class_device *cdev, char *buf) \
1024{ \ 1026{ \
1025 uint32_t value = 0; \ 1027 uint32_t value = 0; \
1026 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \ 1028 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
1027 struct Scsi_Host *shost = iscsi_session_to_shost(session); \
1028 struct iscsi_transport *t = session->transport; \ 1029 struct iscsi_transport *t = session->transport; \
1029 \ 1030 \
1030 t->get_session_param(shost, param, &value); \ 1031 t->get_session_param(session, param, &value); \
1031 return snprintf(buf, 20, format"\n", value); \ 1032 return snprintf(buf, 20, format"\n", value); \
1032} 1033}
1033 1034
@@ -1121,7 +1122,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
1121 return NULL; 1122 return NULL;
1122 memset(priv, 0, sizeof(*priv)); 1123 memset(priv, 0, sizeof(*priv));
1123 INIT_LIST_HEAD(&priv->list); 1124 INIT_LIST_HEAD(&priv->list);
1124 INIT_LIST_HEAD(&priv->sessions);
1125 priv->iscsi_transport = tt; 1125 priv->iscsi_transport = tt;
1126 1126
1127 priv->cdev.class = &iscsi_transport_class; 1127 priv->cdev.class = &iscsi_transport_class;
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 8260f040d39c..f4854c33f48d 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3588,7 +3588,7 @@ static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int
3588 3588
3589 if (pm) { 3589 if (pm) {
3590 dp_scr = scr_to_cpu(pm->ret); 3590 dp_scr = scr_to_cpu(pm->ret);
3591 dp_ofs -= scr_to_cpu(pm->sg.size); 3591 dp_ofs -= scr_to_cpu(pm->sg.size) & 0x00ffffff;
3592 } 3592 }
3593 3593
3594 /* 3594 /*
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index b85e2b180a44..a2e201dc40f7 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -843,6 +843,9 @@ static int neofb_set_par(struct fb_info *info)
843 843
844 par->SysIfaceCntl2 = 0xc0; /* VESA Bios sets this to 0x80! */ 844 par->SysIfaceCntl2 = 0xc0; /* VESA Bios sets this to 0x80! */
845 845
846 /* Initialize: by default, we want display config register to be read */
847 par->PanelDispCntlRegRead = 1;
848
846 /* Enable any user specified display devices. */ 849 /* Enable any user specified display devices. */
847 par->PanelDispCntlReg1 = 0x00; 850 par->PanelDispCntlReg1 = 0x00;
848 if (par->internal_display) 851 if (par->internal_display)
@@ -1334,11 +1337,17 @@ static int neofb_blank(int blank_mode, struct fb_info *info)
1334 struct neofb_par *par = info->par; 1337 struct neofb_par *par = info->par;
1335 int seqflags, lcdflags, dpmsflags, reg; 1338 int seqflags, lcdflags, dpmsflags, reg;
1336 1339
1340
1337 /* 1341 /*
1338 * Reload the value stored in the register, might have been changed via 1342 * Reload the value stored in the register, if sensible. It might have
1339 * FN keystroke 1343 * been changed via FN keystroke.
1340 */ 1344 */
1341 par->PanelDispCntlReg1 = vga_rgfx(NULL, 0x20) & 0x03; 1345 if (par->PanelDispCntlRegRead) {
1346 neoUnlock();
1347 par->PanelDispCntlReg1 = vga_rgfx(NULL, 0x20) & 0x03;
1348 neoLock(&par->state);
1349 }
1350 par->PanelDispCntlRegRead = !blank_mode;
1342 1351
1343 switch (blank_mode) { 1352 switch (blank_mode) {
1344 case FB_BLANK_POWERDOWN: /* powerdown - both sync lines down */ 1353 case FB_BLANK_POWERDOWN: /* powerdown - both sync lines down */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index d17c97d07c80..675bd2568297 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1442,13 +1442,15 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
1442 &bytes_read, &smb_read_data, 1442 &bytes_read, &smb_read_data,
1443 &buf_type); 1443 &buf_type);
1444 pSMBr = (struct smb_com_read_rsp *)smb_read_data; 1444 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1445 if (copy_to_user(current_offset,
1446 smb_read_data + 4 /* RFC1001 hdr */
1447 + le16_to_cpu(pSMBr->DataOffset),
1448 bytes_read)) {
1449 rc = -EFAULT;
1450 }
1451 if (smb_read_data) { 1445 if (smb_read_data) {
1446 if (copy_to_user(current_offset,
1447 smb_read_data +
1448 4 /* RFC1001 length field */ +
1449 le16_to_cpu(pSMBr->DataOffset),
1450 bytes_read)) {
1451 rc = -EFAULT;
1452 }
1453
1452 if(buf_type == CIFS_SMALL_BUFFER) 1454 if(buf_type == CIFS_SMALL_BUFFER)
1453 cifs_small_buf_release(smb_read_data); 1455 cifs_small_buf_release(smb_read_data);
1454 else if(buf_type == CIFS_LARGE_BUFFER) 1456 else if(buf_type == CIFS_LARGE_BUFFER)
diff --git a/fs/exec.c b/fs/exec.c
index 055378d2513e..0e1c95074d42 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1403,7 +1403,7 @@ static void zap_threads (struct mm_struct *mm)
1403 do_each_thread(g,p) { 1403 do_each_thread(g,p) {
1404 if (mm == p->mm && p != tsk && 1404 if (mm == p->mm && p != tsk &&
1405 p->ptrace && p->parent->mm == mm) { 1405 p->ptrace && p->parent->mm == mm) {
1406 __ptrace_unlink(p); 1406 __ptrace_detach(p, 0);
1407 } 1407 }
1408 } while_each_thread(g,p); 1408 } while_each_thread(g,p);
1409 write_unlock_irq(&tasklist_lock); 1409 write_unlock_irq(&tasklist_lock);
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 42eb53b5293b..23ceaa7127b4 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -208,6 +208,9 @@ static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
208#define DLM_LOCK_RES_IN_PROGRESS 0x00000010 208#define DLM_LOCK_RES_IN_PROGRESS 0x00000010
209#define DLM_LOCK_RES_MIGRATING 0x00000020 209#define DLM_LOCK_RES_MIGRATING 0x00000020
210 210
211/* max milliseconds to wait to sync up a network failure with a node death */
212#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)
213
211#define DLM_PURGE_INTERVAL_MS (8 * 1000) 214#define DLM_PURGE_INTERVAL_MS (8 * 1000)
212 215
213struct dlm_lock_resource 216struct dlm_lock_resource
@@ -658,6 +661,7 @@ int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
658void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); 661void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
659void dlm_wait_for_recovery(struct dlm_ctxt *dlm); 662void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
660int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node); 663int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
664int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
661 665
662void dlm_put(struct dlm_ctxt *dlm); 666void dlm_put(struct dlm_ctxt *dlm);
663struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); 667struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index 6001b22a997d..f66e2d818ccd 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -392,6 +392,11 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
392 } else { 392 } else {
393 mlog_errno(tmpret); 393 mlog_errno(tmpret);
394 if (dlm_is_host_down(tmpret)) { 394 if (dlm_is_host_down(tmpret)) {
395 /* instead of logging the same network error over
396 * and over, sleep here and wait for the heartbeat
397 * to notice the node is dead. times out after 5s. */
398 dlm_wait_for_node_death(dlm, res->owner,
399 DLM_NODE_DEATH_WAIT_MAX);
395 ret = DLM_RECOVERING; 400 ret = DLM_RECOVERING;
396 mlog(0, "node %u died so returning DLM_RECOVERING " 401 mlog(0, "node %u died so returning DLM_RECOVERING "
397 "from convert message!\n", res->owner); 402 "from convert message!\n", res->owner);
@@ -421,7 +426,7 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
421 struct dlm_lockstatus *lksb; 426 struct dlm_lockstatus *lksb;
422 enum dlm_status status = DLM_NORMAL; 427 enum dlm_status status = DLM_NORMAL;
423 u32 flags; 428 u32 flags;
424 int call_ast = 0, kick_thread = 0; 429 int call_ast = 0, kick_thread = 0, ast_reserved = 0;
425 430
426 if (!dlm_grab(dlm)) { 431 if (!dlm_grab(dlm)) {
427 dlm_error(DLM_REJECTED); 432 dlm_error(DLM_REJECTED);
@@ -490,6 +495,7 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
490 status = __dlm_lockres_state_to_status(res); 495 status = __dlm_lockres_state_to_status(res);
491 if (status == DLM_NORMAL) { 496 if (status == DLM_NORMAL) {
492 __dlm_lockres_reserve_ast(res); 497 __dlm_lockres_reserve_ast(res);
498 ast_reserved = 1;
493 res->state |= DLM_LOCK_RES_IN_PROGRESS; 499 res->state |= DLM_LOCK_RES_IN_PROGRESS;
494 status = __dlmconvert_master(dlm, res, lock, flags, 500 status = __dlmconvert_master(dlm, res, lock, flags,
495 cnv->requested_type, 501 cnv->requested_type,
@@ -512,10 +518,10 @@ leave:
512 else 518 else
513 dlm_lock_put(lock); 519 dlm_lock_put(lock);
514 520
515 /* either queue the ast or release it */ 521 /* either queue the ast or release it, if reserved */
516 if (call_ast) 522 if (call_ast)
517 dlm_queue_ast(dlm, lock); 523 dlm_queue_ast(dlm, lock);
518 else 524 else if (ast_reserved)
519 dlm_lockres_release_ast(dlm, res); 525 dlm_lockres_release_ast(dlm, res);
520 526
521 if (kick_thread) 527 if (kick_thread)
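
Two things change in dlmconvert.c: the remote-convert path now sleeps in dlm_wait_for_node_death() instead of logging the same network error repeatedly when the lock owner is down, and the handler only calls dlm_lockres_release_ast() when it actually took the reservation, tracked by the new ast_reserved flag. A small standalone sketch of that reserve/release pairing, with invented names:

/* Sketch of the "release only what you reserved" fix: the release step is
 * guarded by a flag set at reservation time, so an early-error exit that
 * never reserved an AST cannot drop a reservation it does not hold. */
#include <stdio.h>

struct resource { int ast_reservations; };

static void reserve_ast(struct resource *res) { res->ast_reservations++; }
static void release_ast(struct resource *res) { res->ast_reservations--; }

static int handle_convert(struct resource *res, int input_ok)
{
    int ast_reserved = 0;

    if (!input_ok)              /* early exit: nothing was reserved */
        goto leave;

    reserve_ast(res);
    ast_reserved = 1;
    /* ... conversion work would happen here ... */

leave:
    if (ast_reserved)           /* before the fix this ran unconditionally */
        release_ast(res);
    return input_ok ? 0 : -1;
}

int main(void)
{
    struct resource res = { 0 };
    handle_convert(&res, 0);    /* error path: balance stays at 0 */
    handle_convert(&res, 1);    /* normal path: reserve, then release */
    printf("outstanding reservations: %d\n", res.ast_reservations);
    return 0;
}
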
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index d1a0038557a3..671d4ff222cc 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -220,6 +220,17 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
220 dlm_error(status); 220 dlm_error(status);
221 dlm_revert_pending_lock(res, lock); 221 dlm_revert_pending_lock(res, lock);
222 dlm_lock_put(lock); 222 dlm_lock_put(lock);
223 } else if (dlm_is_recovery_lock(res->lockname.name,
224 res->lockname.len)) {
225 /* special case for the $RECOVERY lock.
226 * there will never be an AST delivered to put
227 * this lock on the proper secondary queue
228 * (granted), so do it manually. */
229 mlog(0, "%s: $RECOVERY lock for this node (%u) is "
230 "mastered by %u; got lock, manually granting (no ast)\n",
231 dlm->name, dlm->node_num, res->owner);
232 list_del_init(&lock->list);
233 list_add_tail(&lock->list, &res->granted);
223 } 234 }
224 spin_unlock(&res->spinlock); 235 spin_unlock(&res->spinlock);
225 236
@@ -646,7 +657,19 @@ retry_lock:
646 mlog(0, "retrying lock with migration/" 657 mlog(0, "retrying lock with migration/"
647 "recovery/in progress\n"); 658 "recovery/in progress\n");
648 msleep(100); 659 msleep(100);
649 dlm_wait_for_recovery(dlm); 660 /* no waiting for dlm_reco_thread */
661 if (recovery) {
662 if (status == DLM_RECOVERING) {
663 mlog(0, "%s: got RECOVERING "
664 "for $REOCVERY lock, master "
665 "was %u\n", dlm->name,
666 res->owner);
667 dlm_wait_for_node_death(dlm, res->owner,
668 DLM_NODE_DEATH_WAIT_MAX);
669 }
670 } else {
671 dlm_wait_for_recovery(dlm);
672 }
650 goto retry_lock; 673 goto retry_lock;
651 } 674 }
652 675
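
dlmlock.c adds two special cases for the $RECOVERY lock: a remotely granted recovery lock is moved onto the granted queue by hand, since no AST will ever arrive for it, and the retry loop no longer calls dlm_wait_for_recovery() for that lock but instead waits, bounded by DLM_NODE_DEATH_WAIT_MAX, for the dead master to be noticed. A hedged pthread analogue of such a bounded retry wait (names and structure are illustrative, not the kernel's; compile with -pthread):

/* Userspace analogue of "wait at most N ms for the condition":
 * pthread_cond_timedwait() plays the role of wait_event_timeout() in
 * dlm_wait_for_node_death(). */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int node_dead;                       /* set by a "heartbeat" thread */

static void wait_for_node_death(long timeout_ms)
{
    struct timespec dl;
    clock_gettime(CLOCK_REALTIME, &dl);
    dl.tv_sec  += timeout_ms / 1000;
    dl.tv_nsec += (timeout_ms % 1000) * 1000000L;
    if (dl.tv_nsec >= 1000000000L) { dl.tv_sec++; dl.tv_nsec -= 1000000000L; }

    pthread_mutex_lock(&lock);
    while (!node_dead) {
        if (pthread_cond_timedwait(&cond, &lock, &dl) != 0)
            break;                          /* timed out: give up waiting */
    }
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    wait_for_node_death(100);   /* nobody signals: returns about 100 ms later */
    printf("done waiting, node_dead=%d\n", node_dead);
    return 0;
}
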
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index a3194fe173d9..2e2e95e69499 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2482,7 +2482,9 @@ top:
2482 atomic_set(&mle->woken, 1); 2482 atomic_set(&mle->woken, 1);
2483 spin_unlock(&mle->spinlock); 2483 spin_unlock(&mle->spinlock);
2484 wake_up(&mle->wq); 2484 wake_up(&mle->wq);
2485 /* final put will take care of list removal */ 2485 /* do not need events any longer, so detach
2486 * from heartbeat */
2487 __dlm_mle_detach_hb_events(dlm, mle);
2486 __dlm_put_mle(mle); 2488 __dlm_put_mle(mle);
2487 } 2489 }
2488 continue; 2490 continue;
@@ -2537,6 +2539,9 @@ top:
2537 spin_unlock(&res->spinlock); 2539 spin_unlock(&res->spinlock);
2538 dlm_lockres_put(res); 2540 dlm_lockres_put(res);
2539 2541
2542 /* about to get rid of mle, detach from heartbeat */
2543 __dlm_mle_detach_hb_events(dlm, mle);
2544
2540 /* dump the mle */ 2545 /* dump the mle */
2541 spin_lock(&dlm->master_lock); 2546 spin_lock(&dlm->master_lock);
2542 __dlm_put_mle(mle); 2547 __dlm_put_mle(mle);
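
Both dlmmaster.c hunks insert __dlm_mle_detach_hb_events() immediately before the final __dlm_put_mle(), so the master-list entry is unhooked from heartbeat callbacks before its last reference can free it. A generic userspace sketch of that ordering, with invented names:

/* Sketch: detach an object from its callback/event source before dropping
 * the reference that may free it, so no event can fire into freed memory. */
#include <stdio.h>
#include <stdlib.h>

struct entry {
    int refs;
    int registered;              /* still hooked to the event source? */
};

static void detach_events(struct entry *e) { e->registered = 0; }

static void put_entry(struct entry *e)
{
    if (--e->refs == 0) {
        if (e->registered)
            printf("BUG: freeing while still registered\n");
        free(e);
    }
}

int main(void)
{
    struct entry *e = calloc(1, sizeof(*e));
    if (!e)
        return 1;
    e->refs = 1;
    e->registered = 1;

    detach_events(e);            /* the ordering the patch enforces */
    put_entry(e);                /* the last put may free the object */
    return 0;
}
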
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 186e9a76aa58..ed76bda1a534 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -278,6 +278,24 @@ int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
278 return dead; 278 return dead;
279} 279}
280 280
281int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
282{
283 if (timeout) {
284 mlog(ML_NOTICE, "%s: waiting %dms for notification of "
285 "death of node %u\n", dlm->name, timeout, node);
286 wait_event_timeout(dlm->dlm_reco_thread_wq,
287 dlm_is_node_dead(dlm, node),
288 msecs_to_jiffies(timeout));
289 } else {
290 mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
291 "of death of node %u\n", dlm->name, node);
292 wait_event(dlm->dlm_reco_thread_wq,
293 dlm_is_node_dead(dlm, node));
294 }
295 /* for now, return 0 */
296 return 0;
297}
298
281/* callers of the top-level api calls (dlmlock/dlmunlock) should 299/* callers of the top-level api calls (dlmlock/dlmunlock) should
282 * block on the dlm->reco.event when recovery is in progress. 300 * block on the dlm->reco.event when recovery is in progress.
283 * the dlm recovery thread will set this state when it begins 301 * the dlm recovery thread will set this state when it begins
@@ -2032,6 +2050,30 @@ again:
2032 dlm->reco.new_master); 2050 dlm->reco.new_master);
2033 status = -EEXIST; 2051 status = -EEXIST;
2034 } else { 2052 } else {
2053 status = 0;
2054
2055 /* see if recovery was already finished elsewhere */
2056 spin_lock(&dlm->spinlock);
2057 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2058 status = -EINVAL;
2059 mlog(0, "%s: got reco EX lock, but "
2060 "node got recovered already\n", dlm->name);
2061 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2062 mlog(ML_ERROR, "%s: new master is %u "
2063 "but no dead node!\n",
2064 dlm->name, dlm->reco.new_master);
2065 BUG();
2066 }
2067 }
2068 spin_unlock(&dlm->spinlock);
2069 }
2070
2071 /* if this node has actually become the recovery master,
2072 * set the master and send the messages to begin recovery */
2073 if (!status) {
2074 mlog(0, "%s: dead=%u, this=%u, sending "
2075 "begin_reco now\n", dlm->name,
2076 dlm->reco.dead_node, dlm->node_num);
2035 status = dlm_send_begin_reco_message(dlm, 2077 status = dlm_send_begin_reco_message(dlm,
2036 dlm->reco.dead_node); 2078 dlm->reco.dead_node);
2037 /* this always succeeds */ 2079 /* this always succeeds */
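
The dlmrecovery.c changes add the helper itself, dlm_wait_for_node_death(), which sleeps on dlm_reco_thread_wq with a timeout when one is given and indefinitely otherwise, and they make the would-be recovery master re-check, under dlm->spinlock, that a dead node still needs recovery before sending begin_reco. A hedged pthread sketch of that second pattern, re-validating shared state under the lock after winning a race (all names illustrative; compile with -pthread):

/* Sketch: after winning the exclusive role, re-check the shared state under
 * the lock, because another node (thread here) may have finished the work
 * while we were racing for it. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int dead_node = 7;        /* -1 means "already recovered" */

static int become_recovery_master(void)
{
    int status = 0;

    pthread_mutex_lock(&state_lock);
    if (dead_node == -1)
        status = -1;             /* someone else already recovered it */
    pthread_mutex_unlock(&state_lock);

    if (!status)
        printf("acting as recovery master for node %d\n", dead_node);
    else
        printf("recovery already finished elsewhere, backing off\n");
    return status;
}

int main(void)
{
    become_recovery_master();    /* first caller does the work */
    pthread_mutex_lock(&state_lock);
    dead_node = -1;              /* mark recovery complete */
    pthread_mutex_unlock(&state_lock);
    become_recovery_master();    /* second caller backs off */
    return 0;
}
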
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index fa0bcac5ceae..d329c9df90ae 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1584,10 +1584,9 @@ static int ocfs2_commit_thread(void *arg)
1584 while (!(kthread_should_stop() && 1584 while (!(kthread_should_stop() &&
1585 atomic_read(&journal->j_num_trans) == 0)) { 1585 atomic_read(&journal->j_num_trans) == 0)) {
1586 1586
1587 wait_event_interruptible_timeout(osb->checkpoint_event, 1587 wait_event_interruptible(osb->checkpoint_event,
1588 atomic_read(&journal->j_num_trans) 1588 atomic_read(&journal->j_num_trans)
1589 || kthread_should_stop(), 1589 || kthread_should_stop());
1590 OCFS2_CHECKPOINT_INTERVAL);
1591 1590
1592 status = ocfs2_commit_cache(osb); 1591 status = ocfs2_commit_cache(osb);
1593 if (status < 0) 1592 if (status < 0)
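
With the 8-second OCFS2_CHECKPOINT_INTERVAL gone (see the journal.h hunk below), the commit thread no longer wakes on a timer: it sleeps until checkpoint_event is signalled or it is asked to stop. A hedged pthread sketch of that purely event-driven worker loop (illustrative names; compile with -pthread):

/* Sketch: a worker that blocks until it is kicked or told to stop, with no
 * periodic polling interval -- the shape the commit thread takes here. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
static int pending, should_stop;

static void *commit_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&m);
    while (!(should_stop && pending == 0)) {
        while (!pending && !should_stop)
            pthread_cond_wait(&c, &m);   /* no timeout: purely event driven */
        if (pending) {
            pending = 0;                 /* "commit the cache" */
            printf("committed\n");
        }
    }
    pthread_mutex_unlock(&m);
    return NULL;
}

static void kick(int stop)
{
    pthread_mutex_lock(&m);
    if (stop) should_stop = 1; else pending = 1;
    pthread_cond_signal(&c);
    pthread_mutex_unlock(&m);
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, commit_thread, NULL);
    kick(0);                             /* queue one transaction */
    kick(1);                             /* then ask the thread to stop */
    pthread_join(t, NULL);
    return 0;
}
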
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 7d0a816184fa..2f3a6acdac45 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -29,8 +29,6 @@
29#include <linux/fs.h> 29#include <linux/fs.h>
30#include <linux/jbd.h> 30#include <linux/jbd.h>
31 31
32#define OCFS2_CHECKPOINT_INTERVAL (8 * HZ)
33
34enum ocfs2_journal_state { 32enum ocfs2_journal_state {
35 OCFS2_JOURNAL_FREE = 0, 33 OCFS2_JOURNAL_FREE = 0,
36 OCFS2_JOURNAL_LOADED, 34 OCFS2_JOURNAL_LOADED,
diff --git a/include/asm-alpha/mman.h b/include/asm-alpha/mman.h
index a21515c16a43..5f24c755f577 100644
--- a/include/asm-alpha/mman.h
+++ b/include/asm-alpha/mman.h
@@ -42,9 +42,11 @@
42#define MADV_WILLNEED 3 /* will need these pages */ 42#define MADV_WILLNEED 3 /* will need these pages */
43#define MADV_SPACEAVAIL 5 /* ensure resources are available */ 43#define MADV_SPACEAVAIL 5 /* ensure resources are available */
44#define MADV_DONTNEED 6 /* don't need these pages */ 44#define MADV_DONTNEED 6 /* don't need these pages */
45#define MADV_REMOVE 7 /* remove these pages & resources */ 45
46#define MADV_DONTFORK 0x30 /* dont inherit across fork */ 46/* common/generic parameters */
47#define MADV_DOFORK 0x31 /* do inherit across fork */ 47#define MADV_REMOVE 9 /* remove these pages & resources */
48#define MADV_DONTFORK 10 /* don't inherit across fork */
49#define MADV_DOFORK 11 /* do inherit across fork */
48 50
49/* compatibility flags */ 51/* compatibility flags */
50#define MAP_ANON MAP_ANONYMOUS 52#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-arm/mman.h b/include/asm-arm/mman.h
index 693ed859e632..54570d2e95b7 100644
--- a/include/asm-arm/mman.h
+++ b/include/asm-arm/mman.h
@@ -1,19 +1,7 @@
1#ifndef __ARM_MMAN_H__ 1#ifndef __ARM_MMAN_H__
2#define __ARM_MMAN_H__ 2#define __ARM_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_SEM 0x8 /* page may be used for atomic ops */
8#define PROT_NONE 0x0 /* page can not be accessed */
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17 5
18#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
19#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -23,24 +11,7 @@
23#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ 11#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */
24#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
25 13
26#define MS_ASYNC 1 /* sync memory asynchronously */
27#define MS_INVALIDATE 2 /* invalidate the caches */
28#define MS_SYNC 4 /* synchronous memory sync */
29
30#define MCL_CURRENT 1 /* lock all current mappings */ 14#define MCL_CURRENT 1 /* lock all current mappings */
31#define MCL_FUTURE 2 /* lock all future mappings */ 15#define MCL_FUTURE 2 /* lock all future mappings */
32 16
33#define MADV_NORMAL 0x0 /* default page-in behavior */
34#define MADV_RANDOM 0x1 /* page-in minimum required */
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
39#define MADV_DONTFORK 0x30 /* dont inherit across fork */
40#define MADV_DOFORK 0x31 /* do inherit across fork */
41
42/* compatibility flags */
43#define MAP_ANON MAP_ANONYMOUS
44#define MAP_FILE 0
45
46#endif /* __ARM_MMAN_H__ */ 17#endif /* __ARM_MMAN_H__ */
diff --git a/include/asm-arm26/mman.h b/include/asm-arm26/mman.h
index 2096c50df888..4000a6c1b76b 100644
--- a/include/asm-arm26/mman.h
+++ b/include/asm-arm26/mman.h
@@ -1,19 +1,7 @@
1#ifndef __ARM_MMAN_H__ 1#ifndef __ARM_MMAN_H__
2#define __ARM_MMAN_H__ 2#define __ARM_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_SEM 0x8 /* page may be used for atomic ops */
8#define PROT_NONE 0x0 /* page can not be accessed */
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17 5
18#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
19#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -23,24 +11,7 @@
23#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ 11#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */
24#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
25 13
26#define MS_ASYNC 1 /* sync memory asynchronously */
27#define MS_INVALIDATE 2 /* invalidate the caches */
28#define MS_SYNC 4 /* synchronous memory sync */
29
30#define MCL_CURRENT 1 /* lock all current mappings */ 14#define MCL_CURRENT 1 /* lock all current mappings */
31#define MCL_FUTURE 2 /* lock all future mappings */ 15#define MCL_FUTURE 2 /* lock all future mappings */
32 16
33#define MADV_NORMAL 0x0 /* default page-in behavior */
34#define MADV_RANDOM 0x1 /* page-in minimum required */
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
39#define MADV_DONTFORK 0x30 /* dont inherit across fork */
40#define MADV_DOFORK 0x31 /* do inherit across fork */
41
42/* compatibility flags */
43#define MAP_ANON MAP_ANONYMOUS
44#define MAP_FILE 0
45
46#endif /* __ARM_MMAN_H__ */ 17#endif /* __ARM_MMAN_H__ */
diff --git a/include/asm-cris/mman.h b/include/asm-cris/mman.h
index deddfb239ff5..1c35e1b66b46 100644
--- a/include/asm-cris/mman.h
+++ b/include/asm-cris/mman.h
@@ -3,19 +3,7 @@
3 3
4/* verbatim copy of asm-i386/ version */ 4/* verbatim copy of asm-i386/ version */
5 5
6#define PROT_READ 0x1 /* page can be read */ 6#include <asm-generic/mman.h>
7#define PROT_WRITE 0x2 /* page can be written */
8#define PROT_EXEC 0x4 /* page can be executed */
9#define PROT_SEM 0x8 /* page may be used for atomic ops */
10#define PROT_NONE 0x0 /* page can not be accessed */
11#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
12#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
13
14#define MAP_SHARED 0x01 /* Share changes */
15#define MAP_PRIVATE 0x02 /* Changes are private */
16#define MAP_TYPE 0x0f /* Mask for type of mapping */
17#define MAP_FIXED 0x10 /* Interpret addr exactly */
18#define MAP_ANONYMOUS 0x20 /* don't use a file */
19 7
20#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 8#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
21#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 9#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -25,24 +13,7 @@
25#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 13#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
26#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 14#define MAP_NONBLOCK 0x10000 /* do not block on IO */
27 15
28#define MS_ASYNC 1 /* sync memory asynchronously */
29#define MS_INVALIDATE 2 /* invalidate the caches */
30#define MS_SYNC 4 /* synchronous memory sync */
31
32#define MCL_CURRENT 1 /* lock all current mappings */ 16#define MCL_CURRENT 1 /* lock all current mappings */
33#define MCL_FUTURE 2 /* lock all future mappings */ 17#define MCL_FUTURE 2 /* lock all future mappings */
34 18
35#define MADV_NORMAL 0x0 /* default page-in behavior */
36#define MADV_RANDOM 0x1 /* page-in minimum required */
37#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
38#define MADV_WILLNEED 0x3 /* pre-fault pages */
39#define MADV_DONTNEED 0x4 /* discard these pages */
40#define MADV_REMOVE 0x5 /* remove these pages & resources */
41#define MADV_DONTFORK 0x30 /* dont inherit across fork */
42#define MADV_DOFORK 0x31 /* do inherit across fork */
43
44/* compatibility flags */
45#define MAP_ANON MAP_ANONYMOUS
46#define MAP_FILE 0
47
48#endif /* __CRIS_MMAN_H__ */ 19#endif /* __CRIS_MMAN_H__ */
diff --git a/include/asm-frv/mman.h b/include/asm-frv/mman.h
index d3bca306da82..b4371e928683 100644
--- a/include/asm-frv/mman.h
+++ b/include/asm-frv/mman.h
@@ -1,19 +1,7 @@
1#ifndef __ASM_MMAN_H__ 1#ifndef __ASM_MMAN_H__
2#define __ASM_MMAN_H__ 2#define __ASM_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_SEM 0x8 /* page may be used for atomic ops */
8#define PROT_NONE 0x0 /* page can not be accessed */
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17 5
18#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
19#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -23,25 +11,8 @@
23#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 11#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
24#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
25 13
26#define MS_ASYNC 1 /* sync memory asynchronously */
27#define MS_INVALIDATE 2 /* invalidate the caches */
28#define MS_SYNC 4 /* synchronous memory sync */
29
30#define MCL_CURRENT 1 /* lock all current mappings */ 14#define MCL_CURRENT 1 /* lock all current mappings */
31#define MCL_FUTURE 2 /* lock all future mappings */ 15#define MCL_FUTURE 2 /* lock all future mappings */
32 16
33#define MADV_NORMAL 0x0 /* default page-in behavior */
34#define MADV_RANDOM 0x1 /* page-in minimum required */
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
39#define MADV_DONTFORK 0x30 /* dont inherit across fork */
40#define MADV_DOFORK 0x31 /* do inherit across fork */
41
42/* compatibility flags */
43#define MAP_ANON MAP_ANONYMOUS
44#define MAP_FILE 0
45
46#endif /* __ASM_MMAN_H__ */ 17#endif /* __ASM_MMAN_H__ */
47 18
diff --git a/include/asm-generic/mman.h b/include/asm-generic/mman.h
new file mode 100644
index 000000000000..3b41d2bb70da
--- /dev/null
+++ b/include/asm-generic/mman.h
@@ -0,0 +1,42 @@
1#ifndef _ASM_GENERIC_MMAN_H
2#define _ASM_GENERIC_MMAN_H
3
4/*
5 Author: Michael S. Tsirkin <mst@mellanox.co.il>, Mellanox Technologies Ltd.
6 Based on: asm-xxx/mman.h
7*/
8
9#define PROT_READ 0x1 /* page can be read */
10#define PROT_WRITE 0x2 /* page can be written */
11#define PROT_EXEC 0x4 /* page can be executed */
12#define PROT_SEM 0x8 /* page may be used for atomic ops */
13#define PROT_NONE 0x0 /* page can not be accessed */
14#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
15#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
16
17#define MAP_SHARED 0x01 /* Share changes */
18#define MAP_PRIVATE 0x02 /* Changes are private */
19#define MAP_TYPE 0x0f /* Mask for type of mapping */
20#define MAP_FIXED 0x10 /* Interpret addr exactly */
21#define MAP_ANONYMOUS 0x20 /* don't use a file */
22
23#define MS_ASYNC 1 /* sync memory asynchronously */
24#define MS_INVALIDATE 2 /* invalidate the caches */
25#define MS_SYNC 4 /* synchronous memory sync */
26
27#define MADV_NORMAL 0 /* no further special treatment */
28#define MADV_RANDOM 1 /* expect random page references */
29#define MADV_SEQUENTIAL 2 /* expect sequential page references */
30#define MADV_WILLNEED 3 /* will need these pages */
31#define MADV_DONTNEED 4 /* don't need these pages */
32
33/* common parameters: try to keep these consistent across architectures */
34#define MADV_REMOVE 9 /* remove these pages & resources */
35#define MADV_DONTFORK 10 /* don't inherit across fork */
36#define MADV_DOFORK 11 /* do inherit across fork */
37
38/* compatibility flags */
39#define MAP_ANON MAP_ANONYMOUS
40#define MAP_FILE 0
41
42#endif
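
The new asm-generic/mman.h carries everything the architectures agree on (PROT_*, the basic MAP_* bits, MS_*, and MADV_* with the common parameters pinned at 9-11); each arch header then only adds its own peculiarities on top of the include. A one-file sketch of that layering, using made-up macro names rather than the kernel's real headers:

/* One-file sketch of the split: a "generic" block every port would share,
 * followed by an "arch" block that only adds its own extras. In the kernel
 * these live in asm-generic/mman.h and asm-<arch>/mman.h respectively. */
#include <stdio.h>

/* ---- generic part (what asm-generic/mman.h centralizes) ---- */
#define GEN_PROT_READ     0x1
#define GEN_MAP_SHARED    0x01
#define GEN_MADV_REMOVE   9      /* common parameters: 9, 10, 11 */
#define GEN_MADV_DONTFORK 10
#define GEN_MADV_DOFORK   11

/* ---- arch part (what an asm-<arch>/mman.h still defines itself) ---- */
#define ARCH_MAP_GROWSDOWN 0x0100   /* example of a flag that stays per-arch */

int main(void)
{
    printf("generic MADV_REMOVE=%d, arch MAP_GROWSDOWN=0x%x\n",
           GEN_MADV_REMOVE, ARCH_MAP_GROWSDOWN);
    return 0;
}
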
diff --git a/include/asm-h8300/mman.h b/include/asm-h8300/mman.h
index ac0346f7d11d..b9f104f22a36 100644
--- a/include/asm-h8300/mman.h
+++ b/include/asm-h8300/mman.h
@@ -1,19 +1,7 @@
1#ifndef __H8300_MMAN_H__ 1#ifndef __H8300_MMAN_H__
2#define __H8300_MMAN_H__ 2#define __H8300_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_SEM 0x8 /* page may be used for atomic ops */
8#define PROT_NONE 0x0 /* page can not be accessed */
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17 5
18#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
19#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -23,24 +11,7 @@
23#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 11#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
24#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
25 13
26#define MS_ASYNC 1 /* sync memory asynchronously */
27#define MS_INVALIDATE 2 /* invalidate the caches */
28#define MS_SYNC 4 /* synchronous memory sync */
29
30#define MCL_CURRENT 1 /* lock all current mappings */ 14#define MCL_CURRENT 1 /* lock all current mappings */
31#define MCL_FUTURE 2 /* lock all future mappings */ 15#define MCL_FUTURE 2 /* lock all future mappings */
32 16
33#define MADV_NORMAL 0x0 /* default page-in behavior */
34#define MADV_RANDOM 0x1 /* page-in minimum required */
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
39#define MADV_DONTFORK 0x30 /* dont inherit across fork */
40#define MADV_DOFORK 0x31 /* do inherit across fork */
41
42/* compatibility flags */
43#define MAP_ANON MAP_ANONYMOUS
44#define MAP_FILE 0
45
46#endif /* __H8300_MMAN_H__ */ 17#endif /* __H8300_MMAN_H__ */
diff --git a/include/asm-i386/mman.h b/include/asm-i386/mman.h
index ab2339a1d807..8fd9d7ab7faf 100644
--- a/include/asm-i386/mman.h
+++ b/include/asm-i386/mman.h
@@ -1,19 +1,7 @@
1#ifndef __I386_MMAN_H__ 1#ifndef __I386_MMAN_H__
2#define __I386_MMAN_H__ 2#define __I386_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_SEM 0x8 /* page may be used for atomic ops */
8#define PROT_NONE 0x0 /* page can not be accessed */
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17 5
18#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
19#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -23,24 +11,7 @@
23#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 11#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
24#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
25 13
26#define MS_ASYNC 1 /* sync memory asynchronously */
27#define MS_INVALIDATE 2 /* invalidate the caches */
28#define MS_SYNC 4 /* synchronous memory sync */
29
30#define MCL_CURRENT 1 /* lock all current mappings */ 14#define MCL_CURRENT 1 /* lock all current mappings */
31#define MCL_FUTURE 2 /* lock all future mappings */ 15#define MCL_FUTURE 2 /* lock all future mappings */
32 16
33#define MADV_NORMAL 0x0 /* default page-in behavior */
34#define MADV_RANDOM 0x1 /* page-in minimum required */
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
39#define MADV_DONTFORK 0x30 /* dont inherit across fork */
40#define MADV_DOFORK 0x31 /* do inherit across fork */
41
42/* compatibility flags */
43#define MAP_ANON MAP_ANONYMOUS
44#define MAP_FILE 0
45
46#endif /* __I386_MMAN_H__ */ 17#endif /* __I386_MMAN_H__ */
diff --git a/include/asm-ia64/mman.h b/include/asm-ia64/mman.h
index 357ebb780cc0..6ba179f12718 100644
--- a/include/asm-ia64/mman.h
+++ b/include/asm-ia64/mman.h
@@ -8,19 +8,7 @@
8 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co 8 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
9 */ 9 */
10 10
11#define PROT_READ 0x1 /* page can be read */ 11#include <asm-generic/mman.h>
12#define PROT_WRITE 0x2 /* page can be written */
13#define PROT_EXEC 0x4 /* page can be executed */
14#define PROT_SEM 0x8 /* page may be used for atomic ops */
15#define PROT_NONE 0x0 /* page can not be accessed */
16#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
17#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
18
19#define MAP_SHARED 0x01 /* Share changes */
20#define MAP_PRIVATE 0x02 /* Changes are private */
21#define MAP_TYPE 0x0f /* Mask for type of mapping */
22#define MAP_FIXED 0x10 /* Interpret addr exactly */
23#define MAP_ANONYMOUS 0x20 /* don't use a file */
24 12
25#define MAP_GROWSDOWN 0x00100 /* stack-like segment */ 13#define MAP_GROWSDOWN 0x00100 /* stack-like segment */
26#define MAP_GROWSUP 0x00200 /* register stack-like segment */ 14#define MAP_GROWSUP 0x00200 /* register stack-like segment */
@@ -31,24 +19,7 @@
31#define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */ 19#define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */
32#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 20#define MAP_NONBLOCK 0x10000 /* do not block on IO */
33 21
34#define MS_ASYNC 1 /* sync memory asynchronously */
35#define MS_INVALIDATE 2 /* invalidate the caches */
36#define MS_SYNC 4 /* synchronous memory sync */
37
38#define MCL_CURRENT 1 /* lock all current mappings */ 22#define MCL_CURRENT 1 /* lock all current mappings */
39#define MCL_FUTURE 2 /* lock all future mappings */ 23#define MCL_FUTURE 2 /* lock all future mappings */
40 24
41#define MADV_NORMAL 0x0 /* default page-in behavior */
42#define MADV_RANDOM 0x1 /* page-in minimum required */
43#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
44#define MADV_WILLNEED 0x3 /* pre-fault pages */
45#define MADV_DONTNEED 0x4 /* discard these pages */
46#define MADV_REMOVE 0x5 /* remove these pages & resources */
47#define MADV_DONTFORK 0x30 /* dont inherit across fork */
48#define MADV_DOFORK 0x31 /* do inherit across fork */
49
50/* compatibility flags */
51#define MAP_ANON MAP_ANONYMOUS
52#define MAP_FILE 0
53
54#endif /* _ASM_IA64_MMAN_H */ 25#endif /* _ASM_IA64_MMAN_H */
diff --git a/include/asm-m32r/mman.h b/include/asm-m32r/mman.h
index 6b02fe3fcff2..695a860c024f 100644
--- a/include/asm-m32r/mman.h
+++ b/include/asm-m32r/mman.h
@@ -1,21 +1,9 @@
1#ifndef __M32R_MMAN_H__ 1#ifndef __M32R_MMAN_H__
2#define __M32R_MMAN_H__ 2#define __M32R_MMAN_H__
3 3
4/* orig : i386 2.6.0-test6 */ 4#include <asm-generic/mman.h>
5
6#define PROT_READ 0x1 /* page can be read */
7#define PROT_WRITE 0x2 /* page can be written */
8#define PROT_EXEC 0x4 /* page can be executed */
9#define PROT_SEM 0x8 /* page may be used for atomic ops */
10#define PROT_NONE 0x0 /* page can not be accessed */
11#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
12#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
13 5
14#define MAP_SHARED 0x01 /* Share changes */ 6/* orig : i386 2.6.0-test6 */
15#define MAP_PRIVATE 0x02 /* Changes are private */
16#define MAP_TYPE 0x0f /* Mask for type of mapping */
17#define MAP_FIXED 0x10 /* Interpret addr exactly */
18#define MAP_ANONYMOUS 0x20 /* don't use a file */
19 7
20#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 8#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
21#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 9#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -25,24 +13,7 @@
25#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 13#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
26#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 14#define MAP_NONBLOCK 0x10000 /* do not block on IO */
27 15
28#define MS_ASYNC 1 /* sync memory asynchronously */
29#define MS_INVALIDATE 2 /* invalidate the caches */
30#define MS_SYNC 4 /* synchronous memory sync */
31
32#define MCL_CURRENT 1 /* lock all current mappings */ 16#define MCL_CURRENT 1 /* lock all current mappings */
33#define MCL_FUTURE 2 /* lock all future mappings */ 17#define MCL_FUTURE 2 /* lock all future mappings */
34 18
35#define MADV_NORMAL 0x0 /* default page-in behavior */
36#define MADV_RANDOM 0x1 /* page-in minimum required */
37#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
38#define MADV_WILLNEED 0x3 /* pre-fault pages */
39#define MADV_DONTNEED 0x4 /* discard these pages */
40#define MADV_REMOVE 0x5 /* remove these pages & resources */
41#define MADV_DONTFORK 0x30 /* dont inherit across fork */
42#define MADV_DOFORK 0x31 /* do inherit across fork */
43
44/* compatibility flags */
45#define MAP_ANON MAP_ANONYMOUS
46#define MAP_FILE 0
47
48#endif /* __M32R_MMAN_H__ */ 19#endif /* __M32R_MMAN_H__ */
diff --git a/include/asm-m68k/mman.h b/include/asm-m68k/mman.h
index efd12bc4ccb7..1626d37f4898 100644
--- a/include/asm-m68k/mman.h
+++ b/include/asm-m68k/mman.h
@@ -1,19 +1,7 @@
1#ifndef __M68K_MMAN_H__ 1#ifndef __M68K_MMAN_H__
2#define __M68K_MMAN_H__ 2#define __M68K_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_SEM 0x8 /* page may be used for atomic ops */
8#define PROT_NONE 0x0 /* page can not be accessed */
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17 5
18#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
19#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -23,24 +11,7 @@
23#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 11#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
24#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
25 13
26#define MS_ASYNC 1 /* sync memory asynchronously */
27#define MS_INVALIDATE 2 /* invalidate the caches */
28#define MS_SYNC 4 /* synchronous memory sync */
29
30#define MCL_CURRENT 1 /* lock all current mappings */ 14#define MCL_CURRENT 1 /* lock all current mappings */
31#define MCL_FUTURE 2 /* lock all future mappings */ 15#define MCL_FUTURE 2 /* lock all future mappings */
32 16
33#define MADV_NORMAL 0x0 /* default page-in behavior */
34#define MADV_RANDOM 0x1 /* page-in minimum required */
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
39#define MADV_DONTFORK 0x30 /* dont inherit across fork */
40#define MADV_DOFORK 0x31 /* do inherit across fork */
41
42/* compatibility flags */
43#define MAP_ANON MAP_ANONYMOUS
44#define MAP_FILE 0
45
46#endif /* __M68K_MMAN_H__ */ 17#endif /* __M68K_MMAN_H__ */
diff --git a/include/asm-mips/mman.h b/include/asm-mips/mman.h
index 6d01e26830fa..046cf686bee7 100644
--- a/include/asm-mips/mman.h
+++ b/include/asm-mips/mman.h
@@ -60,17 +60,19 @@
60#define MCL_CURRENT 1 /* lock all current mappings */ 60#define MCL_CURRENT 1 /* lock all current mappings */
61#define MCL_FUTURE 2 /* lock all future mappings */ 61#define MCL_FUTURE 2 /* lock all future mappings */
62 62
63#define MADV_NORMAL 0x0 /* default page-in behavior */ 63#define MADV_NORMAL 0 /* no further special treatment */
64#define MADV_RANDOM 0x1 /* page-in minimum required */ 64#define MADV_RANDOM 1 /* expect random page references */
65#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ 65#define MADV_SEQUENTIAL 2 /* expect sequential page references */
66#define MADV_WILLNEED 0x3 /* pre-fault pages */ 66#define MADV_WILLNEED 3 /* will need these pages */
67#define MADV_DONTNEED 0x4 /* discard these pages */ 67#define MADV_DONTNEED 4 /* don't need these pages */
68#define MADV_REMOVE 0x5 /* remove these pages & resources */ 68
69#define MADV_DONTFORK 0x30 /* dont inherit across fork */ 69/* common parameters: try to keep these consistent across architectures */
70#define MADV_DOFORK 0x31 /* do inherit across fork */ 70#define MADV_REMOVE 9 /* remove these pages & resources */
71#define MADV_DONTFORK 10 /* don't inherit across fork */
72#define MADV_DOFORK 11 /* do inherit across fork */
71 73
72/* compatibility flags */ 74/* compatibility flags */
73#define MAP_ANON MAP_ANONYMOUS 75#define MAP_ANON MAP_ANONYMOUS
74#define MAP_FILE 0 76#define MAP_FILE 0
75 77
76#endif /* _ASM_MMAN_H */ 78#endif /* _ASM_MMAN_H */
diff --git a/include/asm-parisc/mman.h b/include/asm-parisc/mman.h
index a381cf5c8f55..0ef15ee0f17e 100644
--- a/include/asm-parisc/mman.h
+++ b/include/asm-parisc/mman.h
@@ -38,7 +38,11 @@
38#define MADV_SPACEAVAIL 5 /* insure that resources are reserved */ 38#define MADV_SPACEAVAIL 5 /* insure that resources are reserved */
39#define MADV_VPS_PURGE 6 /* Purge pages from VM page cache */ 39#define MADV_VPS_PURGE 6 /* Purge pages from VM page cache */
40#define MADV_VPS_INHERIT 7 /* Inherit parents page size */ 40#define MADV_VPS_INHERIT 7 /* Inherit parents page size */
41#define MADV_REMOVE 8 /* remove these pages & resources */ 41
42/* common/generic parameters */
43#define MADV_REMOVE 9 /* remove these pages & resources */
44#define MADV_DONTFORK 10 /* don't inherit across fork */
45#define MADV_DOFORK 11 /* do inherit across fork */
42 46
43/* The range 12-64 is reserved for page size specification. */ 47/* The range 12-64 is reserved for page size specification. */
44#define MADV_4K_PAGES 12 /* Use 4K pages */ 48#define MADV_4K_PAGES 12 /* Use 4K pages */
@@ -49,8 +53,6 @@
49#define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */ 53#define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */
50#define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */ 54#define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */
51#define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */ 55#define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */
52#define MADV_DONTFORK 0x30 /* dont inherit across fork */
53#define MADV_DOFORK 0x31 /* do inherit across fork */
54 56
55/* compatibility flags */ 57/* compatibility flags */
56#define MAP_ANON MAP_ANONYMOUS 58#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-powerpc/mman.h b/include/asm-powerpc/mman.h
index fcff25d13f13..24cf664a8295 100644
--- a/include/asm-powerpc/mman.h
+++ b/include/asm-powerpc/mman.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_POWERPC_MMAN_H 1#ifndef _ASM_POWERPC_MMAN_H
2#define _ASM_POWERPC_MMAN_H 2#define _ASM_POWERPC_MMAN_H
3 3
4#include <asm-generic/mman.h>
5
4/* 6/*
5 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -8,19 +10,6 @@
8 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
9 */ 11 */
10 12
11#define PROT_READ 0x1 /* page can be read */
12#define PROT_WRITE 0x2 /* page can be written */
13#define PROT_EXEC 0x4 /* page can be executed */
14#define PROT_SEM 0x8 /* page may be used for atomic ops */
15#define PROT_NONE 0x0 /* page can not be accessed */
16#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
17#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
18
19#define MAP_SHARED 0x01 /* Share changes */
20#define MAP_PRIVATE 0x02 /* Changes are private */
21#define MAP_TYPE 0x0f /* Mask for type of mapping */
22#define MAP_FIXED 0x10 /* Interpret addr exactly */
23#define MAP_ANONYMOUS 0x20 /* don't use a file */
24#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ 13#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
25#define MAP_NORESERVE 0x40 /* don't reserve swap pages */ 14#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
26#define MAP_LOCKED 0x80 15#define MAP_LOCKED 0x80
@@ -29,27 +18,10 @@
29#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 18#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
30#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ 19#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
31 20
32#define MS_ASYNC 1 /* sync memory asynchronously */
33#define MS_INVALIDATE 2 /* invalidate the caches */
34#define MS_SYNC 4 /* synchronous memory sync */
35
36#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */ 21#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */
37#define MCL_FUTURE 0x4000 /* lock all additions to address space */ 22#define MCL_FUTURE 0x4000 /* lock all additions to address space */
38 23
39#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 24#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
40#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 25#define MAP_NONBLOCK 0x10000 /* do not block on IO */
41 26
42#define MADV_NORMAL 0x0 /* default page-in behavior */
43#define MADV_RANDOM 0x1 /* page-in minimum required */
44#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
45#define MADV_WILLNEED 0x3 /* pre-fault pages */
46#define MADV_DONTNEED 0x4 /* discard these pages */
47#define MADV_REMOVE 0x5 /* remove these pages & resources */
48#define MADV_DONTFORK 0x30 /* dont inherit across fork */
49#define MADV_DOFORK 0x31 /* do inherit across fork */
50
51/* compatibility flags */
52#define MAP_ANON MAP_ANONYMOUS
53#define MAP_FILE 0
54
55#endif /* _ASM_POWERPC_MMAN_H */ 27#endif /* _ASM_POWERPC_MMAN_H */
diff --git a/include/asm-s390/mman.h b/include/asm-s390/mman.h
index d41ca1477010..7839767d837e 100644
--- a/include/asm-s390/mman.h
+++ b/include/asm-s390/mman.h
@@ -9,19 +9,7 @@
9#ifndef __S390_MMAN_H__ 9#ifndef __S390_MMAN_H__
10#define __S390_MMAN_H__ 10#define __S390_MMAN_H__
11 11
12#define PROT_READ 0x1 /* page can be read */ 12#include <asm-generic/mman.h>
13#define PROT_WRITE 0x2 /* page can be written */
14#define PROT_EXEC 0x4 /* page can be executed */
15#define PROT_SEM 0x8 /* page may be used for atomic ops */
16#define PROT_NONE 0x0 /* page can not be accessed */
17#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
18#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
19
20#define MAP_SHARED 0x01 /* Share changes */
21#define MAP_PRIVATE 0x02 /* Changes are private */
22#define MAP_TYPE 0x0f /* Mask for type of mapping */
23#define MAP_FIXED 0x10 /* Interpret addr exactly */
24#define MAP_ANONYMOUS 0x20 /* don't use a file */
25 13
26#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 14#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
27#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 15#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -31,24 +19,7 @@
31#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 19#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
32#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 20#define MAP_NONBLOCK 0x10000 /* do not block on IO */
33 21
34#define MS_ASYNC 1 /* sync memory asynchronously */
35#define MS_INVALIDATE 2 /* invalidate the caches */
36#define MS_SYNC 4 /* synchronous memory sync */
37
38#define MCL_CURRENT 1 /* lock all current mappings */ 22#define MCL_CURRENT 1 /* lock all current mappings */
39#define MCL_FUTURE 2 /* lock all future mappings */ 23#define MCL_FUTURE 2 /* lock all future mappings */
40 24
41#define MADV_NORMAL 0x0 /* default page-in behavior */
42#define MADV_RANDOM 0x1 /* page-in minimum required */
43#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
44#define MADV_WILLNEED 0x3 /* pre-fault pages */
45#define MADV_DONTNEED 0x4 /* discard these pages */
46#define MADV_REMOVE 0x5 /* remove these pages & resources */
47#define MADV_DONTFORK 0x30 /* dont inherit across fork */
48#define MADV_DOFORK 0x31 /* do inherit across fork */
49
50/* compatibility flags */
51#define MAP_ANON MAP_ANONYMOUS
52#define MAP_FILE 0
53
54#endif /* __S390_MMAN_H__ */ 25#endif /* __S390_MMAN_H__ */
diff --git a/include/asm-sh/mman.h b/include/asm-sh/mman.h
index 0e08d0573abc..156eb0225cf6 100644
--- a/include/asm-sh/mman.h
+++ b/include/asm-sh/mman.h
@@ -1,19 +1,7 @@
1#ifndef __ASM_SH_MMAN_H 1#ifndef __ASM_SH_MMAN_H
2#define __ASM_SH_MMAN_H 2#define __ASM_SH_MMAN_H
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_SEM 0x8 /* page may be used for atomic ops */
8#define PROT_NONE 0x0 /* page can not be accessed */
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17 5
18#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
19#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -23,24 +11,7 @@
23#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ 11#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */
24#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
25 13
26#define MS_ASYNC 1 /* sync memory asynchronously */
27#define MS_INVALIDATE 2 /* invalidate the caches */
28#define MS_SYNC 4 /* synchronous memory sync */
29
30#define MCL_CURRENT 1 /* lock all current mappings */ 14#define MCL_CURRENT 1 /* lock all current mappings */
31#define MCL_FUTURE 2 /* lock all future mappings */ 15#define MCL_FUTURE 2 /* lock all future mappings */
32 16
33#define MADV_NORMAL 0x0 /* default page-in behavior */
34#define MADV_RANDOM 0x1 /* page-in minimum required */
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
39#define MADV_DONTFORK 0x30 /* dont inherit across fork */
40#define MADV_DOFORK 0x31 /* do inherit across fork */
41
42/* compatibility flags */
43#define MAP_ANON MAP_ANONYMOUS
44#define MAP_FILE 0
45
46#endif /* __ASM_SH_MMAN_H */ 17#endif /* __ASM_SH_MMAN_H */
diff --git a/include/asm-sparc/mman.h b/include/asm-sparc/mman.h
index 4a298b2be859..88d1886abf3b 100644
--- a/include/asm-sparc/mman.h
+++ b/include/asm-sparc/mman.h
@@ -2,21 +2,10 @@
2#ifndef __SPARC_MMAN_H__ 2#ifndef __SPARC_MMAN_H__
3#define __SPARC_MMAN_H__ 3#define __SPARC_MMAN_H__
4 4
5/* SunOS'ified... */ 5#include <asm-generic/mman.h>
6 6
7#define PROT_READ 0x1 /* page can be read */ 7/* SunOS'ified... */
8#define PROT_WRITE 0x2 /* page can be written */
9#define PROT_EXEC 0x4 /* page can be executed */
10#define PROT_SEM 0x8 /* page may be used for atomic ops */
11#define PROT_NONE 0x0 /* page can not be accessed */
12#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
13#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
14 8
15#define MAP_SHARED 0x01 /* Share changes */
16#define MAP_PRIVATE 0x02 /* Changes are private */
17#define MAP_TYPE 0x0f /* Mask for type of mapping */
18#define MAP_FIXED 0x10 /* Interpret addr exactly */
19#define MAP_ANONYMOUS 0x20 /* don't use a file */
20#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ 9#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
21#define MAP_NORESERVE 0x40 /* don't reserve swap pages */ 10#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
22#define MAP_INHERIT 0x80 /* SunOS doesn't do this, but... */ 11#define MAP_INHERIT 0x80 /* SunOS doesn't do this, but... */
@@ -27,10 +16,6 @@
27#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 16#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
28#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ 17#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
29 18
30#define MS_ASYNC 1 /* sync memory asynchronously */
31#define MS_INVALIDATE 2 /* invalidate the caches */
32#define MS_SYNC 4 /* synchronous memory sync */
33
34#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */ 19#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */
35#define MCL_FUTURE 0x4000 /* lock all additions to address space */ 20#define MCL_FUTURE 0x4000 /* lock all additions to address space */
36 21
@@ -48,18 +33,6 @@
48#define MC_LOCKAS 5 /* Lock an entire address space of the calling process */ 33#define MC_LOCKAS 5 /* Lock an entire address space of the calling process */
49#define MC_UNLOCKAS 6 /* Unlock entire address space of calling process */ 34#define MC_UNLOCKAS 6 /* Unlock entire address space of calling process */
50 35
51#define MADV_NORMAL 0x0 /* default page-in behavior */
52#define MADV_RANDOM 0x1 /* page-in minimum required */
53#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
54#define MADV_WILLNEED 0x3 /* pre-fault pages */
55#define MADV_DONTNEED 0x4 /* discard these pages */
56#define MADV_FREE 0x5 /* (Solaris) contents can be freed */ 36#define MADV_FREE 0x5 /* (Solaris) contents can be freed */
57#define MADV_REMOVE 0x6 /* remove these pages & resources */
58#define MADV_DONTFORK 0x30 /* dont inherit across fork */
59#define MADV_DOFORK 0x31 /* do inherit across fork */
60
61/* compatibility flags */
62#define MAP_ANON MAP_ANONYMOUS
63#define MAP_FILE 0
64 37
65#endif /* __SPARC_MMAN_H__ */ 38#endif /* __SPARC_MMAN_H__ */
diff --git a/include/asm-sparc64/mman.h b/include/asm-sparc64/mman.h
index d705ec92da8b..6fd878e61435 100644
--- a/include/asm-sparc64/mman.h
+++ b/include/asm-sparc64/mman.h
@@ -2,21 +2,10 @@
2#ifndef __SPARC64_MMAN_H__ 2#ifndef __SPARC64_MMAN_H__
3#define __SPARC64_MMAN_H__ 3#define __SPARC64_MMAN_H__
4 4
5/* SunOS'ified... */ 5#include <asm-generic/mman.h>
6 6
7#define PROT_READ 0x1 /* page can be read */ 7/* SunOS'ified... */
8#define PROT_WRITE 0x2 /* page can be written */
9#define PROT_EXEC 0x4 /* page can be executed */
10#define PROT_SEM 0x8 /* page may be used for atomic ops */
11#define PROT_NONE 0x0 /* page can not be accessed */
12#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
13#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
14 8
15#define MAP_SHARED 0x01 /* Share changes */
16#define MAP_PRIVATE 0x02 /* Changes are private */
17#define MAP_TYPE 0x0f /* Mask for type of mapping */
18#define MAP_FIXED 0x10 /* Interpret addr exactly */
19#define MAP_ANONYMOUS 0x20 /* don't use a file */
20#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ 9#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
21#define MAP_NORESERVE 0x40 /* don't reserve swap pages */ 10#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
22#define MAP_INHERIT 0x80 /* SunOS doesn't do this, but... */ 11#define MAP_INHERIT 0x80 /* SunOS doesn't do this, but... */
@@ -27,10 +16,6 @@
27#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 16#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
28#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ 17#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
29 18
30#define MS_ASYNC 1 /* sync memory asynchronously */
31#define MS_INVALIDATE 2 /* invalidate the caches */
32#define MS_SYNC 4 /* synchronous memory sync */
33
34#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */ 19#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */
35#define MCL_FUTURE 0x4000 /* lock all additions to address space */ 20#define MCL_FUTURE 0x4000 /* lock all additions to address space */
36 21
@@ -48,18 +33,6 @@
48#define MC_LOCKAS 5 /* Lock an entire address space of the calling process */ 33#define MC_LOCKAS 5 /* Lock an entire address space of the calling process */
49#define MC_UNLOCKAS 6 /* Unlock entire address space of calling process */ 34#define MC_UNLOCKAS 6 /* Unlock entire address space of calling process */
50 35
51#define MADV_NORMAL 0x0 /* default page-in behavior */
52#define MADV_RANDOM 0x1 /* page-in minimum required */
53#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
54#define MADV_WILLNEED 0x3 /* pre-fault pages */
55#define MADV_DONTNEED 0x4 /* discard these pages */
56#define MADV_FREE 0x5 /* (Solaris) contents can be freed */ 36#define MADV_FREE 0x5 /* (Solaris) contents can be freed */
57#define MADV_REMOVE 0x6 /* remove these pages & resources */
58#define MADV_DONTFORK 0x30 /* dont inherit across fork */
59#define MADV_DOFORK 0x31 /* do inherit across fork */
60
61/* compatibility flags */
62#define MAP_ANON MAP_ANONYMOUS
63#define MAP_FILE 0
64 37
65#endif /* __SPARC64_MMAN_H__ */ 38#endif /* __SPARC64_MMAN_H__ */
diff --git a/include/asm-v850/mman.h b/include/asm-v850/mman.h
index 7b851c310e41..edbf6edbfb37 100644
--- a/include/asm-v850/mman.h
+++ b/include/asm-v850/mman.h
@@ -1,18 +1,7 @@
1#ifndef __V850_MMAN_H__ 1#ifndef __V850_MMAN_H__
2#define __V850_MMAN_H__ 2#define __V850_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_NONE 0x0 /* page can not be accessed */
8#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
9#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
10
11#define MAP_SHARED 0x01 /* Share changes */
12#define MAP_PRIVATE 0x02 /* Changes are private */
13#define MAP_TYPE 0x0f /* Mask for type of mapping */
14#define MAP_FIXED 0x10 /* Interpret addr exactly */
15#define MAP_ANONYMOUS 0x20 /* don't use a file */
16 5
17#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
18#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -20,24 +9,7 @@
20#define MAP_LOCKED 0x2000 /* pages are locked */ 9#define MAP_LOCKED 0x2000 /* pages are locked */
21#define MAP_NORESERVE 0x4000 /* don't check for reservations */ 10#define MAP_NORESERVE 0x4000 /* don't check for reservations */
22 11
23#define MS_ASYNC 1 /* sync memory asynchronously */
24#define MS_INVALIDATE 2 /* invalidate the caches */
25#define MS_SYNC 4 /* synchronous memory sync */
26
27#define MCL_CURRENT 1 /* lock all current mappings */ 12#define MCL_CURRENT 1 /* lock all current mappings */
28#define MCL_FUTURE 2 /* lock all future mappings */ 13#define MCL_FUTURE 2 /* lock all future mappings */
29 14
30#define MADV_NORMAL 0x0 /* default page-in behavior */
31#define MADV_RANDOM 0x1 /* page-in minimum required */
32#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
33#define MADV_WILLNEED 0x3 /* pre-fault pages */
34#define MADV_DONTNEED 0x4 /* discard these pages */
35#define MADV_REMOVE 0x5 /* remove these pages & resources */
36#define MADV_DONTFORK 0x30 /* dont inherit across fork */
37#define MADV_DOFORK 0x31 /* do inherit across fork */
38
39/* compatibility flags */
40#define MAP_ANON MAP_ANONYMOUS
41#define MAP_FILE 0
42
43#endif /* __V850_MMAN_H__ */ 15#endif /* __V850_MMAN_H__ */
diff --git a/include/asm-x86_64/mman.h b/include/asm-x86_64/mman.h
index b699a38c1c3c..dd5cb0534d37 100644
--- a/include/asm-x86_64/mman.h
+++ b/include/asm-x86_64/mman.h
@@ -1,19 +1,8 @@
1#ifndef __X8664_MMAN_H__ 1#ifndef __X8664_MMAN_H__
2#define __X8664_MMAN_H__ 2#define __X8664_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_NONE 0x0 /* page can not be accessed */
8#define PROT_SEM 0x8
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11 5
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17#define MAP_32BIT 0x40 /* only give out 32bit addresses */ 6#define MAP_32BIT 0x40 /* only give out 32bit addresses */
18 7
19#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 8#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
@@ -24,24 +13,7 @@
24#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 13#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
25#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 14#define MAP_NONBLOCK 0x10000 /* do not block on IO */
26 15
27#define MS_ASYNC 1 /* sync memory asynchronously */
28#define MS_INVALIDATE 2 /* invalidate the caches */
29#define MS_SYNC 4 /* synchronous memory sync */
30
31#define MCL_CURRENT 1 /* lock all current mappings */ 16#define MCL_CURRENT 1 /* lock all current mappings */
32#define MCL_FUTURE 2 /* lock all future mappings */ 17#define MCL_FUTURE 2 /* lock all future mappings */
33 18
34#define MADV_NORMAL 0x0 /* default page-in behavior */
35#define MADV_RANDOM 0x1 /* page-in minimum required */
36#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
37#define MADV_WILLNEED 0x3 /* pre-fault pages */
38#define MADV_DONTNEED 0x4 /* discard these pages */
39#define MADV_REMOVE 0x5 /* remove these pages & resources */
40#define MADV_DONTFORK 0x30 /* dont inherit across fork */
41#define MADV_DOFORK 0x31 /* do inherit across fork */
42
43/* compatibility flags */
44#define MAP_ANON MAP_ANONYMOUS
45#define MAP_FILE 0
46
47#endif 19#endif
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index c99832e7bf3f..eca3f2d633db 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -133,6 +133,7 @@ extern int fix_aperture;
133extern int force_iommu; 133extern int force_iommu;
134 134
135extern int reboot_force; 135extern int reboot_force;
136extern int notsc_setup(char *);
136 137
137extern void smp_local_timer_interrupt(struct pt_regs * regs); 138extern void smp_local_timer_interrupt(struct pt_regs * regs);
138 139
diff --git a/include/asm-xtensa/mman.h b/include/asm-xtensa/mman.h
index e2d7afb679c8..ba394cbb4807 100644
--- a/include/asm-xtensa/mman.h
+++ b/include/asm-xtensa/mman.h
@@ -67,17 +67,19 @@
67#define MCL_CURRENT 1 /* lock all current mappings */ 67#define MCL_CURRENT 1 /* lock all current mappings */
68#define MCL_FUTURE 2 /* lock all future mappings */ 68#define MCL_FUTURE 2 /* lock all future mappings */
69 69
70#define MADV_NORMAL 0x0 /* default page-in behavior */ 70#define MADV_NORMAL 0 /* no further special treatment */
71#define MADV_RANDOM 0x1 /* page-in minimum required */ 71#define MADV_RANDOM 1 /* expect random page references */
72#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ 72#define MADV_SEQUENTIAL 2 /* expect sequential page references */
73#define MADV_WILLNEED 0x3 /* pre-fault pages */ 73#define MADV_WILLNEED 3 /* will need these pages */
74#define MADV_DONTNEED 0x4 /* discard these pages */ 74#define MADV_DONTNEED 4 /* don't need these pages */
75#define MADV_REMOVE 0x5 /* remove these pages & resources */ 75
76#define MADV_DONTFORK 0x30 /* dont inherit across fork */ 76/* common parameters: try to keep these consistent across architectures */
77#define MADV_DOFORK 0x31 /* do inherit across fork */ 77#define MADV_REMOVE 9 /* remove these pages & resources */
78#define MADV_DONTFORK 10 /* don't inherit across fork */
79#define MADV_DOFORK 11 /* do inherit across fork */
78 80
79/* compatibility flags */ 81/* compatibility flags */
80#define MAP_ANON MAP_ANONYMOUS 82#define MAP_ANON MAP_ANONYMOUS
81#define MAP_FILE 0 83#define MAP_FILE 0
82 84
83#endif /* _XTENSA_MMAN_H */ 85#endif /* _XTENSA_MMAN_H */
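The xtensa hunk also shows the point of the renumbering: MADV_REMOVE, MADV_DONTFORK and MADV_DOFORK now sit at the common values 9, 10 and 11 on every architecture instead of per-arch values such as 0x30/0x31. A minimal userspace sketch of the intended MADV_DONTFORK use, keeping a buffer out of forked children; it assumes a libc that already exposes the new constant, and error handling is trimmed:

#include <stddef.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20;
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	/* keep this range out of any child created by fork() */
	if (madvise(buf, len, MADV_DONTFORK) != 0)
		return 1;

	/* ... hand buf to a driver that will DMA into it, fork helpers ... */

	munmap(buf, len);
	return 0;
}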
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index b49affa0ac5a..3b507bf05d09 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -326,12 +326,6 @@ struct sysinfo {
326/* Force a compilation error if condition is true */ 326/* Force a compilation error if condition is true */
327#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) 327#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
328 328
329#ifdef CONFIG_SYSCTL
330extern int randomize_va_space;
331#else
332#define randomize_va_space 1
333#endif
334
335/* Trap pasters of __FUNCTION__ at compile-time */ 329/* Trap pasters of __FUNCTION__ at compile-time */
336#define __FUNCTION__ (__func__) 330#define __FUNCTION__ (__func__)
337 331
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 6aca67a569a2..f3dec45ef874 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -96,10 +96,16 @@ static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
96 ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; }) 96 ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; })
97 97
98/* convert a timespec to ktime_t format: */ 98/* convert a timespec to ktime_t format: */
99#define timespec_to_ktime(ts) ktime_set((ts).tv_sec, (ts).tv_nsec) 99static inline ktime_t timespec_to_ktime(struct timespec ts)
100{
101 return ktime_set(ts.tv_sec, ts.tv_nsec);
102}
100 103
101/* convert a timeval to ktime_t format: */ 104/* convert a timeval to ktime_t format: */
102#define timeval_to_ktime(tv) ktime_set((tv).tv_sec, (tv).tv_usec * 1000) 105static inline ktime_t timeval_to_ktime(struct timeval tv)
106{
107 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
108}
103 109
104/* Map the ktime_t to timespec conversion to ns_to_timespec function */ 110/* Map the ktime_t to timespec conversion to ns_to_timespec function */
105#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) 111#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64)
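Converting timespec_to_ktime()/timeval_to_ktime() from macros to static inline functions costs nothing at run time but makes the argument type explicit, so passing a struct timeval where a struct timespec is expected becomes a compile error rather than silently treating tv_usec as nanoseconds. A short sketch of the call pattern; ktime_add_ns() is assumed to be the helper behind the addition macro shown above, and NSEC_PER_USEC is the constant already used in this hunk:

#include <linux/ktime.h>
#include <linux/time.h>

static ktime_t example_deadline(struct timespec base)
{
	/* type-checked conversion: only a struct timespec is accepted */
	ktime_t t = timespec_to_ktime(base);

	/* push the deadline out by 2ms on top of the converted value */
	return ktime_add_ns(t, 2000 * NSEC_PER_USEC);
}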
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 75e9f0724997..26e1663a5cbe 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1051,5 +1051,7 @@ int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
1051void drop_pagecache(void); 1051void drop_pagecache(void);
1052void drop_slab(void); 1052void drop_slab(void);
1053 1053
1054extern int randomize_va_space;
1055
1054#endif /* __KERNEL__ */ 1056#endif /* __KERNEL__ */
1055#endif /* _LINUX_MM_H */ 1057#endif /* _LINUX_MM_H */
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 4cf6088625c1..468896939843 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -184,8 +184,11 @@ static inline int nf_hook_thresh(int pf, unsigned int hook,
184 struct sk_buff **pskb, 184 struct sk_buff **pskb,
185 struct net_device *indev, 185 struct net_device *indev,
186 struct net_device *outdev, 186 struct net_device *outdev,
187 int (*okfn)(struct sk_buff *), int thresh) 187 int (*okfn)(struct sk_buff *), int thresh,
188 int cond)
188{ 189{
190 if (!cond)
191 return 1;
189#ifndef CONFIG_NETFILTER_DEBUG 192#ifndef CONFIG_NETFILTER_DEBUG
190 if (list_empty(&nf_hooks[pf][hook])) 193 if (list_empty(&nf_hooks[pf][hook]))
191 return 1; 194 return 1;
@@ -197,7 +200,7 @@ static inline int nf_hook(int pf, unsigned int hook, struct sk_buff **pskb,
197 struct net_device *indev, struct net_device *outdev, 200 struct net_device *indev, struct net_device *outdev,
198 int (*okfn)(struct sk_buff *)) 201 int (*okfn)(struct sk_buff *))
199{ 202{
200 return nf_hook_thresh(pf, hook, pskb, indev, outdev, okfn, INT_MIN); 203 return nf_hook_thresh(pf, hook, pskb, indev, outdev, okfn, INT_MIN, 1);
201} 204}
202 205
203/* Activate hook; either okfn or kfree_skb called, unless a hook 206/* Activate hook; either okfn or kfree_skb called, unless a hook
@@ -224,7 +227,13 @@ static inline int nf_hook(int pf, unsigned int hook, struct sk_buff **pskb,
224 227
225#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \ 228#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \
226({int __ret; \ 229({int __ret; \
227if ((__ret=nf_hook_thresh(pf, hook, &(skb), indev, outdev, okfn, thresh)) == 1)\ 230if ((__ret=nf_hook_thresh(pf, hook, &(skb), indev, outdev, okfn, thresh, 1)) == 1)\
231 __ret = (okfn)(skb); \
232__ret;})
233
234#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) \
235({int __ret; \
236if ((__ret=nf_hook_thresh(pf, hook, &(skb), indev, outdev, okfn, INT_MIN, cond)) == 1)\
228 __ret = (okfn)(skb); \ 237 __ret = (okfn)(skb); \
229__ret;}) 238__ret;})
230 239
@@ -295,11 +304,13 @@ extern struct proc_dir_entry *proc_net_netfilter;
295 304
296#else /* !CONFIG_NETFILTER */ 305#else /* !CONFIG_NETFILTER */
297#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb) 306#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
307#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb)
298static inline int nf_hook_thresh(int pf, unsigned int hook, 308static inline int nf_hook_thresh(int pf, unsigned int hook,
299 struct sk_buff **pskb, 309 struct sk_buff **pskb,
300 struct net_device *indev, 310 struct net_device *indev,
301 struct net_device *outdev, 311 struct net_device *outdev,
302 int (*okfn)(struct sk_buff *), int thresh) 312 int (*okfn)(struct sk_buff *), int thresh,
313 int cond)
303{ 314{
304 return okfn(*pskb); 315 return okfn(*pskb);
305} 316}
@@ -307,7 +318,7 @@ static inline int nf_hook(int pf, unsigned int hook, struct sk_buff **pskb,
307 struct net_device *indev, struct net_device *outdev, 318 struct net_device *indev, struct net_device *outdev,
308 int (*okfn)(struct sk_buff *)) 319 int (*okfn)(struct sk_buff *))
309{ 320{
310 return okfn(*pskb); 321 return 1;
311} 322}
312static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} 323static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
313struct flowi; 324struct flowi;
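NF_HOOK_COND() differs from NF_HOOK() only in the extra condition: when cond is false, nf_hook_thresh() returns 1 immediately, so okfn() runs without the hook chain being traversed at all. A condensed sketch of what the macro boils down to in the CONFIG_NETFILTER case above:

#include <linux/kernel.h>
#include <linux/netfilter.h>

/* roughly what NF_HOOK_COND(pf, hook, skb, in, out, okfn, cond) expands to */
static inline int nf_hook_cond_sketch(int pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *in,
				      struct net_device *out,
				      int (*okfn)(struct sk_buff *),
				      int cond)
{
	int ret = nf_hook_thresh(pf, hook, &skb, in, out, okfn, INT_MIN, cond);

	/* 1 means "continue": either cond was false or no hook consumed it */
	if (ret == 1)
		ret = okfn(skb);
	return ret;
}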
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index fdc4a9527343..43c09d790b83 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -79,7 +79,7 @@ enum nf_ip_hook_priorities {
79 79
80#ifdef __KERNEL__ 80#ifdef __KERNEL__
81extern int ip_route_me_harder(struct sk_buff **pskb); 81extern int ip_route_me_harder(struct sk_buff **pskb);
82 82extern int ip_xfrm_me_harder(struct sk_buff **pskb);
83#endif /*__KERNEL__*/ 83#endif /*__KERNEL__*/
84 84
85#endif /*__LINUX_IP_NETFILTER_H*/ 85#endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 9d5cd106b344..0d36750fc0f1 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -84,6 +84,7 @@ extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __us
84extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); 84extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
85extern int ptrace_attach(struct task_struct *tsk); 85extern int ptrace_attach(struct task_struct *tsk);
86extern int ptrace_detach(struct task_struct *, unsigned int); 86extern int ptrace_detach(struct task_struct *, unsigned int);
87extern void __ptrace_detach(struct task_struct *, unsigned int);
87extern void ptrace_disable(struct task_struct *); 88extern void ptrace_disable(struct task_struct *);
88extern int ptrace_check_attach(struct task_struct *task, int kill); 89extern int ptrace_check_attach(struct task_struct *task, int kill);
89extern int ptrace_request(struct task_struct *child, long request, long addr, long data); 90extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
diff --git a/include/net/ip.h b/include/net/ip.h
index 8de0697b364c..fab3d5b3ab1c 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -41,6 +41,7 @@ struct inet_skb_parm
41#define IPSKB_XFRM_TUNNEL_SIZE 2 41#define IPSKB_XFRM_TUNNEL_SIZE 2
42#define IPSKB_XFRM_TRANSFORMED 4 42#define IPSKB_XFRM_TRANSFORMED 4
43#define IPSKB_FRAG_COMPLETE 8 43#define IPSKB_FRAG_COMPLETE 8
44#define IPSKB_REROUTED 16
44}; 45};
45 46
46struct ipcm_cookie 47struct ipcm_cookie
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d09ca0e7d139..d6111a2f0a23 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -866,7 +866,6 @@ extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
866extern int xfrm_init_state(struct xfrm_state *x); 866extern int xfrm_init_state(struct xfrm_state *x);
867extern int xfrm4_rcv(struct sk_buff *skb); 867extern int xfrm4_rcv(struct sk_buff *skb);
868extern int xfrm4_output(struct sk_buff *skb); 868extern int xfrm4_output(struct sk_buff *skb);
869extern int xfrm4_output_finish(struct sk_buff *skb);
870extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler); 869extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler);
871extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler); 870extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler);
872extern int xfrm6_rcv_spi(struct sk_buff **pskb, u32 spi); 871extern int xfrm6_rcv_spi(struct sk_buff **pskb, u32 spi);
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 3e5cb5ab2d34..e5618b90996e 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -163,9 +163,6 @@ enum iscsi_param {
163}; 163};
164#define ISCSI_PARAM_MAX 14 164#define ISCSI_PARAM_MAX 14
165 165
166typedef uint64_t iscsi_sessionh_t; /* iSCSI Data-Path session handle */
167typedef uint64_t iscsi_connh_t; /* iSCSI Data-Path connection handle */
168
169#define iscsi_ptr(_handle) ((void*)(unsigned long)_handle) 166#define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
170#define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr) 167#define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
171#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata)) 168#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index c60b8ff2f5e4..9c331258bc27 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -433,4 +433,6 @@ struct scsi_lun {
433/* Used to obtain the PCI location of a device */ 433/* Used to obtain the PCI location of a device */
434#define SCSI_IOCTL_GET_PCI 0x5387 434#define SCSI_IOCTL_GET_PCI 0x5387
435 435
436int scsi_execute_in_process_context(void (*fn)(void *data), void *data);
437
436#endif /* _SCSI_SCSI_H */ 438#endif /* _SCSI_SCSI_H */
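scsi_execute_in_process_context() gives SCSI code a way to hand a callback and an opaque data pointer off to process context, so work that may sleep does not have to run directly from an interrupt or timer path. A hedged sketch of a caller; the device structure and function names are illustrative, only the prototype above is taken from the header:

#include <scsi/scsi.h>

struct my_dev {
	int needs_rescan;
};

static void my_rescan_fn(void *data)
{
	struct my_dev *dev = data;

	/* runs in process context, so sleeping operations are allowed here */
	dev->needs_rescan = 0;
}

static void my_completion_path(struct my_dev *dev)
{
	/* cannot sleep here; have the callback run in process context */
	scsi_execute_in_process_context(my_rescan_fn, dev);
}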
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 16602a547a63..b41cf077e54b 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -63,25 +63,28 @@ struct iscsi_transport {
63 int max_lun; 63 int max_lun;
64 unsigned int max_conn; 64 unsigned int max_conn;
65 unsigned int max_cmd_len; 65 unsigned int max_cmd_len;
66 struct Scsi_Host *(*create_session) (struct scsi_transport_template *t, 66 struct iscsi_cls_session *(*create_session)
67 uint32_t initial_cmdsn); 67 (struct scsi_transport_template *t, uint32_t sn, uint32_t *sid);
68 void (*destroy_session) (struct Scsi_Host *shost); 68 void (*destroy_session) (struct iscsi_cls_session *session);
69 struct iscsi_cls_conn *(*create_conn) (struct Scsi_Host *shost, 69 struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
70 uint32_t cid); 70 uint32_t cid);
71 int (*bind_conn) (iscsi_sessionh_t session, iscsi_connh_t conn, 71 int (*bind_conn) (struct iscsi_cls_session *session,
72 struct iscsi_cls_conn *cls_conn,
72 uint32_t transport_fd, int is_leading); 73 uint32_t transport_fd, int is_leading);
73 int (*start_conn) (iscsi_connh_t conn); 74 int (*start_conn) (struct iscsi_cls_conn *conn);
74 void (*stop_conn) (iscsi_connh_t conn, int flag); 75 void (*stop_conn) (struct iscsi_cls_conn *conn, int flag);
75 void (*destroy_conn) (struct iscsi_cls_conn *conn); 76 void (*destroy_conn) (struct iscsi_cls_conn *conn);
76 int (*set_param) (iscsi_connh_t conn, enum iscsi_param param, 77 int (*set_param) (struct iscsi_cls_conn *conn, enum iscsi_param param,
77 uint32_t value); 78 uint32_t value);
78 int (*get_conn_param) (void *conndata, enum iscsi_param param, 79 int (*get_conn_param) (struct iscsi_cls_conn *conn,
80 enum iscsi_param param,
79 uint32_t *value); 81 uint32_t *value);
80 int (*get_session_param) (struct Scsi_Host *shost, 82 int (*get_session_param) (struct iscsi_cls_session *session,
81 enum iscsi_param param, uint32_t *value); 83 enum iscsi_param param, uint32_t *value);
82 int (*send_pdu) (iscsi_connh_t conn, struct iscsi_hdr *hdr, 84 int (*send_pdu) (struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
83 char *data, uint32_t data_size); 85 char *data, uint32_t data_size);
84 void (*get_stats) (iscsi_connh_t conn, struct iscsi_stats *stats); 86 void (*get_stats) (struct iscsi_cls_conn *conn,
87 struct iscsi_stats *stats);
85}; 88};
86 89
87/* 90/*
@@ -93,15 +96,14 @@ extern int iscsi_unregister_transport(struct iscsi_transport *tt);
93/* 96/*
94 * control plane upcalls 97 * control plane upcalls
95 */ 98 */
96extern void iscsi_conn_error(iscsi_connh_t conn, enum iscsi_err error); 99extern void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error);
97extern int iscsi_recv_pdu(iscsi_connh_t conn, struct iscsi_hdr *hdr, 100extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
98 char *data, uint32_t data_size); 101 char *data, uint32_t data_size);
99 102
100struct iscsi_cls_conn { 103struct iscsi_cls_conn {
101 struct list_head conn_list; /* item in connlist */ 104 struct list_head conn_list; /* item in connlist */
102 void *dd_data; /* LLD private data */ 105 void *dd_data; /* LLD private data */
103 struct iscsi_transport *transport; 106 struct iscsi_transport *transport;
104 iscsi_connh_t connh;
105 int active; /* must be accessed with the connlock */ 107 int active; /* must be accessed with the connlock */
106 struct device dev; /* sysfs transport/container device */ 108 struct device dev; /* sysfs transport/container device */
107 struct mempool_zone *z_error; 109 struct mempool_zone *z_error;
@@ -113,7 +115,7 @@ struct iscsi_cls_conn {
113 container_of(_dev, struct iscsi_cls_conn, dev) 115 container_of(_dev, struct iscsi_cls_conn, dev)
114 116
115struct iscsi_cls_session { 117struct iscsi_cls_session {
116 struct list_head list; /* item in session_list */ 118 struct list_head sess_list; /* item in session_list */
117 struct iscsi_transport *transport; 119 struct iscsi_transport *transport;
118 struct device dev; /* sysfs transport/container device */ 120 struct device dev; /* sysfs transport/container device */
119}; 121};
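With the opaque iscsi_sessionh_t/iscsi_connh_t handles gone, every transport callback receives a struct iscsi_cls_session or struct iscsi_cls_conn pointer and reaches its private state through dd_data instead of casting a 64-bit handle. A minimal sketch of a driver-side callback under the new signatures; the private struct is illustrative and the case handled assumes the ISCSI_PARAM_MAX_RECV_DLENGTH value from iscsi_if.h:

#include <linux/errno.h>
#include <scsi/iscsi_if.h>
#include <scsi/scsi_transport_iscsi.h>

struct my_conn {
	uint32_t max_recv_dlength;
};

static int my_set_param(struct iscsi_cls_conn *cls_conn,
			enum iscsi_param param, uint32_t value)
{
	struct my_conn *conn = cls_conn->dd_data;	/* LLD private data */

	switch (param) {
	case ISCSI_PARAM_MAX_RECV_DLENGTH:
		conn->max_recv_dlength = value;
		return 0;
	default:
		return -ENOSYS;
	}
}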
diff --git a/include/video/neomagic.h b/include/video/neomagic.h
index 1d69049bd4c1..78b1f15a538f 100644
--- a/include/video/neomagic.h
+++ b/include/video/neomagic.h
@@ -159,6 +159,7 @@ struct neofb_par {
159 unsigned char PanelDispCntlReg1; 159 unsigned char PanelDispCntlReg1;
160 unsigned char PanelDispCntlReg2; 160 unsigned char PanelDispCntlReg2;
161 unsigned char PanelDispCntlReg3; 161 unsigned char PanelDispCntlReg3;
162 unsigned char PanelDispCntlRegRead;
162 unsigned char PanelVertCenterReg1; 163 unsigned char PanelVertCenterReg1;
163 unsigned char PanelVertCenterReg2; 164 unsigned char PanelVertCenterReg2;
164 unsigned char PanelVertCenterReg3; 165 unsigned char PanelVertCenterReg3;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index ba42b0a76961..12815d3f1a05 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1977,6 +1977,39 @@ void cpuset_fork(struct task_struct *child)
1977 * We don't need to task_lock() this reference to tsk->cpuset, 1977 * We don't need to task_lock() this reference to tsk->cpuset,
1978 * because tsk is already marked PF_EXITING, so attach_task() won't 1978 * because tsk is already marked PF_EXITING, so attach_task() won't
1979 * mess with it, or task is a failed fork, never visible to attach_task. 1979 * mess with it, or task is a failed fork, never visible to attach_task.
1980 *
1981 * Hack:
1982 *
1983 * Set the exiting tasks cpuset to the root cpuset (top_cpuset).
1984 *
1985 * Don't leave a task unable to allocate memory, as that is an
1986 * accident waiting to happen should someone add a callout in
1987 * do_exit() after the cpuset_exit() call that might allocate.
1988 * If a task tries to allocate memory with an invalid cpuset,
1989 * it will oops in cpuset_update_task_memory_state().
1990 *
1991 * We call cpuset_exit() while the task is still competent to
1992 * handle notify_on_release(), then leave the task attached to
1993 * the root cpuset (top_cpuset) for the remainder of its exit.
1994 *
1995 * To do this properly, we would increment the reference count on
1996 * top_cpuset, and near the very end of the kernel/exit.c do_exit()
1997 * code we would add a second cpuset function call, to drop that
1998 * reference. This would just create an unnecessary hot spot on
1999 * the top_cpuset reference count, to no avail.
2000 *
2001 * Normally, holding a reference to a cpuset without bumping its
2002 * count is unsafe. The cpuset could go away, or someone could
2003 * attach us to a different cpuset, decrementing the count on
2004 * the first cpuset that we never incremented. But in this case,
2005 * top_cpuset isn't going away, and either task has PF_EXITING set,
2006 * which wards off any attach_task() attempts, or task is a failed
2007 * fork, never visible to attach_task.
2008 *
2009 * Another way to do this would be to set the cpuset pointer
2010 * to NULL here, and check in cpuset_update_task_memory_state()
2011 * for a NULL pointer. This hack avoids that NULL check, for no
2012 * cost (other than this way too long comment ;).
1980 **/ 2013 **/
1981 2014
1982void cpuset_exit(struct task_struct *tsk) 2015void cpuset_exit(struct task_struct *tsk)
@@ -1984,7 +2017,7 @@ void cpuset_exit(struct task_struct *tsk)
1984 struct cpuset *cs; 2017 struct cpuset *cs;
1985 2018
1986 cs = tsk->cpuset; 2019 cs = tsk->cpuset;
1987 tsk->cpuset = NULL; 2020 tsk->cpuset = &top_cpuset; /* Hack - see comment above */
1988 2021
1989 if (notify_on_release(cs)) { 2022 if (notify_on_release(cs)) {
1990 char *pathbuf = NULL; 2023 char *pathbuf = NULL;
diff --git a/kernel/fork.c b/kernel/fork.c
index 8e88b374cee9..fbea12d7a943 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1123,8 +1123,8 @@ static task_t *copy_process(unsigned long clone_flags,
1123 p->real_parent = current; 1123 p->real_parent = current;
1124 p->parent = p->real_parent; 1124 p->parent = p->real_parent;
1125 1125
1126 spin_lock(&current->sighand->siglock);
1126 if (clone_flags & CLONE_THREAD) { 1127 if (clone_flags & CLONE_THREAD) {
1127 spin_lock(&current->sighand->siglock);
1128 /* 1128 /*
1129 * Important: if an exit-all has been started then 1129 * Important: if an exit-all has been started then
1130 * do not create this new thread - the whole thread 1130 * do not create this new thread - the whole thread
@@ -1162,8 +1162,6 @@ static task_t *copy_process(unsigned long clone_flags,
1162 */ 1162 */
1163 p->it_prof_expires = jiffies_to_cputime(1); 1163 p->it_prof_expires = jiffies_to_cputime(1);
1164 } 1164 }
1165
1166 spin_unlock(&current->sighand->siglock);
1167 } 1165 }
1168 1166
1169 /* 1167 /*
@@ -1175,8 +1173,6 @@ static task_t *copy_process(unsigned long clone_flags,
1175 if (unlikely(p->ptrace & PT_PTRACED)) 1173 if (unlikely(p->ptrace & PT_PTRACED))
1176 __ptrace_link(p, current->parent); 1174 __ptrace_link(p, current->parent);
1177 1175
1178 attach_pid(p, PIDTYPE_PID, p->pid);
1179 attach_pid(p, PIDTYPE_TGID, p->tgid);
1180 if (thread_group_leader(p)) { 1176 if (thread_group_leader(p)) {
1181 p->signal->tty = current->signal->tty; 1177 p->signal->tty = current->signal->tty;
1182 p->signal->pgrp = process_group(current); 1178 p->signal->pgrp = process_group(current);
@@ -1186,9 +1182,12 @@ static task_t *copy_process(unsigned long clone_flags,
1186 if (p->pid) 1182 if (p->pid)
1187 __get_cpu_var(process_counts)++; 1183 __get_cpu_var(process_counts)++;
1188 } 1184 }
1185 attach_pid(p, PIDTYPE_TGID, p->tgid);
1186 attach_pid(p, PIDTYPE_PID, p->pid);
1189 1187
1190 nr_threads++; 1188 nr_threads++;
1191 total_forks++; 1189 total_forks++;
1190 spin_unlock(&current->sighand->siglock);
1192 write_unlock_irq(&tasklist_lock); 1191 write_unlock_irq(&tasklist_lock);
1193 proc_fork_connector(p); 1192 proc_fork_connector(p);
1194 return p; 1193 return p;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 41f66365f0d8..8d5a5986d621 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -91,10 +91,8 @@ static int save_highmem_zone(struct zone *zone)
91 * corrected eventually when the cases giving rise to this 91 * corrected eventually when the cases giving rise to this
92 * are better understood. 92 * are better understood.
93 */ 93 */
94 if (PageReserved(page)) { 94 if (PageReserved(page))
95 printk("highmem reserved page?!\n");
96 continue; 95 continue;
97 }
98 BUG_ON(PageNosave(page)); 96 BUG_ON(PageNosave(page));
99 if (PageNosaveFree(page)) 97 if (PageNosaveFree(page))
100 continue; 98 continue;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index d2cf144d0af5..d95a72c9279d 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -72,8 +72,8 @@ void ptrace_untrace(task_t *child)
72 */ 72 */
73void __ptrace_unlink(task_t *child) 73void __ptrace_unlink(task_t *child)
74{ 74{
75 if (!child->ptrace) 75 BUG_ON(!child->ptrace);
76 BUG(); 76
77 child->ptrace = 0; 77 child->ptrace = 0;
78 if (!list_empty(&child->ptrace_list)) { 78 if (!list_empty(&child->ptrace_list)) {
79 list_del_init(&child->ptrace_list); 79 list_del_init(&child->ptrace_list);
@@ -184,22 +184,27 @@ bad:
184 return retval; 184 return retval;
185} 185}
186 186
187void __ptrace_detach(struct task_struct *child, unsigned int data)
188{
189 child->exit_code = data;
190 /* .. re-parent .. */
191 __ptrace_unlink(child);
192 /* .. and wake it up. */
193 if (child->exit_state != EXIT_ZOMBIE)
194 wake_up_process(child);
195}
196
187int ptrace_detach(struct task_struct *child, unsigned int data) 197int ptrace_detach(struct task_struct *child, unsigned int data)
188{ 198{
189 if (!valid_signal(data)) 199 if (!valid_signal(data))
190 return -EIO; 200 return -EIO;
191 201
192 /* Architecture-specific hardware disable .. */ 202 /* Architecture-specific hardware disable .. */
193 ptrace_disable(child); 203 ptrace_disable(child);
194 204
195 /* .. re-parent .. */
196 child->exit_code = data;
197
198 write_lock_irq(&tasklist_lock); 205 write_lock_irq(&tasklist_lock);
199 __ptrace_unlink(child); 206 if (child->ptrace)
200 /* .. and wake it up. */ 207 __ptrace_detach(child, data);
201 if (child->exit_state != EXIT_ZOMBIE)
202 wake_up_process(child);
203 write_unlock_irq(&tasklist_lock); 208 write_unlock_irq(&tasklist_lock);
204 209
205 return 0; 210 return 0;
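Splitting __ptrace_detach() out of ptrace_detach() does two things: paths that already hold tasklist_lock (the reason the declaration was added to linux/ptrace.h earlier in this diff) can detach a tracee without retaking the lock, and ptrace_detach() itself now re-checks child->ptrace under the lock, so a detach racing with the tracee being released is simply skipped. A condensed sketch of the resulting call pattern from such a lock-holding path; the wrapper function is illustrative:

#include <linux/ptrace.h>
#include <linux/sched.h>

/* sketch: detach a tracee from a path that must take tasklist_lock itself */
static void detach_if_traced(struct task_struct *child, unsigned int exit_code)
{
	write_lock_irq(&tasklist_lock);
	if (child->ptrace)		/* may already have been released */
		__ptrace_detach(child, exit_code);
	write_unlock_irq(&tasklist_lock);
}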
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 71dd6f62efec..7654d55c47f5 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -126,8 +126,6 @@ extern int sysctl_hz_timer;
126extern int acct_parm[]; 126extern int acct_parm[];
127#endif 127#endif
128 128
129int randomize_va_space = 1;
130
131static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t, 129static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t,
132 ctl_table *, void **); 130 ctl_table *, void **);
133static int proc_doutsstring(ctl_table *table, int write, struct file *filp, 131static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index c0bd4a914803..1e5b17dc7e3d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -752,12 +752,14 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
752 */ 752 */
753 nr_cleared_tags = 0; 753 nr_cleared_tags = 0;
754 for (tag = 0; tag < RADIX_TREE_TAGS; tag++) { 754 for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
755 tags[tag] = 1;
755 if (tag_get(pathp->node, tag, pathp->offset)) { 756 if (tag_get(pathp->node, tag, pathp->offset)) {
756 tag_clear(pathp->node, tag, pathp->offset); 757 tag_clear(pathp->node, tag, pathp->offset);
757 tags[tag] = 0; 758 if (!any_tag_set(pathp->node, tag)) {
758 nr_cleared_tags++; 759 tags[tag] = 0;
759 } else 760 nr_cleared_tags++;
760 tags[tag] = 1; 761 }
762 }
761 } 763 }
762 764
763 for (pathp--; nr_cleared_tags && pathp->node; pathp--) { 765 for (pathp--; nr_cleared_tags && pathp->node; pathp--) {
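The radix-tree fix changes the delete-time bookkeeping: a tag only counts as cleared, and is only propagated toward the root, when any_tag_set() shows that no other slot in the node still carries it, rather than whenever the deleted slot happened to have it set. A conceptual sketch of the invariant this preserves, written in terms of the file's own static helpers, so it is illustrative rather than standalone code:

/*
 * Invariant: a node is tagged in its parent iff at least one of its own
 * slots still has that tag.  Clearing the deleted slot may therefore only
 * walk the clear up the path once the node itself has gone clean.
 */
static int slot_clear_makes_node_clean(struct radix_tree_node *node,
				       unsigned int tag, int offset)
{
	tag_clear(node, tag, offset);	/* drop the tag on the freed slot */
	return !any_tag_set(node, tag);	/* propagate only if none remain */
}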
diff --git a/mm/memory.c b/mm/memory.c
index 2bee1f21aa8a..9abc6008544b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -82,6 +82,16 @@ EXPORT_SYMBOL(num_physpages);
82EXPORT_SYMBOL(high_memory); 82EXPORT_SYMBOL(high_memory);
83EXPORT_SYMBOL(vmalloc_earlyreserve); 83EXPORT_SYMBOL(vmalloc_earlyreserve);
84 84
85int randomize_va_space __read_mostly = 1;
86
87static int __init disable_randmaps(char *s)
88{
89 randomize_va_space = 0;
90 return 0;
91}
92__setup("norandmaps", disable_randmaps);
93
94
85/* 95/*
86 * If a p?d_bad entry is found while walking page tables, report 96 * If a p?d_bad entry is found while walking page tables, report
87 * the error, before resetting entry to p?d_none. Usually (but 97 * the error, before resetting entry to p?d_none. Usually (but
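randomize_va_space now lives in mm/memory.c together with a "norandmaps" boot parameter, so address-space randomization can be switched off on the kernel command line as well as through the existing sysctl (its declaration moved from linux/kernel.h to linux/mm.h above). A small userspace sketch of the run-time toggle, assuming the sysctl stays exposed at the usual /proc/sys/kernel/randomize_va_space path:

#include <stdio.h>

/* 0 disables VA-space randomization, 1 restores the boot-time default */
static int set_randomize_va_space(int on)
{
	FILE *f = fopen("/proc/sys/kernel/randomize_va_space", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", on ? 1 : 0);
	return fclose(f) ? -1 : 0;
}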
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index cc047f7fb6ef..35cf3a074087 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -67,7 +67,7 @@ void br_stp_disable_bridge(struct net_bridge *br)
67{ 67{
68 struct net_bridge_port *p; 68 struct net_bridge_port *p;
69 69
70 spin_lock(&br->lock); 70 spin_lock_bh(&br->lock);
71 list_for_each_entry(p, &br->port_list, list) { 71 list_for_each_entry(p, &br->port_list, list) {
72 if (p->state != BR_STATE_DISABLED) 72 if (p->state != BR_STATE_DISABLED)
73 br_stp_disable_port(p); 73 br_stp_disable_port(p);
@@ -76,7 +76,7 @@ void br_stp_disable_bridge(struct net_bridge *br)
76 76
77 br->topology_change = 0; 77 br->topology_change = 0;
78 br->topology_change_detected = 0; 78 br->topology_change_detected = 0;
79 spin_unlock(&br->lock); 79 spin_unlock_bh(&br->lock);
80 80
81 del_timer_sync(&br->hello_timer); 81 del_timer_sync(&br->hello_timer);
82 del_timer_sync(&br->topology_change_timer); 82 del_timer_sync(&br->topology_change_timer);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index abe23923e4e7..9981dcd68f11 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -830,7 +830,8 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
830 skb->h.raw = skb->nh.raw; 830 skb->h.raw = skb->nh.raw;
831 skb->nh.raw = skb_push(skb, gre_hlen); 831 skb->nh.raw = skb_push(skb, gre_hlen);
832 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 832 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
833 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE|IPSKB_XFRM_TRANSFORMED); 833 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
834 IPSKB_REROUTED);
834 dst_release(skb->dst); 835 dst_release(skb->dst);
835 skb->dst = &rt->u.dst; 836 skb->dst = &rt->u.dst;
836 837
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 3324fbfe528a..57d290d89ec2 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -207,8 +207,10 @@ static inline int ip_finish_output(struct sk_buff *skb)
207{ 207{
208#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) 208#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
209 /* Policy lookup after SNAT yielded a new policy */ 209 /* Policy lookup after SNAT yielded a new policy */
210 if (skb->dst->xfrm != NULL) 210 if (skb->dst->xfrm != NULL) {
211 return xfrm4_output_finish(skb); 211 IPCB(skb)->flags |= IPSKB_REROUTED;
212 return dst_output(skb);
213 }
212#endif 214#endif
213 if (skb->len > dst_mtu(skb->dst) && 215 if (skb->len > dst_mtu(skb->dst) &&
214 !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size)) 216 !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
@@ -271,8 +273,9 @@ int ip_mc_output(struct sk_buff *skb)
271 newskb->dev, ip_dev_loopback_xmit); 273 newskb->dev, ip_dev_loopback_xmit);
272 } 274 }
273 275
274 return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev, 276 return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
275 ip_finish_output); 277 ip_finish_output,
278 !(IPCB(skb)->flags & IPSKB_REROUTED));
276} 279}
277 280
278int ip_output(struct sk_buff *skb) 281int ip_output(struct sk_buff *skb)
@@ -284,8 +287,9 @@ int ip_output(struct sk_buff *skb)
284 skb->dev = dev; 287 skb->dev = dev;
285 skb->protocol = htons(ETH_P_IP); 288 skb->protocol = htons(ETH_P_IP);
286 289
287 return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev, 290 return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
288 ip_finish_output); 291 ip_finish_output,
292 !(IPCB(skb)->flags & IPSKB_REROUTED));
289} 293}
290 294
291int ip_queue_xmit(struct sk_buff *skb, int ipfragok) 295int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index e5cbe72c6b80..03d13742a4b8 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -622,7 +622,8 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
622 skb->h.raw = skb->nh.raw; 622 skb->h.raw = skb->nh.raw;
623 skb->nh.raw = skb_push(skb, sizeof(struct iphdr)); 623 skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
624 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 624 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
625 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE|IPSKB_XFRM_TRANSFORMED); 625 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
626 IPSKB_REROUTED);
626 dst_release(skb->dst); 627 dst_release(skb->dst);
627 skb->dst = &rt->u.dst; 628 skb->dst = &rt->u.dst;
628 629
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 52a3d7c57907..ed42cdc57cd9 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -78,6 +78,47 @@ int ip_route_me_harder(struct sk_buff **pskb)
78} 78}
79EXPORT_SYMBOL(ip_route_me_harder); 79EXPORT_SYMBOL(ip_route_me_harder);
80 80
81#ifdef CONFIG_XFRM
82int ip_xfrm_me_harder(struct sk_buff **pskb)
83{
84 struct flowi fl;
85 unsigned int hh_len;
86 struct dst_entry *dst;
87
88 if (IPCB(*pskb)->flags & IPSKB_XFRM_TRANSFORMED)
89 return 0;
90 if (xfrm_decode_session(*pskb, &fl, AF_INET) < 0)
91 return -1;
92
93 dst = (*pskb)->dst;
94 if (dst->xfrm)
95 dst = ((struct xfrm_dst *)dst)->route;
96 dst_hold(dst);
97
98 if (xfrm_lookup(&dst, &fl, (*pskb)->sk, 0) < 0)
99 return -1;
100
101 dst_release((*pskb)->dst);
102 (*pskb)->dst = dst;
103
104 /* Change in oif may mean change in hh_len. */
105 hh_len = (*pskb)->dst->dev->hard_header_len;
106 if (skb_headroom(*pskb) < hh_len) {
107 struct sk_buff *nskb;
108
109 nskb = skb_realloc_headroom(*pskb, hh_len);
110 if (!nskb)
111 return -1;
112 if ((*pskb)->sk)
113 skb_set_owner_w(nskb, (*pskb)->sk);
114 kfree_skb(*pskb);
115 *pskb = nskb;
116 }
117 return 0;
118}
119EXPORT_SYMBOL(ip_xfrm_me_harder);
120#endif
121
81void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *); 122void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *);
82EXPORT_SYMBOL(ip_nat_decode_session); 123EXPORT_SYMBOL(ip_nat_decode_session);
83 124
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index 92c54999a19d..7c3f7d380240 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -235,19 +235,19 @@ ip_nat_out(unsigned int hooknum,
235 return NF_ACCEPT; 235 return NF_ACCEPT;
236 236
237 ret = ip_nat_fn(hooknum, pskb, in, out, okfn); 237 ret = ip_nat_fn(hooknum, pskb, in, out, okfn);
238#ifdef CONFIG_XFRM
238 if (ret != NF_DROP && ret != NF_STOLEN 239 if (ret != NF_DROP && ret != NF_STOLEN
239 && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) { 240 && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) {
240 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 241 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
241 242
242 if (ct->tuplehash[dir].tuple.src.ip != 243 if (ct->tuplehash[dir].tuple.src.ip !=
243 ct->tuplehash[!dir].tuple.dst.ip 244 ct->tuplehash[!dir].tuple.dst.ip
244#ifdef CONFIG_XFRM
245 || ct->tuplehash[dir].tuple.src.u.all != 245 || ct->tuplehash[dir].tuple.src.u.all !=
246 ct->tuplehash[!dir].tuple.dst.u.all 246 ct->tuplehash[!dir].tuple.dst.u.all
247#endif
248 ) 247 )
249 return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP; 248 return ip_xfrm_me_harder(pskb) == 0 ? ret : NF_DROP;
250 } 249 }
250#endif
251 return ret; 251 return ret;
252} 252}
253 253
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 167619f638c6..6c8624a54933 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -529,15 +529,10 @@ static int init_or_cleanup(int init)
529 goto cleanup_localinops; 529 goto cleanup_localinops;
530 } 530 }
531#endif 531#endif
532
533 /* For use by REJECT target */
534 ip_ct_attach = __nf_conntrack_attach;
535
536 return ret; 532 return ret;
537 533
538 cleanup: 534 cleanup:
539 synchronize_net(); 535 synchronize_net();
540 ip_ct_attach = NULL;
541#ifdef CONFIG_SYSCTL 536#ifdef CONFIG_SYSCTL
542 unregister_sysctl_table(nf_ct_ipv4_sysctl_header); 537 unregister_sysctl_table(nf_ct_ipv4_sysctl_header);
543 cleanup_localinops: 538 cleanup_localinops:
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index d4df0ddd424b..32ad229b4fed 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -152,10 +152,16 @@ error_nolock:
152 goto out_exit; 152 goto out_exit;
153} 153}
154 154
155int xfrm4_output_finish(struct sk_buff *skb) 155static int xfrm4_output_finish(struct sk_buff *skb)
156{ 156{
157 int err; 157 int err;
158 158
159#ifdef CONFIG_NETFILTER
160 if (!skb->dst->xfrm) {
161 IPCB(skb)->flags |= IPSKB_REROUTED;
162 return dst_output(skb);
163 }
164#endif
159 while (likely((err = xfrm4_output_one(skb)) == 0)) { 165 while (likely((err = xfrm4_output_one(skb)) == 0)) {
160 nf_reset(skb); 166 nf_reset(skb);
161 167
@@ -178,6 +184,7 @@ int xfrm4_output_finish(struct sk_buff *skb)
178 184
179int xfrm4_output(struct sk_buff *skb) 185int xfrm4_output(struct sk_buff *skb)
180{ 186{
181 return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev, 187 return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev,
182 xfrm4_output_finish); 188 xfrm4_output_finish,
189 !(IPCB(skb)->flags & IPSKB_REROUTED));
183} 190}
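Taken together, the ip_output.c and xfrm4_output.c hunks keep a packet that picks up an xfrm policy after SNAT from being NATed twice: ip_finish_output() marks it IPSKB_REROUTED and sends it back through dst_output(), and the NF_HOOK_COND() condition then keeps the marked packet from traversing POST_ROUTING again. A conceptual sketch of that first leg of the flow; the helper name is illustrative, not a real kernel symbol:

#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>

/* first pass out of POST_ROUTING when SNAT attached an xfrm policy */
static int reroute_through_xfrm(struct sk_buff *skb)
{
	if (skb->dst->xfrm != NULL) {
		/* remember that this packet already went through NAT once */
		IPCB(skb)->flags |= IPSKB_REROUTED;

		/*
		 * dst_output() now lands in xfrm4_output(), whose
		 * NF_HOOK_COND() skips the hooks for marked packets.
		 */
		return dst_output(skb);
	}
	return 0;	/* no policy: continue on the normal output path */
}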
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index fcf883183cef..21eb725e885f 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -42,6 +42,7 @@
42#include <linux/net.h> 42#include <linux/net.h>
43#include <linux/skbuff.h> 43#include <linux/skbuff.h>
44#include <linux/init.h> 44#include <linux/init.h>
45#include <linux/netfilter.h>
45 46
46#ifdef CONFIG_SYSCTL 47#ifdef CONFIG_SYSCTL
47#include <linux/sysctl.h> 48#include <linux/sysctl.h>
@@ -255,6 +256,7 @@ out:
255struct icmpv6_msg { 256struct icmpv6_msg {
256 struct sk_buff *skb; 257 struct sk_buff *skb;
257 int offset; 258 int offset;
259 uint8_t type;
258}; 260};
259 261
260static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) 262static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
@@ -266,6 +268,8 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st
266 csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset, 268 csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
267 to, len, csum); 269 to, len, csum);
268 skb->csum = csum_block_add(skb->csum, csum, odd); 270 skb->csum = csum_block_add(skb->csum, csum, odd);
271 if (!(msg->type & ICMPV6_INFOMSG_MASK))
272 nf_ct_attach(skb, org_skb);
269 return 0; 273 return 0;
270} 274}
271 275
@@ -403,6 +407,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
403 407
404 msg.skb = skb; 408 msg.skb = skb;
405 msg.offset = skb->nh.raw - skb->data; 409 msg.offset = skb->nh.raw - skb->data;
410 msg.type = type;
406 411
407 len = skb->len - msg.offset; 412 len = skb->len - msg.offset;
408 len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr)); 413 len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr));
@@ -500,6 +505,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
500 505
501 msg.skb = skb; 506 msg.skb = skb;
502 msg.offset = 0; 507 msg.offset = 0;
508 msg.type = ICMPV6_ECHO_REPLY;
503 509
504 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), 510 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
505 sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl, 511 sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index c745717b4ce2..0e6d1d4bbd5c 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -160,6 +160,8 @@ static void send_reset(struct sk_buff *oldskb)
160 csum_partial((char *)tcph, 160 csum_partial((char *)tcph,
161 sizeof(struct tcphdr), 0)); 161 sizeof(struct tcphdr), 0));
162 162
163 nf_ct_attach(nskb, oldskb);
164
163 NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, nskb, NULL, nskb->dst->dev, 165 NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
164 dst_output); 166 dst_output);
165} 167}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 0e550127fa7e..a8e5544da93e 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -126,7 +126,7 @@ config NETFILTER_XT_TARGET_CONNMARK
126 tristate '"CONNMARK" target support' 126 tristate '"CONNMARK" target support'
127 depends on NETFILTER_XTABLES 127 depends on NETFILTER_XTABLES
128 depends on IP_NF_MANGLE || IP6_NF_MANGLE 128 depends on IP_NF_MANGLE || IP6_NF_MANGLE
129 depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4) 129 depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK)
130 help 130 help
131 This option adds a `CONNMARK' target, which allows one to manipulate 131 This option adds a `CONNMARK' target, which allows one to manipulate
132 the connection mark value. Similar to the MARK target, but 132 the connection mark value. Similar to the MARK target, but
@@ -187,7 +187,7 @@ config NETFILTER_XT_MATCH_COMMENT
187config NETFILTER_XT_MATCH_CONNBYTES 187config NETFILTER_XT_MATCH_CONNBYTES
188 tristate '"connbytes" per-connection counter match support' 188 tristate '"connbytes" per-connection counter match support'
189 depends on NETFILTER_XTABLES 189 depends on NETFILTER_XTABLES
190 depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || NF_CT_ACCT 190 depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || (NF_CT_ACCT && NF_CONNTRACK)
191 help 191 help
192 This option adds a `connbytes' match, which allows you to match the 192 This option adds a `connbytes' match, which allows you to match the
193 number of bytes and/or packets for each direction within a connection. 193 number of bytes and/or packets for each direction within a connection.
@@ -198,7 +198,7 @@ config NETFILTER_XT_MATCH_CONNBYTES
198config NETFILTER_XT_MATCH_CONNMARK 198config NETFILTER_XT_MATCH_CONNMARK
199 tristate '"connmark" connection mark match support' 199 tristate '"connmark" connection mark match support'
200 depends on NETFILTER_XTABLES 200 depends on NETFILTER_XTABLES
201 depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || NF_CONNTRACK_MARK 201 depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK)
202 help 202 help
203 This option adds a `connmark' match, which allows you to match the 203 This option adds a `connmark' match, which allows you to match the
204 connection mark value previously set for the session by `CONNMARK'. 204 connection mark value previously set for the session by `CONNMARK'.
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0ce337a1d974..d622ddf08bb0 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1556,6 +1556,8 @@ void nf_conntrack_cleanup(void)
1556{ 1556{
1557 int i; 1557 int i;
1558 1558
1559 ip_ct_attach = NULL;
1560
1559 /* This makes sure all current packets have passed through 1561 /* This makes sure all current packets have passed through
1560 netfilter framework. Roll on, two-stage module 1562 netfilter framework. Roll on, two-stage module
1561 delete... */ 1563 delete... */
@@ -1715,6 +1717,9 @@ int __init nf_conntrack_init(void)
1715 nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto; 1717 nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto;
1716 write_unlock_bh(&nf_conntrack_lock); 1718 write_unlock_bh(&nf_conntrack_lock);
1717 1719
1720 /* For use by REJECT target */
1721 ip_ct_attach = __nf_conntrack_attach;
1722
1718 /* Set up fake conntrack: 1723 /* Set up fake conntrack:
1719 - to never be deleted, not in any hashes */ 1724 - to never be deleted, not in any hashes */
1720 atomic_set(&nf_conntrack_untracked.ct_general.use, 1); 1725 atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index df99138c3b3b..6492ed66fb3c 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -864,7 +864,9 @@ static int csum6(const struct sk_buff *skb, unsigned int dataoff)
864{ 864{
865 return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, 865 return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
866 skb->len - dataoff, IPPROTO_TCP, 866 skb->len - dataoff, IPPROTO_TCP,
867 skb->ip_summed == CHECKSUM_HW ? skb->csum 867 skb->ip_summed == CHECKSUM_HW
868 ? csum_sub(skb->csum,
869 skb_checksum(skb, 0, dataoff, 0))
868 : skb_checksum(skb, dataoff, skb->len - dataoff, 870 : skb_checksum(skb, dataoff, skb->len - dataoff,
869 0)); 871 0));
870} 872}
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 4264dd079a16..831d206344e0 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -161,7 +161,9 @@ static int csum6(const struct sk_buff *skb, unsigned int dataoff)
161{ 161{
162 return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, 162 return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
163 skb->len - dataoff, IPPROTO_UDP, 163 skb->len - dataoff, IPPROTO_UDP,
164 skb->ip_summed == CHECKSUM_HW ? skb->csum 164 skb->ip_summed == CHECKSUM_HW
165 ? csum_sub(skb->csum,
166 skb_checksum(skb, 0, dataoff, 0))
165 : skb_checksum(skb, dataoff, skb->len - dataoff, 167 : skb_checksum(skb, dataoff, skb->len - dataoff,
166 0)); 168 0));
167} 169}
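The conntrack checksum fix is the same in the TCP and UDP cases: for CHECKSUM_HW packets skb->csum covers everything from skb->data onward, so the checksum of the first dataoff bytes (the IPv6 header plus any extension headers) has to be subtracted before the value is handed to csum_ipv6_magic(); previously skb->csum was used unadjusted. A small sketch of just that adjustment, assuming the same checksum helpers this code already relies on:

#include <linux/skbuff.h>
#include <net/checksum.h>

/* hardware csum covers [0, skb->len); the L4 code wants [dataoff, skb->len) */
static unsigned int l4_csum_from_hw(const struct sk_buff *skb,
				    unsigned int dataoff)
{
	return csum_sub(skb->csum, skb_checksum(skb, 0, dataoff, 0));
}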