-rw-r--r--Documentation/cpu-hotplug.txt22
-rw-r--r--Documentation/kernel-parameters.txt5
-rw-r--r--Documentation/scsi/ChangeLog.megaraid_sas23
-rw-r--r--Makefile9
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/kernel/setup.c5
-rw-r--r--arch/arm/kernel/smp.c1
-rw-r--r--arch/arm/kernel/sys_oabi-compat.c30
-rw-r--r--arch/arm/mach-integrator/platsmp.c21
-rw-r--r--arch/arm/mach-iop3xx/iop321-setup.c1
-rw-r--r--arch/arm/mach-iop3xx/iop331-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-setup.c6
-rw-r--r--arch/arm/mach-realview/platsmp.c21
-rw-r--r--arch/arm/plat-omap/pm.c1
-rw-r--r--arch/i386/kernel/cpu/transmeta.c1
-rw-r--r--arch/ia64/kernel/acpi.c53
-rw-r--r--arch/ia64/kernel/entry.S4
-rw-r--r--arch/ia64/kernel/ia64_ksyms.c15
-rw-r--r--arch/ia64/kernel/setup.c4
-rw-r--r--arch/ia64/kernel/smpboot.c5
-rw-r--r--arch/ia64/kernel/time.c39
-rw-r--r--arch/ia64/kernel/traps.c8
-rw-r--r--arch/ia64/sn/kernel/io_init.c97
-rw-r--r--arch/ia64/sn/kernel/setup.c4
-rw-r--r--arch/ia64/sn/kernel/sn2/prominfo_proc.c25
-rw-r--r--arch/ia64/sn/kernel/sn2/sn2_smp.c35
-rw-r--r--arch/ia64/sn/kernel/sn2/sn_proc_fs.c22
-rw-r--r--arch/ia64/sn/kernel/sn2/timer.c19
-rw-r--r--arch/ia64/sn/kernel/sn2/timer_interrupt.c7
-rw-r--r--arch/ia64/sn/kernel/tiocx.c4
-rw-r--r--arch/ia64/sn/kernel/xpc_channel.c8
-rw-r--r--arch/ia64/sn/kernel/xpc_main.c20
-rw-r--r--arch/ia64/sn/pci/pci_dma.c16
-rw-r--r--arch/ia64/sn/pci/pcibr/pcibr_ate.c29
-rw-r--r--arch/ia64/sn/pci/pcibr/pcibr_dma.c14
-rw-r--r--arch/ia64/sn/pci/pcibr/pcibr_provider.c9
-rw-r--r--arch/s390/Kconfig4
-rw-r--r--arch/s390/kernel/compat_linux.c4
-rw-r--r--arch/s390/kernel/compat_wrapper.S6
-rw-r--r--arch/s390/kernel/process.c4
-rw-r--r--arch/s390/kernel/setup.c2
-rw-r--r--arch/s390/kernel/smp.c69
-rw-r--r--arch/s390/kernel/syscalls.S2
-rw-r--r--arch/sh/Kconfig2
-rw-r--r--arch/x86_64/defconfig42
-rw-r--r--arch/x86_64/kernel/apic.c1
-rw-r--r--arch/x86_64/kernel/entry.S1
-rw-r--r--arch/x86_64/kernel/head.S7
-rw-r--r--arch/x86_64/kernel/io_apic.c16
-rw-r--r--arch/x86_64/kernel/mpparse.c4
-rw-r--r--arch/x86_64/kernel/nmi.c19
-rw-r--r--arch/x86_64/kernel/time.c3
-rw-r--r--arch/x86_64/mm/k8topology.c2
-rw-r--r--arch/x86_64/mm/numa.c2
-rw-r--r--arch/x86_64/mm/srat.c5
-rw-r--r--drivers/acpi/resources/rscalc.c6
-rw-r--r--drivers/char/tpm/tpm_infineon.c48
-rw-r--r--drivers/ide/ide-taskfile.c3
-rw-r--r--drivers/ide/pci/sgiioc4.c5
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c1
-rw-r--r--drivers/input/mouse/logips2pp.c1
-rw-r--r--drivers/input/mouse/trackpoint.c20
-rw-r--r--drivers/input/mouse/trackpoint.h4
-rw-r--r--drivers/input/serio/Makefile1
-rw-r--r--drivers/input/touchscreen/ads7846.c147
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c1
-rw-r--r--drivers/message/fusion/mptbase.c115
-rw-r--r--drivers/message/fusion/mptbase.h2
-rw-r--r--drivers/message/fusion/mptctl.c241
-rw-r--r--drivers/message/fusion/mptctl.h4
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/mmc/mmci.c7
-rw-r--r--drivers/net/Kconfig5
-rw-r--r--drivers/net/bonding/bond_main.c1
-rw-r--r--drivers/net/sis190.c4
-rw-r--r--drivers/net/skge.c10
-rw-r--r--drivers/net/sky2.c31
-rw-r--r--drivers/net/tokenring/smctr.h2
-rw-r--r--drivers/net/wireless/atmel.c98
-rw-r--r--drivers/net/wireless/wavelan_cs.c16
-rw-r--r--drivers/s390/cio/device.c2
-rw-r--r--drivers/s390/cio/device_pgid.c2
-rw-r--r--drivers/s390/cio/device_status.c1
-rw-r--r--drivers/s390/net/lcs.c31
-rw-r--r--drivers/s390/net/lcs.h2
-rw-r--r--drivers/s390/net/qeth.h112
-rw-r--r--drivers/s390/net/qeth_eddp.c11
-rw-r--r--drivers/s390/net/qeth_main.c17
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c76
-rw-r--r--drivers/s390/scsi/zfcp_def.h13
-rw-r--r--drivers/s390/scsi/zfcp_erp.c82
-rw-r--r--drivers/s390/scsi/zfcp_ext.h5
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c80
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c15
-rw-r--r--drivers/s390/scsi/zfcp_sysfs_adapter.c4
-rw-r--r--drivers/scsi/3w-9xxx.c7
-rw-r--r--drivers/scsi/aacraid/aachba.c217
-rw-r--r--drivers/scsi/aacraid/aacraid.h18
-rw-r--r--drivers/scsi/aacraid/commctrl.c22
-rw-r--r--drivers/scsi/aacraid/comminit.c12
-rw-r--r--drivers/scsi/aacraid/commsup.c50
-rw-r--r--drivers/scsi/aacraid/dpcsup.c2
-rw-r--r--drivers/scsi/aacraid/linit.c50
-rw-r--r--drivers/scsi/gdth.c2
-rw-r--r--drivers/scsi/ipr.c49
-rw-r--r--drivers/scsi/ipr.h5
-rw-r--r--drivers/scsi/iscsi_tcp.c78
-rw-r--r--drivers/scsi/iscsi_tcp.h4
-rw-r--r--drivers/scsi/libata-core.c7
-rw-r--r--drivers/scsi/megaraid.c2
-rw-r--r--drivers/scsi/megaraid.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c101
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h53
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c276
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h44
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h27
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c108
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c43
-rw-r--r--drivers/scsi/qla2xxx/qla_rscn.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c963
-rw-r--r--drivers/scsi/sata_mv.c1
-rw-r--r--drivers/scsi/sata_vsc.c30
-rw-r--r--drivers/scsi/scsi_lib.c59
-rw-r--r--drivers/scsi/scsi_scan.c26
-rw-r--r--drivers/scsi/scsi_sysfs.c9
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c260
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c2
-rw-r--r--drivers/serial/8250.c2
-rw-r--r--drivers/serial/Kconfig4
-rw-r--r--drivers/video/gbefb.c2
-rw-r--r--drivers/video/neofb.c15
-rw-r--r--drivers/video/s3c2410fb.c1
-rw-r--r--fs/compat.c6
-rw-r--r--fs/ext2/xattr.c6
-rw-r--r--fs/fuse/dev.c6
-rw-r--r--fs/fuse/file.c11
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h4
-rw-r--r--fs/ocfs2/dlm/dlmconvert.c12
-rw-r--r--fs/ocfs2/dlm/dlmlock.c25
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c7
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c42
-rw-r--r--fs/ocfs2/journal.c7
-rw-r--r--fs/ocfs2/journal.h2
-rw-r--r--fs/select.c6
-rw-r--r--include/asm-alpha/mman.h8
-rw-r--r--include/asm-arm/mman.h31
-rw-r--r--include/asm-arm/smp.h5
-rw-r--r--include/asm-arm/unistd.h6
-rw-r--r--include/asm-arm26/mman.h31
-rw-r--r--include/asm-cris/mman.h31
-rw-r--r--include/asm-frv/mman.h31
-rw-r--r--include/asm-generic/mman.h42
-rw-r--r--include/asm-h8300/mman.h31
-rw-r--r--include/asm-i386/mman.h31
-rw-r--r--include/asm-i386/thread_info.h4
-rw-r--r--include/asm-ia64/acpi.h2
-rw-r--r--include/asm-ia64/machvec_sn2.h7
-rw-r--r--include/asm-ia64/mman.h31
-rw-r--r--include/asm-ia64/sn/arch.h2
-rw-r--r--include/asm-ia64/sn/bte.h6
-rw-r--r--include/asm-ia64/sn/pcibr_provider.h14
-rw-r--r--include/asm-ia64/sn/sn_feature_sets.h3
-rw-r--r--include/asm-ia64/sn/xpc.h31
-rw-r--r--include/asm-ia64/timex.h2
-rw-r--r--include/asm-m32r/mman.h33
-rw-r--r--include/asm-m68k/mman.h31
-rw-r--r--include/asm-mips/mman.h22
-rw-r--r--include/asm-parisc/mman.h8
-rw-r--r--include/asm-powerpc/mman.h32
-rw-r--r--include/asm-powerpc/pgalloc.h2
-rw-r--r--include/asm-s390/mman.h31
-rw-r--r--include/asm-s390/smp.h2
-rw-r--r--include/asm-sh/mman.h31
-rw-r--r--include/asm-sparc/mman.h31
-rw-r--r--include/asm-sparc64/mman.h31
-rw-r--r--include/asm-v850/mman.h30
-rw-r--r--include/asm-x86_64/mman.h30
-rw-r--r--include/asm-x86_64/proto.h1
-rw-r--r--include/asm-xtensa/mman.h22
-rw-r--r--include/linux/kernel.h6
-rw-r--r--include/linux/ktime.h10
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/netfilter.h21
-rw-r--r--include/linux/timex.h3
-rw-r--r--include/net/ip.h1
-rw-r--r--include/net/xfrm.h1
-rw-r--r--include/scsi/iscsi_if.h3
-rw-r--r--include/scsi/scsi.h2
-rw-r--r--include/scsi/scsi_transport_iscsi.h34
-rw-r--r--include/video/neomagic.h1
-rw-r--r--kernel/cpuset.c35
-rw-r--r--kernel/power/snapshot.c4
-rw-r--r--kernel/power/swsusp.c4
-rw-r--r--kernel/sched.c13
-rw-r--r--kernel/sysctl.c2
-rw-r--r--kernel/timer.c39
-rw-r--r--lib/radix-tree.c10
-rw-r--r--mm/memory.c10
-rw-r--r--mm/mempolicy.c20
-rw-r--r--mm/page_alloc.c22
-rw-r--r--net/ipv4/ip_gre.c3
-rw-r--r--net/ipv4/ip_output.c16
-rw-r--r--net/ipv4/ipip.c3
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c5
-rw-r--r--net/ipv4/xfrm4_output.c13
-rw-r--r--net/ipv6/icmp.c6
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c2
-rw-r--r--net/netfilter/Kconfig6
-rw-r--r--net/netfilter/nf_conntrack_core.c5
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c4
216 files changed, 3716 insertions, 1986 deletions
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index 08c5d04f3086..e71bc6cbbc5e 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -11,6 +11,8 @@
11 Joel Schopp <jschopp@austin.ibm.com> 11 Joel Schopp <jschopp@austin.ibm.com>
12 ia64/x86_64: 12 ia64/x86_64:
13 Ashok Raj <ashok.raj@intel.com> 13 Ashok Raj <ashok.raj@intel.com>
14 s390:
15 Heiko Carstens <heiko.carstens@de.ibm.com>
14 16
15Authors: Ashok Raj <ashok.raj@intel.com> 17Authors: Ashok Raj <ashok.raj@intel.com>
16Lots of feedback: Nathan Lynch <nathanl@austin.ibm.com>, 18Lots of feedback: Nathan Lynch <nathanl@austin.ibm.com>,
@@ -44,9 +46,23 @@ maxcpus=n Restrict boot time cpus to n. Say if you have 4 cpus, using
44 maxcpus=2 will only boot 2. You can choose to bring the 46 maxcpus=2 will only boot 2. You can choose to bring the
45 other cpus later online, read FAQ's for more info. 47 other cpus later online, read FAQ's for more info.
46 48
47additional_cpus=n [x86_64 only] use this to limit hotpluggable cpus. 49additional_cpus=n [x86_64, s390 only] use this to limit hotpluggable cpus.
48 This option sets 50 This option sets
49 cpu_possible_map = cpu_present_map + additional_cpus 51 cpu_possible_map = cpu_present_map + additional_cpus
52
53ia64 and x86_64 use the number of disabled local apics in ACPI tables MADT
54to determine the number of potentially hot-pluggable cpus. The implementation
55should only rely on this to count the #of cpus, but *MUST* not rely on the
56apicid values in those tables for disabled apics. In the event BIOS doesnt
57mark such hot-pluggable cpus as disabled entries, one could use this
58parameter "additional_cpus=x" to represent those cpus in the cpu_possible_map.
59
60
61possible_cpus=n [s390 only] use this to set hotpluggable cpus.
62 This option sets possible_cpus bits in
63 cpu_possible_map. Thus keeping the numbers of bits set
64 constant even if the machine gets rebooted.
65 This option overrides additional_cpus.
50 66
51CPU maps and such 67CPU maps and such
52----------------- 68-----------------
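
The additional_cpus sizing described above is implemented later in this patch by prefill_possible_map() in arch/ia64/kernel/acpi.c. As a minimal sketch of that rule only (plain user-space C; NR_CPUS, cpu_set() and the ACPI bookkeeping are simplified away, and the helper names here are illustrative, not kernel API):

    #include <stdio.h>

    #define NR_CPUS 64

    /* Sizing rule from the patch: possible = present + additional_cpus,
     * clamped to NR_CPUS.  additional_cpus == -1 means "use the number of
     * disabled entries reported by the firmware tables". */
    static int prefill_possible(int available_cpus, int disabled_cpus,
                                int additional_cpus)
    {
            int possible;

            if (additional_cpus == -1)
                    additional_cpus = disabled_cpus > 0 ? disabled_cpus : 0;

            possible = available_cpus + additional_cpus;
            if (possible > NR_CPUS)
                    possible = NR_CPUS;
            return possible;
    }

    int main(void)
    {
            /* 4 CPUs booted, firmware marks 2 more as disabled, no override */
            printf("possible=%d\n", prefill_possible(4, 2, -1));  /* 6 */
            /* user passed additional_cpus=8 on the command line */
            printf("possible=%d\n", prefill_possible(4, 2, 8));   /* 12 */
            return 0;
    }
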
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 84370363da80..b874771385cd 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1133,6 +1133,8 @@ running once the system is up.
1133 Mechanism 1. 1133 Mechanism 1.
1134 conf2 [IA-32] Force use of PCI Configuration 1134 conf2 [IA-32] Force use of PCI Configuration
1135 Mechanism 2. 1135 Mechanism 2.
1136 nommconf [IA-32,X86_64] Disable use of MMCONFIG for PCI
1137 Configuration
1136 nosort [IA-32] Don't sort PCI devices according to 1138 nosort [IA-32] Don't sort PCI devices according to
1137 order given by the PCI BIOS. This sorting is 1139 order given by the PCI BIOS. This sorting is
1138 done to get a device order compatible with 1140 done to get a device order compatible with
@@ -1636,6 +1638,9 @@ running once the system is up.
1636 Format: 1638 Format:
1637 <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]] 1639 <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
1638 1640
1641 norandmaps Don't use address space randomization
1642 Equivalent to echo 0 > /proc/sys/kernel/randomize_va_space
1643
1639 1644
1640______________________________________________________________________ 1645______________________________________________________________________
1641Changelog: 1646Changelog:
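
The norandmaps entry above states its runtime equivalent explicitly: writing 0 to /proc/sys/kernel/randomize_va_space. A small user-space sketch of that same action (needs root; error handling kept minimal):

    #include <stdio.h>

    /* Runtime equivalent of booting with "norandmaps": disable address
     * space layout randomization through the proc interface named above. */
    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/randomize_va_space", "w");

            if (!f) {
                    perror("randomize_va_space");
                    return 1;
            }
            fputs("0\n", f);
            fclose(f);
            return 0;
    }
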
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index f8c16cbf56ba..2dafa63bd370 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,26 @@
11 Release Date : Wed Feb 03 14:31:44 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com>
22 Current Version : 00.00.02.04
33 Older Version : 00.00.02.04
4
5i. Support for 1078 type (ppc IOP) controller, device id : 0x60 added.
6 During initialization, depending on the device id, the template members
7 are initialized with function pointers specific to the ppc or
8 xscale controllers.
9
10 -Sumant Patro <Sumant.Patro@lsil.com>
11
121 Release Date : Fri Feb 03 14:16:25 PST 2006 - Sumant Patro
13 <Sumant.Patro@lsil.com>
142 Current Version : 00.00.02.04
153 Older Version : 00.00.02.02
16i. Register 16 byte CDB capability with scsi midlayer
17
18 "Ths patch properly registers the 16 byte command length capability of the
19 megaraid_sas controlled hardware with the scsi midlayer. All megaraid_sas
20 hardware supports 16 byte CDB's."
21
22 -Joshua Giles <joshua_giles@dell.com>
23
11 Release Date : Mon Jan 23 14:09:01 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com> 241 Release Date : Mon Jan 23 14:09:01 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com>
22 Current Version : 00.00.02.02 252 Current Version : 00.00.02.02
33 Older Version : 00.00.02.01 263 Older Version : 00.00.02.01
diff --git a/Makefile b/Makefile
index 74d67b2c35d9..77a448c8e776 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 16 3SUBLEVEL = 16
4EXTRAVERSION =-rc3 4EXTRAVERSION =-rc4
5NAME=Sliding Snow Leopard 5NAME=Sliding Snow Leopard
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -106,13 +106,12 @@ KBUILD_OUTPUT := $(shell cd $(KBUILD_OUTPUT) && /bin/pwd)
106$(if $(KBUILD_OUTPUT),, \ 106$(if $(KBUILD_OUTPUT),, \
107 $(error output directory "$(saved-output)" does not exist)) 107 $(error output directory "$(saved-output)" does not exist))
108 108
109.PHONY: $(MAKECMDGOALS) cdbuilddir 109.PHONY: $(MAKECMDGOALS)
110$(MAKECMDGOALS) _all: cdbuilddir
111 110
112cdbuilddir: 111$(filter-out _all,$(MAKECMDGOALS)) _all:
113 $(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \ 112 $(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \
114 KBUILD_SRC=$(CURDIR) \ 113 KBUILD_SRC=$(CURDIR) \
115 KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile $(MAKECMDGOALS) 114 KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile $@
116 115
117# Leave processing to above invocation of make 116# Leave processing to above invocation of make
118skip-makefile := 1 117skip-makefile := 1
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 8c3035d5ffc9..3173924a9b60 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -111,7 +111,7 @@
111 CALL(sys_statfs) 111 CALL(sys_statfs)
112/* 100 */ CALL(sys_fstatfs) 112/* 100 */ CALL(sys_fstatfs)
113 CALL(sys_ni_syscall) 113 CALL(sys_ni_syscall)
114 CALL(OBSOLETE(sys_socketcall)) 114 CALL(OBSOLETE(ABI(sys_socketcall, sys_oabi_socketcall)))
115 CALL(sys_syslog) 115 CALL(sys_syslog)
116 CALL(sys_setitimer) 116 CALL(sys_setitimer)
117/* 105 */ CALL(sys_getitimer) 117/* 105 */ CALL(sys_getitimer)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c45d10d07bde..68273b4dc882 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -23,6 +23,7 @@
23#include <linux/root_dev.h> 23#include <linux/root_dev.h>
24#include <linux/cpu.h> 24#include <linux/cpu.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/smp.h>
26 27
27#include <asm/cpu.h> 28#include <asm/cpu.h>
28#include <asm/elf.h> 29#include <asm/elf.h>
@@ -771,6 +772,10 @@ void __init setup_arch(char **cmdline_p)
771 paging_init(&meminfo, mdesc); 772 paging_init(&meminfo, mdesc);
772 request_standard_resources(&meminfo, mdesc); 773 request_standard_resources(&meminfo, mdesc);
773 774
775#ifdef CONFIG_SMP
776 smp_init_cpus();
777#endif
778
774 cpu_init(); 779 cpu_init();
775 780
776 /* 781 /*
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 7338948bd7d3..02aa300c4633 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -338,7 +338,6 @@ void __init smp_prepare_boot_cpu(void)
338 338
339 per_cpu(cpu_data, cpu).idle = current; 339 per_cpu(cpu_data, cpu).idle = current;
340 340
341 cpu_set(cpu, cpu_possible_map);
342 cpu_set(cpu, cpu_present_map); 341 cpu_set(cpu, cpu_present_map);
343 cpu_set(cpu, cpu_online_map); 342 cpu_set(cpu, cpu_online_map);
344} 343}
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 9d4b76409c64..8e2f9bc3368b 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -64,6 +64,7 @@
64 * sys_connect: 64 * sys_connect:
65 * sys_sendmsg: 65 * sys_sendmsg:
66 * sys_sendto: 66 * sys_sendto:
67 * sys_socketcall:
67 * 68 *
68 * struct sockaddr_un loses its padding with EABI. Since the size of the 69 * struct sockaddr_un loses its padding with EABI. Since the size of the
69 * structure is used as a validation test in unix_mkname(), we need to 70 * structure is used as a validation test in unix_mkname(), we need to
@@ -78,6 +79,7 @@
78#include <linux/eventpoll.h> 79#include <linux/eventpoll.h>
79#include <linux/sem.h> 80#include <linux/sem.h>
80#include <linux/socket.h> 81#include <linux/socket.h>
82#include <linux/net.h>
81#include <asm/ipc.h> 83#include <asm/ipc.h>
82#include <asm/uaccess.h> 84#include <asm/uaccess.h>
83 85
@@ -408,3 +410,31 @@ asmlinkage long sys_oabi_sendmsg(int fd, struct msghdr __user *msg, unsigned fla
408 return sys_sendmsg(fd, msg, flags); 410 return sys_sendmsg(fd, msg, flags);
409} 411}
410 412
413asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args)
414{
415 unsigned long r = -EFAULT, a[6];
416
417 switch (call) {
418 case SYS_BIND:
419 if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
420 r = sys_oabi_bind(a[0], (struct sockaddr __user *)a[1], a[2]);
421 break;
422 case SYS_CONNECT:
423 if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
424 r = sys_oabi_connect(a[0], (struct sockaddr __user *)a[1], a[2]);
425 break;
426 case SYS_SENDTO:
427 if (copy_from_user(a, args, 6 * sizeof(long)) == 0)
428 r = sys_oabi_sendto(a[0], (void __user *)a[1], a[2], a[3],
429 (struct sockaddr __user *)a[4], a[5]);
430 break;
431 case SYS_SENDMSG:
432 if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
433 r = sys_oabi_sendmsg(a[0], (struct msghdr __user *)a[1], a[2]);
434 break;
435 default:
436 r = sys_socketcall(call, args);
437 }
438
439 return r;
440}
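
For context on the hunk above: sys_socketcall() is a multiplexer, so user space passes a sub-call number plus a pointer to an argument array, which is why sys_oabi_socketcall() must copy_from_user() the argument block before redirecting bind/connect/sendto/sendmsg to the OABI-aware handlers. A hedged user-space sketch of what such a call looks like; it only builds where the headers define __NR_socketcall (i386 and old-ABI ARM kernels), and SYS_BIND comes from linux/net.h:

    #include <sys/syscall.h>   /* __NR_socketcall, where it exists */
    #include <unistd.h>        /* syscall(), unlink() */
    #include <linux/net.h>     /* SYS_BIND sub-call number */
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <stdio.h>
    #include <string.h>

    /* Roughly what a legacy libc does for bind(): pack the arguments into
     * an array and let the kernel demultiplex them in sys_socketcall(). */
    static long socketcall_bind(int fd, const struct sockaddr *sa, socklen_t len)
    {
            unsigned long args[3] = {
                    (unsigned long)fd, (unsigned long)sa, (unsigned long)len
            };
            return syscall(__NR_socketcall, SYS_BIND, args);
    }

    int main(void)
    {
            struct sockaddr_un sun;
            int fd = socket(AF_UNIX, SOCK_DGRAM, 0);

            memset(&sun, 0, sizeof(sun));
            sun.sun_family = AF_UNIX;
            strcpy(sun.sun_path, "/tmp/oabi-demo.sock");
            unlink(sun.sun_path);

            if (fd < 0 || socketcall_bind(fd, (struct sockaddr *)&sun,
                                          sizeof(sun)) < 0)
                    perror("socketcall bind");
            return 0;
    }
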
diff --git a/arch/arm/mach-integrator/platsmp.c b/arch/arm/mach-integrator/platsmp.c
index ea10bd8c972c..1bc8534ef0c6 100644
--- a/arch/arm/mach-integrator/platsmp.c
+++ b/arch/arm/mach-integrator/platsmp.c
@@ -140,6 +140,18 @@ static void __init poke_milo(void)
140 mb(); 140 mb();
141} 141}
142 142
143/*
144 * Initialise the CPU possible map early - this describes the CPUs
145 * which may be present or become present in the system.
146 */
147void __init smp_init_cpus(void)
148{
149 unsigned int i, ncores = get_core_count();
150
151 for (i = 0; i < ncores; i++)
152 cpu_set(i, cpu_possible_map);
153}
154
143void __init smp_prepare_cpus(unsigned int max_cpus) 155void __init smp_prepare_cpus(unsigned int max_cpus)
144{ 156{
145 unsigned int ncores = get_core_count(); 157 unsigned int ncores = get_core_count();
@@ -176,14 +188,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
176 max_cpus = ncores; 188 max_cpus = ncores;
177 189
178 /* 190 /*
179 * Initialise the possible/present maps. 191 * Initialise the present map, which describes the set of CPUs
180 * cpu_possible_map describes the set of CPUs which may be present 192 * actually populated at the present time.
181 * cpu_present_map describes the set of CPUs populated
182 */ 193 */
183 for (i = 0; i < max_cpus; i++) { 194 for (i = 0; i < max_cpus; i++)
184 cpu_set(i, cpu_possible_map);
185 cpu_set(i, cpu_present_map); 195 cpu_set(i, cpu_present_map);
186 }
187 196
188 /* 197 /*
189 * Do we need any more CPUs? If so, then let them know where 198 * Do we need any more CPUs? If so, then let them know where
diff --git a/arch/arm/mach-iop3xx/iop321-setup.c b/arch/arm/mach-iop3xx/iop321-setup.c
index e4f4c52d93d4..0ebbcb20c6ae 100644
--- a/arch/arm/mach-iop3xx/iop321-setup.c
+++ b/arch/arm/mach-iop3xx/iop321-setup.c
@@ -13,7 +13,6 @@
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/config.h> 15#include <linux/config.h>
16#include <linux/init.h>
17#include <linux/major.h> 16#include <linux/major.h>
18#include <linux/fs.h> 17#include <linux/fs.h>
19#include <linux/platform_device.h> 18#include <linux/platform_device.h>
diff --git a/arch/arm/mach-iop3xx/iop331-setup.c b/arch/arm/mach-iop3xx/iop331-setup.c
index 63585485123e..2d6abe5be14d 100644
--- a/arch/arm/mach-iop3xx/iop331-setup.c
+++ b/arch/arm/mach-iop3xx/iop331-setup.c
@@ -12,7 +12,6 @@
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/config.h> 14#include <linux/config.h>
15#include <linux/init.h>
16#include <linux/major.h> 15#include <linux/major.h>
17#include <linux/fs.h> 16#include <linux/fs.h>
18#include <linux/platform_device.h> 17#include <linux/platform_device.h>
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index da9340a53434..f260a9d34f70 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -27,8 +27,6 @@ static struct flash_platform_data nslu2_flash_data = {
27}; 27};
28 28
29static struct resource nslu2_flash_resource = { 29static struct resource nslu2_flash_resource = {
30 .start = NSLU2_FLASH_BASE,
31 .end = NSLU2_FLASH_BASE + NSLU2_FLASH_SIZE,
32 .flags = IORESOURCE_MEM, 30 .flags = IORESOURCE_MEM,
33}; 31};
34 32
@@ -116,6 +114,10 @@ static void __init nslu2_init(void)
116{ 114{
117 ixp4xx_sys_init(); 115 ixp4xx_sys_init();
118 116
117 nslu2_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
118 nslu2_flash_resource.end =
119 IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;
120
119 pm_power_off = nslu2_power_off; 121 pm_power_off = nslu2_power_off;
120 122
121 platform_add_devices(nslu2_devices, ARRAY_SIZE(nslu2_devices)); 123 platform_add_devices(nslu2_devices, ARRAY_SIZE(nslu2_devices));
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
index a8fbd76d8be5..b8484e15dacb 100644
--- a/arch/arm/mach-realview/platsmp.c
+++ b/arch/arm/mach-realview/platsmp.c
@@ -143,6 +143,18 @@ static void __init poke_milo(void)
143 mb(); 143 mb();
144} 144}
145 145
146/*
147 * Initialise the CPU possible map early - this describes the CPUs
148 * which may be present or become present in the system.
149 */
150void __init smp_init_cpus(void)
151{
152 unsigned int i, ncores = get_core_count();
153
154 for (i = 0; i < ncores; i++)
155 cpu_set(i, cpu_possible_map);
156}
157
146void __init smp_prepare_cpus(unsigned int max_cpus) 158void __init smp_prepare_cpus(unsigned int max_cpus)
147{ 159{
148 unsigned int ncores = get_core_count(); 160 unsigned int ncores = get_core_count();
@@ -179,14 +191,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
179 local_timer_setup(cpu); 191 local_timer_setup(cpu);
180 192
181 /* 193 /*
182 * Initialise the possible/present maps. 194 * Initialise the present map, which describes the set of CPUs
183 * cpu_possible_map describes the set of CPUs which may be present 195 * actually populated at the present time.
184 * cpu_present_map describes the set of CPUs populated
185 */ 196 */
186 for (i = 0; i < max_cpus; i++) { 197 for (i = 0; i < max_cpus; i++)
187 cpu_set(i, cpu_possible_map);
188 cpu_set(i, cpu_present_map); 198 cpu_set(i, cpu_present_map);
189 }
190 199
191 /* 200 /*
192 * Do we need any more CPUs? If so, then let them know where 201 * Do we need any more CPUs? If so, then let them know where
diff --git a/arch/arm/plat-omap/pm.c b/arch/arm/plat-omap/pm.c
index 1a24e2c10714..093efd786f21 100644
--- a/arch/arm/plat-omap/pm.c
+++ b/arch/arm/plat-omap/pm.c
@@ -38,7 +38,6 @@
38#include <linux/pm.h> 38#include <linux/pm.h>
39#include <linux/sched.h> 39#include <linux/sched.h>
40#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
41#include <linux/pm.h>
42#include <linux/interrupt.h> 41#include <linux/interrupt.h>
43 42
44#include <asm/io.h> 43#include <asm/io.h>
diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c
index bdbeb77f4e22..7214c9b577ab 100644
--- a/arch/i386/kernel/cpu/transmeta.c
+++ b/arch/i386/kernel/cpu/transmeta.c
@@ -1,4 +1,5 @@
1#include <linux/kernel.h> 1#include <linux/kernel.h>
2#include <linux/mm.h>
2#include <linux/init.h> 3#include <linux/init.h>
3#include <asm/processor.h> 4#include <asm/processor.h>
4#include <asm/msr.h> 5#include <asm/msr.h>
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index d2702c419cf8..ecd44bdc8394 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -761,6 +761,59 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
761 return (0); 761 return (0);
762} 762}
763 763
764int additional_cpus __initdata = -1;
765
766static __init int setup_additional_cpus(char *s)
767{
768 if (s)
769 additional_cpus = simple_strtol(s, NULL, 0);
770
771 return 0;
772}
773
774early_param("additional_cpus", setup_additional_cpus);
775
776/*
777 * cpu_possible_map should be static, it cannot change as cpu's
778 * are onlined, or offlined. The reason is per-cpu data-structures
779 * are allocated by some modules at init time, and dont expect to
780 * do this dynamically on cpu arrival/departure.
781 * cpu_present_map on the other hand can change dynamically.
782 * In case when cpu_hotplug is not compiled, then we resort to current
783 * behaviour, which is cpu_possible == cpu_present.
784 * - Ashok Raj
785 *
786 * Three ways to find out the number of additional hotplug CPUs:
787 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
788 * - The user can overwrite it with additional_cpus=NUM
789 * - Otherwise don't reserve additional CPUs.
790 */
791__init void prefill_possible_map(void)
792{
793 int i;
794 int possible, disabled_cpus;
795
796 disabled_cpus = total_cpus - available_cpus;
797
798 if (additional_cpus == -1) {
799 if (disabled_cpus > 0)
800 additional_cpus = disabled_cpus;
801 else
802 additional_cpus = 0;
803 }
804
805 possible = available_cpus + additional_cpus;
806
807 if (possible > NR_CPUS)
808 possible = NR_CPUS;
809
810 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
811 possible, max((possible - available_cpus), 0));
812
813 for (i = 0; i < possible; i++)
814 cpu_set(i, cpu_possible_map);
815}
816
764int acpi_map_lsapic(acpi_handle handle, int *pcpu) 817int acpi_map_lsapic(acpi_handle handle, int *pcpu)
765{ 818{
766 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 819 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 27b222c277e4..930fdfca6ddb 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -569,7 +569,9 @@ GLOBAL_ENTRY(ia64_trace_syscall)
569.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8 569.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
570.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10 570.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
571 br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value 571 br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
572.ret3: br.cond.sptk .work_pending_syscall_end 572.ret3:
573(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
574 br.cond.sptk .work_pending_syscall_end
573 575
574strace_error: 576strace_error:
575 ld8 r3=[r2] // load pt_regs.r8 577 ld8 r3=[r2] // load pt_regs.r8
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index e72de580ebbf..bbcfd08378a6 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -10,23 +10,8 @@
10 10
11#include <linux/string.h> 11#include <linux/string.h>
12EXPORT_SYMBOL(memset); 12EXPORT_SYMBOL(memset);
13EXPORT_SYMBOL(memchr);
14EXPORT_SYMBOL(memcmp);
15EXPORT_SYMBOL(memcpy); 13EXPORT_SYMBOL(memcpy);
16EXPORT_SYMBOL(memmove);
17EXPORT_SYMBOL(memscan);
18EXPORT_SYMBOL(strcat);
19EXPORT_SYMBOL(strchr);
20EXPORT_SYMBOL(strcmp);
21EXPORT_SYMBOL(strcpy);
22EXPORT_SYMBOL(strlen); 14EXPORT_SYMBOL(strlen);
23EXPORT_SYMBOL(strncat);
24EXPORT_SYMBOL(strncmp);
25EXPORT_SYMBOL(strncpy);
26EXPORT_SYMBOL(strnlen);
27EXPORT_SYMBOL(strrchr);
28EXPORT_SYMBOL(strstr);
29EXPORT_SYMBOL(strpbrk);
30 15
31#include <asm/checksum.h> 16#include <asm/checksum.h>
32EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */ 17EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 35f7835294a3..3258e09278d0 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -430,6 +430,7 @@ setup_arch (char **cmdline_p)
430 if (early_console_setup(*cmdline_p) == 0) 430 if (early_console_setup(*cmdline_p) == 0)
431 mark_bsp_online(); 431 mark_bsp_online();
432 432
433 parse_early_param();
433#ifdef CONFIG_ACPI 434#ifdef CONFIG_ACPI
434 /* Initialize the ACPI boot-time table parser */ 435 /* Initialize the ACPI boot-time table parser */
435 acpi_table_init(); 436 acpi_table_init();
@@ -688,6 +689,9 @@ void
688setup_per_cpu_areas (void) 689setup_per_cpu_areas (void)
689{ 690{
690 /* start_kernel() requires this... */ 691 /* start_kernel() requires this... */
692#ifdef CONFIG_ACPI_HOTPLUG_CPU
693 prefill_possible_map();
694#endif
691} 695}
692 696
693/* 697/*
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 8f44e7d2df66..b681ef34a86e 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -129,7 +129,7 @@ DEFINE_PER_CPU(int, cpu_state);
129/* Bitmasks of currently online, and possible CPUs */ 129/* Bitmasks of currently online, and possible CPUs */
130cpumask_t cpu_online_map; 130cpumask_t cpu_online_map;
131EXPORT_SYMBOL(cpu_online_map); 131EXPORT_SYMBOL(cpu_online_map);
132cpumask_t cpu_possible_map; 132cpumask_t cpu_possible_map = CPU_MASK_NONE;
133EXPORT_SYMBOL(cpu_possible_map); 133EXPORT_SYMBOL(cpu_possible_map);
134 134
135cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; 135cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
@@ -506,9 +506,6 @@ smp_build_cpu_map (void)
506 506
507 for (cpu = 0; cpu < NR_CPUS; cpu++) { 507 for (cpu = 0; cpu < NR_CPUS; cpu++) {
508 ia64_cpu_to_sapicid[cpu] = -1; 508 ia64_cpu_to_sapicid[cpu] = -1;
509#ifdef CONFIG_HOTPLUG_CPU
510 cpu_set(cpu, cpu_possible_map);
511#endif
512 } 509 }
513 510
514 ia64_cpu_to_sapicid[0] = boot_cpu_id; 511 ia64_cpu_to_sapicid[0] = boot_cpu_id;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index a094ec49ccfa..307d01e15b2e 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -250,32 +250,27 @@ time_init (void)
250 set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); 250 set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
251} 251}
252 252
253#define SMALLUSECS 100 253/*
254 254 * Generic udelay assumes that if preemption is allowed and the thread
255void 255 * migrates to another CPU, that the ITC values are synchronized across
256udelay (unsigned long usecs) 256 * all CPUs.
257 */
258static void
259ia64_itc_udelay (unsigned long usecs)
257{ 260{
258 unsigned long start; 261 unsigned long start = ia64_get_itc();
259 unsigned long cycles; 262 unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;
260 unsigned long smallusecs;
261 263
262 /* 264 while (time_before(ia64_get_itc(), end))
263 * Execute the non-preemptible delay loop (because the ITC might 265 cpu_relax();
264 * not be synchronized between CPUS) in relatively short time 266}
265 * chunks, allowing preemption between the chunks.
266 */
267 while (usecs > 0) {
268 smallusecs = (usecs > SMALLUSECS) ? SMALLUSECS : usecs;
269 preempt_disable();
270 cycles = smallusecs*local_cpu_data->cyc_per_usec;
271 start = ia64_get_itc();
272 267
273 while (ia64_get_itc() - start < cycles) 268void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;
274 cpu_relax();
275 269
276 preempt_enable(); 270void
277 usecs -= smallusecs; 271udelay (unsigned long usecs)
278 } 272{
273 (*ia64_udelay)(usecs);
279} 274}
280EXPORT_SYMBOL(udelay); 275EXPORT_SYMBOL(udelay);
281 276
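
The time.c change above does two things: it replaces the chunked, preemption-toggling delay loop with a straight ITC spin, and it turns udelay() into an indirect call through the new ia64_udelay function pointer so a platform can substitute its own delay source (the SN2 code later in this patch points it at an RTC-based loop, since the ITC is not synchronized across SN2 CPUs). A stand-alone sketch of that hook pattern, with the tick sources reduced to hypothetical stand-ins:

    #include <stdio.h>

    /* Default delay routine, reachable only through a function pointer. */
    static void default_udelay(unsigned long usecs)
    {
            printf("default (ITC-style) delay of %lu us\n", usecs);
    }

    /* Hypothetical platform replacement (stands in for the RTC-based
     * ia64_sn_udelay() installed later in this patch). */
    static void platform_udelay(unsigned long usecs)
    {
            printf("platform (RTC-style) delay of %lu us\n", usecs);
    }

    static void (*udelay_hook)(unsigned long) = default_udelay;

    static void udelay(unsigned long usecs)
    {
            (*udelay_hook)(usecs);
    }

    int main(void)
    {
            udelay(10);                     /* default implementation */
            udelay_hook = platform_udelay;  /* platform init retargets the hook */
            udelay(10);
            return 0;
    }
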
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 55391901b013..dabd6c32641e 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -16,6 +16,7 @@
16#include <linux/module.h> /* for EXPORT_SYMBOL */ 16#include <linux/module.h> /* for EXPORT_SYMBOL */
17#include <linux/hardirq.h> 17#include <linux/hardirq.h>
18#include <linux/kprobes.h> 18#include <linux/kprobes.h>
19#include <linux/delay.h> /* for ssleep() */
19 20
20#include <asm/fpswa.h> 21#include <asm/fpswa.h>
21#include <asm/ia32.h> 22#include <asm/ia32.h>
@@ -116,6 +117,13 @@ die (const char *str, struct pt_regs *regs, long err)
116 bust_spinlocks(0); 117 bust_spinlocks(0);
117 die.lock_owner = -1; 118 die.lock_owner = -1;
118 spin_unlock_irq(&die.lock); 119 spin_unlock_irq(&die.lock);
120
121 if (panic_on_oops) {
122 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
123 ssleep(5);
124 panic("Fatal exception");
125 }
126
119 do_exit(SIGSEGV); 127 do_exit(SIGSEGV);
120} 128}
121 129
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 3437c2390429..3edef0d32f86 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -23,6 +23,10 @@
23#include "xtalk/hubdev.h" 23#include "xtalk/hubdev.h"
24#include "xtalk/xwidgetdev.h" 24#include "xtalk/xwidgetdev.h"
25 25
26
27extern void sn_init_cpei_timer(void);
28extern void register_sn_procfs(void);
29
26static struct list_head sn_sysdata_list; 30static struct list_head sn_sysdata_list;
27 31
28/* sysdata list struct */ 32/* sysdata list struct */
@@ -40,12 +44,12 @@ struct brick {
40 struct slab_info slab_info[MAX_SLABS + 1]; 44 struct slab_info slab_info[MAX_SLABS + 1];
41}; 45};
42 46
43int sn_ioif_inited = 0; /* SN I/O infrastructure initialized? */ 47int sn_ioif_inited; /* SN I/O infrastructure initialized? */
44 48
45struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */ 49struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
46 50
47static int max_segment_number = 0; /* Default highest segment number */ 51static int max_segment_number; /* Default highest segment number */
48static int max_pcibus_number = 255; /* Default highest pci bus number */ 52static int max_pcibus_number = 255; /* Default highest pci bus number */
49 53
50/* 54/*
51 * Hooks and struct for unsupported pci providers 55 * Hooks and struct for unsupported pci providers
@@ -84,7 +88,6 @@ static inline u64
84sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num, 88sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
85 u64 address) 89 u64 address)
86{ 90{
87
88 struct ia64_sal_retval ret_stuff; 91 struct ia64_sal_retval ret_stuff;
89 ret_stuff.status = 0; 92 ret_stuff.status = 0;
90 ret_stuff.v0 = 0; 93 ret_stuff.v0 = 0;
@@ -94,7 +97,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
94 (u64) nasid, (u64) widget_num, 97 (u64) nasid, (u64) widget_num,
95 (u64) device_num, (u64) address, 0, 0, 0); 98 (u64) device_num, (u64) address, 0, 0, 0);
96 return ret_stuff.status; 99 return ret_stuff.status;
97
98} 100}
99 101
100/* 102/*
@@ -102,7 +104,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
102 */ 104 */
103static inline u64 sal_get_hubdev_info(u64 handle, u64 address) 105static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
104{ 106{
105
106 struct ia64_sal_retval ret_stuff; 107 struct ia64_sal_retval ret_stuff;
107 ret_stuff.status = 0; 108 ret_stuff.status = 0;
108 ret_stuff.v0 = 0; 109 ret_stuff.v0 = 0;
@@ -118,7 +119,6 @@ static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
118 */ 119 */
119static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address) 120static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
120{ 121{
121
122 struct ia64_sal_retval ret_stuff; 122 struct ia64_sal_retval ret_stuff;
123 ret_stuff.status = 0; 123 ret_stuff.status = 0;
124 ret_stuff.v0 = 0; 124 ret_stuff.v0 = 0;
@@ -215,7 +215,7 @@ static void __init sn_fixup_ionodes(void)
215 struct hubdev_info *hubdev; 215 struct hubdev_info *hubdev;
216 u64 status; 216 u64 status;
217 u64 nasid; 217 u64 nasid;
218 int i, widget, device; 218 int i, widget, device, size;
219 219
220 /* 220 /*
221 * Get SGI Specific HUB chipset information. 221 * Get SGI Specific HUB chipset information.
@@ -251,48 +251,37 @@ static void __init sn_fixup_ionodes(void)
251 if (!hubdev->hdi_flush_nasid_list.widget_p) 251 if (!hubdev->hdi_flush_nasid_list.widget_p)
252 continue; 252 continue;
253 253
254 size = (HUB_WIDGET_ID_MAX + 1) *
255 sizeof(struct sn_flush_device_kernel *);
254 hubdev->hdi_flush_nasid_list.widget_p = 256 hubdev->hdi_flush_nasid_list.widget_p =
255 kmalloc((HUB_WIDGET_ID_MAX + 1) * 257 kzalloc(size, GFP_KERNEL);
256 sizeof(struct sn_flush_device_kernel *), 258 if (!hubdev->hdi_flush_nasid_list.widget_p)
257 GFP_KERNEL); 259 BUG();
258 memset(hubdev->hdi_flush_nasid_list.widget_p, 0x0,
259 (HUB_WIDGET_ID_MAX + 1) *
260 sizeof(struct sn_flush_device_kernel *));
261 260
262 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { 261 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
263 sn_flush_device_kernel = kmalloc(DEV_PER_WIDGET * 262 size = DEV_PER_WIDGET *
264 sizeof(struct 263 sizeof(struct sn_flush_device_kernel);
265 sn_flush_device_kernel), 264 sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
266 GFP_KERNEL);
267 if (!sn_flush_device_kernel) 265 if (!sn_flush_device_kernel)
268 BUG(); 266 BUG();
269 memset(sn_flush_device_kernel, 0x0,
270 DEV_PER_WIDGET *
271 sizeof(struct sn_flush_device_kernel));
272 267
273 dev_entry = sn_flush_device_kernel; 268 dev_entry = sn_flush_device_kernel;
274 for (device = 0; device < DEV_PER_WIDGET; 269 for (device = 0; device < DEV_PER_WIDGET;
275 device++,dev_entry++) { 270 device++,dev_entry++) {
276 dev_entry->common = kmalloc(sizeof(struct 271 size = sizeof(struct sn_flush_device_common);
277 sn_flush_device_common), 272 dev_entry->common = kzalloc(size, GFP_KERNEL);
278 GFP_KERNEL);
279 if (!dev_entry->common) 273 if (!dev_entry->common)
280 BUG(); 274 BUG();
281 memset(dev_entry->common, 0x0, sizeof(struct
282 sn_flush_device_common));
283 275
284 if (sn_prom_feature_available( 276 if (sn_prom_feature_available(
285 PRF_DEVICE_FLUSH_LIST)) 277 PRF_DEVICE_FLUSH_LIST))
286 status = sal_get_device_dmaflush_list( 278 status = sal_get_device_dmaflush_list(
287 nasid, 279 nasid, widget, device,
288 widget, 280 (u64)(dev_entry->common));
289 device,
290 (u64)(dev_entry->common));
291 else 281 else
292 status = sn_device_fixup_war(nasid, 282 status = sn_device_fixup_war(nasid,
293 widget, 283 widget, device,
294 device, 284 dev_entry->common);
295 dev_entry->common);
296 if (status != SALRET_OK) 285 if (status != SALRET_OK)
297 panic("SAL call failed: %s\n", 286 panic("SAL call failed: %s\n",
298 ia64_sal_strerror(status)); 287 ia64_sal_strerror(status));
@@ -383,13 +372,12 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
383 372
384 pci_dev_get(dev); /* for the sysdata pointer */ 373 pci_dev_get(dev); /* for the sysdata pointer */
385 pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); 374 pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
386 if (pcidev_info <= 0) 375 if (!pcidev_info)
387 BUG(); /* Cannot afford to run out of memory */ 376 BUG(); /* Cannot afford to run out of memory */
388 377
389 sn_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_KERNEL); 378 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
390 if (sn_irq_info <= 0) 379 if (!sn_irq_info)
391 BUG(); /* Cannot afford to run out of memory */ 380 BUG(); /* Cannot afford to run out of memory */
392 memset(sn_irq_info, 0, sizeof(struct sn_irq_info));
393 381
394 /* Call to retrieve pci device information needed by kernel. */ 382 /* Call to retrieve pci device information needed by kernel. */
395 status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number, 383 status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
@@ -482,13 +470,13 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
482 */ 470 */
483void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) 471void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
484{ 472{
485 int status = 0; 473 int status;
486 int nasid, cnode; 474 int nasid, cnode;
487 struct pci_controller *controller; 475 struct pci_controller *controller;
488 struct sn_pci_controller *sn_controller; 476 struct sn_pci_controller *sn_controller;
489 struct pcibus_bussoft *prom_bussoft_ptr; 477 struct pcibus_bussoft *prom_bussoft_ptr;
490 struct hubdev_info *hubdev_info; 478 struct hubdev_info *hubdev_info;
491 void *provider_soft = NULL; 479 void *provider_soft;
492 struct sn_pcibus_provider *provider; 480 struct sn_pcibus_provider *provider;
493 481
494 status = sal_get_pcibus_info((u64) segment, (u64) busnum, 482 status = sal_get_pcibus_info((u64) segment, (u64) busnum,
@@ -535,6 +523,8 @@ void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
535 bus->sysdata = controller; 523 bus->sysdata = controller;
536 if (provider->bus_fixup) 524 if (provider->bus_fixup)
537 provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller); 525 provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller);
526 else
527 provider_soft = NULL;
538 528
539 if (provider_soft == NULL) { 529 if (provider_soft == NULL) {
540 /* fixup failed or not applicable */ 530 /* fixup failed or not applicable */
@@ -638,13 +628,8 @@ void sn_bus_free_sysdata(void)
638 628
639static int __init sn_pci_init(void) 629static int __init sn_pci_init(void)
640{ 630{
641 int i = 0; 631 int i, j;
642 int j = 0;
643 struct pci_dev *pci_dev = NULL; 632 struct pci_dev *pci_dev = NULL;
644 extern void sn_init_cpei_timer(void);
645#ifdef CONFIG_PROC_FS
646 extern void register_sn_procfs(void);
647#endif
648 633
649 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM()) 634 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
650 return 0; 635 return 0;
@@ -700,32 +685,29 @@ static int __init sn_pci_init(void)
700 */ 685 */
701void hubdev_init_node(nodepda_t * npda, cnodeid_t node) 686void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
702{ 687{
703
704 struct hubdev_info *hubdev_info; 688 struct hubdev_info *hubdev_info;
689 int size;
690 pg_data_t *pg;
691
692 size = sizeof(struct hubdev_info);
705 693
706 if (node >= num_online_nodes()) /* Headless/memless IO nodes */ 694 if (node >= num_online_nodes()) /* Headless/memless IO nodes */
707 hubdev_info = 695 pg = NODE_DATA(0);
708 (struct hubdev_info *)alloc_bootmem_node(NODE_DATA(0),
709 sizeof(struct
710 hubdev_info));
711 else 696 else
712 hubdev_info = 697 pg = NODE_DATA(node);
713 (struct hubdev_info *)alloc_bootmem_node(NODE_DATA(node),
714 sizeof(struct
715 hubdev_info));
716 npda->pdinfo = (void *)hubdev_info;
717 698
699 hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
700
701 npda->pdinfo = (void *)hubdev_info;
718} 702}
719 703
720geoid_t 704geoid_t
721cnodeid_get_geoid(cnodeid_t cnode) 705cnodeid_get_geoid(cnodeid_t cnode)
722{ 706{
723
724 struct hubdev_info *hubdev; 707 struct hubdev_info *hubdev;
725 708
726 hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); 709 hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
727 return hubdev->hdi_geoid; 710 return hubdev->hdi_geoid;
728
729} 711}
730 712
731subsys_initcall(sn_pci_init); 713subsys_initcall(sn_pci_init);
@@ -734,3 +716,4 @@ EXPORT_SYMBOL(sn_pci_unfixup_slot);
734EXPORT_SYMBOL(sn_pci_controller_fixup); 716EXPORT_SYMBOL(sn_pci_controller_fixup);
735EXPORT_SYMBOL(sn_bus_store_sysdata); 717EXPORT_SYMBOL(sn_bus_store_sysdata);
736EXPORT_SYMBOL(sn_bus_free_sysdata); 718EXPORT_SYMBOL(sn_bus_free_sysdata);
719EXPORT_SYMBOL(sn_pcidev_info_get);
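
Several hunks in this file replace the kmalloc()-then-memset() idiom with kzalloc(), which allocates and zeroes in one call, and drop the incorrect "<= 0" pointer tests in favour of plain NULL checks. The transformation, sketched as a user-space analogue (calloc() stands in for kzalloc(); in the kernel the call is kzalloc(size, GFP_KERNEL)):

    #include <stdlib.h>
    #include <string.h>

    struct flush_entry { int widget; int device; };

    /* Old idiom removed above: allocate, then zero by hand; the original
     * code also tested the pointer with "<= 0" rather than against NULL. */
    static struct flush_entry *alloc_old(void)
    {
            struct flush_entry *p = malloc(sizeof(*p));

            if (!p)
                    return NULL;
            memset(p, 0, sizeof(*p));
            return p;
    }

    /* New idiom: a single call returns zeroed memory. */
    static struct flush_entry *alloc_new(void)
    {
            return calloc(1, sizeof(struct flush_entry));
    }

    int main(void)
    {
            struct flush_entry *a = alloc_old();
            struct flush_entry *b = alloc_new();

            free(a);
            free(b);
            return 0;
    }
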
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 48645ac120fc..5b84836c2171 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
75DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); 75DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
76EXPORT_PER_CPU_SYMBOL(__sn_hub_info); 76EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
77 77
78DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]); 78DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
79EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); 79EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
80 80
81DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); 81DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
@@ -317,6 +317,7 @@ struct pcdp_vga_device {
317#define PCDP_PCI_TRANS_IOPORT 0x02 317#define PCDP_PCI_TRANS_IOPORT 0x02
318#define PCDP_PCI_TRANS_MMIO 0x01 318#define PCDP_PCI_TRANS_MMIO 0x01
319 319
320#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
320static void 321static void
321sn_scan_pcdp(void) 322sn_scan_pcdp(void)
322{ 323{
@@ -358,6 +359,7 @@ sn_scan_pcdp(void)
358 break; /* once we find the primary, we're done */ 359 break; /* once we find the primary, we're done */
359 } 360 }
360} 361}
362#endif
361 363
362static unsigned long sn2_rtc_initial; 364static unsigned long sn2_rtc_initial;
363 365
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
index 81c63b2f8ae9..6ae276d5d50c 100644
--- a/arch/ia64/sn/kernel/sn2/prominfo_proc.c
+++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (C) 1999,2001-2004, 2006 Silicon Graphics, Inc. All Rights Reserved.
7 * 7 *
8 * Module to export the system's Firmware Interface Tables, including 8 * Module to export the system's Firmware Interface Tables, including
9 * PROM revision numbers and banners, in /proc 9 * PROM revision numbers and banners, in /proc
@@ -190,7 +190,7 @@ static int
190read_version_entry(char *page, char **start, off_t off, int count, int *eof, 190read_version_entry(char *page, char **start, off_t off, int count, int *eof,
191 void *data) 191 void *data)
192{ 192{
193 int len = 0; 193 int len;
194 194
195 /* data holds the NASID of the node */ 195 /* data holds the NASID of the node */
196 len = dump_version(page, (unsigned long)data); 196 len = dump_version(page, (unsigned long)data);
@@ -202,7 +202,7 @@ static int
202read_fit_entry(char *page, char **start, off_t off, int count, int *eof, 202read_fit_entry(char *page, char **start, off_t off, int count, int *eof,
203 void *data) 203 void *data)
204{ 204{
205 int len = 0; 205 int len;
206 206
207 /* data holds the NASID of the node */ 207 /* data holds the NASID of the node */
208 len = dump_fit(page, (unsigned long)data); 208 len = dump_fit(page, (unsigned long)data);
@@ -229,13 +229,16 @@ int __init prominfo_init(void)
229 struct proc_dir_entry *p; 229 struct proc_dir_entry *p;
230 cnodeid_t cnodeid; 230 cnodeid_t cnodeid;
231 unsigned long nasid; 231 unsigned long nasid;
232 int size;
232 char name[NODE_NAME_LEN]; 233 char name[NODE_NAME_LEN];
233 234
234 if (!ia64_platform_is("sn2")) 235 if (!ia64_platform_is("sn2"))
235 return 0; 236 return 0;
236 237
237 proc_entries = kmalloc(num_online_nodes() * sizeof(struct proc_dir_entry *), 238 size = num_online_nodes() * sizeof(struct proc_dir_entry *);
238 GFP_KERNEL); 239 proc_entries = kzalloc(size, GFP_KERNEL);
240 if (!proc_entries)
241 return -ENOMEM;
239 242
240 sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL); 243 sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL);
241 244
@@ -244,14 +247,12 @@ int __init prominfo_init(void)
244 sprintf(name, "node%d", cnodeid); 247 sprintf(name, "node%d", cnodeid);
245 *entp = proc_mkdir(name, sgi_prominfo_entry); 248 *entp = proc_mkdir(name, sgi_prominfo_entry);
246 nasid = cnodeid_to_nasid(cnodeid); 249 nasid = cnodeid_to_nasid(cnodeid);
247 p = create_proc_read_entry( 250 p = create_proc_read_entry("fit", 0, *entp, read_fit_entry,
248 "fit", 0, *entp, read_fit_entry, 251 (void *)nasid);
249 (void *)nasid);
250 if (p) 252 if (p)
251 p->owner = THIS_MODULE; 253 p->owner = THIS_MODULE;
252 p = create_proc_read_entry( 254 p = create_proc_read_entry("version", 0, *entp,
253 "version", 0, *entp, read_version_entry, 255 read_version_entry, (void *)nasid);
254 (void *)nasid);
255 if (p) 256 if (p)
256 p->owner = THIS_MODULE; 257 p->owner = THIS_MODULE;
257 entp++; 258 entp++;
@@ -263,7 +264,7 @@ int __init prominfo_init(void)
263void __exit prominfo_exit(void) 264void __exit prominfo_exit(void)
264{ 265{
265 struct proc_dir_entry **entp; 266 struct proc_dir_entry **entp;
266 unsigned cnodeid; 267 unsigned int cnodeid;
267 char name[NODE_NAME_LEN]; 268 char name[NODE_NAME_LEN];
268 269
269 entp = proc_entries; 270 entp = proc_entries;
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index f153a4c35c70..24eefb2fc55f 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -46,8 +46,14 @@ DECLARE_PER_CPU(struct ptc_stats, ptcstats);
46 46
47static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock); 47static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
48 48
49void sn2_ptc_deadlock_recovery(short *, short, short, int, volatile unsigned long *, unsigned long, 49extern unsigned long
50 volatile unsigned long *, unsigned long); 50sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
51 volatile unsigned long *, unsigned long,
52 volatile unsigned long *, unsigned long);
53void
54sn2_ptc_deadlock_recovery(short *, short, short, int,
55 volatile unsigned long *, unsigned long,
56 volatile unsigned long *, unsigned long);
51 57
52/* 58/*
53 * Note: some is the following is captured here to make degugging easier 59 * Note: some is the following is captured here to make degugging easier
@@ -59,16 +65,6 @@ void sn2_ptc_deadlock_recovery(short *, short, short, int, volatile unsigned lon
59#define reset_max_active_on_deadlock() 1 65#define reset_max_active_on_deadlock() 1
60#define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock) 66#define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)
61 67
62static inline void ptc_lock(int sh1, unsigned long *flagp)
63{
64 spin_lock_irqsave(PTC_LOCK(sh1), *flagp);
65}
66
67static inline void ptc_unlock(int sh1, unsigned long flags)
68{
69 spin_unlock_irqrestore(PTC_LOCK(sh1), flags);
70}
71
72struct ptc_stats { 68struct ptc_stats {
73 unsigned long ptc_l; 69 unsigned long ptc_l;
74 unsigned long change_rid; 70 unsigned long change_rid;
@@ -82,6 +78,8 @@ struct ptc_stats {
82 unsigned long shub_ptc_flushes_not_my_mm; 78 unsigned long shub_ptc_flushes_not_my_mm;
83}; 79};
84 80
81#define sn2_ptctest 0
82
85static inline unsigned long wait_piowc(void) 83static inline unsigned long wait_piowc(void)
86{ 84{
87 volatile unsigned long *piows; 85 volatile unsigned long *piows;
@@ -200,7 +198,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
200 max_active = max_active_pio(shub1); 198 max_active = max_active_pio(shub1);
201 199
202 itc = ia64_get_itc(); 200 itc = ia64_get_itc();
203 ptc_lock(shub1, &flags); 201 spin_lock_irqsave(PTC_LOCK(shub1), flags);
204 itc2 = ia64_get_itc(); 202 itc2 = ia64_get_itc();
205 203
206 __get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc; 204 __get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
@@ -258,7 +256,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
258 ia64_srlz_d(); 256 ia64_srlz_d();
259 } 257 }
260 258
261 ptc_unlock(shub1, flags); 259 spin_unlock_irqrestore(PTC_LOCK(shub1), flags);
262 260
263 preempt_enable(); 261 preempt_enable();
264} 262}
@@ -270,11 +268,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
270 * TLB flush transaction. The recovery sequence is somewhat tricky & is 268 * TLB flush transaction. The recovery sequence is somewhat tricky & is
271 * coded in assembly language. 269 * coded in assembly language.
272 */ 270 */
273void sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid, volatile unsigned long *ptc0, unsigned long data0, 271
274 volatile unsigned long *ptc1, unsigned long data1) 272void
273sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
274 volatile unsigned long *ptc0, unsigned long data0,
275 volatile unsigned long *ptc1, unsigned long data1)
275{ 276{
276 extern unsigned long sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
277 volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long);
278 short nasid, i; 277 short nasid, i;
279 unsigned long *piows, zeroval, n; 278 unsigned long *piows, zeroval, n;
280 279
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
index a06719d752a0..c686d9c12f7b 100644
--- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
@@ -6,11 +6,11 @@
6 * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8#include <linux/config.h> 8#include <linux/config.h>
9#include <asm/uaccess.h>
10 9
11#ifdef CONFIG_PROC_FS 10#ifdef CONFIG_PROC_FS
12#include <linux/proc_fs.h> 11#include <linux/proc_fs.h>
13#include <linux/seq_file.h> 12#include <linux/seq_file.h>
13#include <asm/uaccess.h>
14#include <asm/sn/sn_sal.h> 14#include <asm/sn/sn_sal.h>
15 15
16static int partition_id_show(struct seq_file *s, void *p) 16static int partition_id_show(struct seq_file *s, void *p)
@@ -90,10 +90,10 @@ static int coherence_id_open(struct inode *inode, struct file *file)
90 return single_open(file, coherence_id_show, NULL); 90 return single_open(file, coherence_id_show, NULL);
91} 91}
92 92
93static struct proc_dir_entry *sn_procfs_create_entry( 93static struct proc_dir_entry
94 const char *name, struct proc_dir_entry *parent, 94*sn_procfs_create_entry(const char *name, struct proc_dir_entry *parent,
95 int (*openfunc)(struct inode *, struct file *), 95 int (*openfunc)(struct inode *, struct file *),
96 int (*releasefunc)(struct inode *, struct file *)) 96 int (*releasefunc)(struct inode *, struct file *))
97{ 97{
98 struct proc_dir_entry *e = create_proc_entry(name, 0444, parent); 98 struct proc_dir_entry *e = create_proc_entry(name, 0444, parent);
99 99
@@ -126,24 +126,24 @@ void register_sn_procfs(void)
126 return; 126 return;
127 127
128 sn_procfs_create_entry("partition_id", sgi_proc_dir, 128 sn_procfs_create_entry("partition_id", sgi_proc_dir,
129 partition_id_open, single_release); 129 partition_id_open, single_release);
130 130
131 sn_procfs_create_entry("system_serial_number", sgi_proc_dir, 131 sn_procfs_create_entry("system_serial_number", sgi_proc_dir,
132 system_serial_number_open, single_release); 132 system_serial_number_open, single_release);
133 133
134 sn_procfs_create_entry("licenseID", sgi_proc_dir, 134 sn_procfs_create_entry("licenseID", sgi_proc_dir,
135 licenseID_open, single_release); 135 licenseID_open, single_release);
136 136
137 e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir, 137 e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir,
138 sn_force_interrupt_open, single_release); 138 sn_force_interrupt_open, single_release);
139 if (e) 139 if (e)
140 e->proc_fops->write = sn_force_interrupt_write_proc; 140 e->proc_fops->write = sn_force_interrupt_write_proc;
141 141
142 sn_procfs_create_entry("coherence_id", sgi_proc_dir, 142 sn_procfs_create_entry("coherence_id", sgi_proc_dir,
143 coherence_id_open, single_release); 143 coherence_id_open, single_release);
144 144
145 sn_procfs_create_entry("sn_topology", sgi_proc_dir, 145 sn_procfs_create_entry("sn_topology", sgi_proc_dir,
146 sn_topology_open, sn_topology_release); 146 sn_topology_open, sn_topology_release);
147} 147}
148 148
149#endif /* CONFIG_PROC_FS */ 149#endif /* CONFIG_PROC_FS */
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index deb9baf4d473..56a88b6df4b4 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -14,6 +14,7 @@
14 14
15#include <asm/hw_irq.h> 15#include <asm/hw_irq.h>
16#include <asm/system.h> 16#include <asm/system.h>
17#include <asm/timex.h>
17 18
18#include <asm/sn/leds.h> 19#include <asm/sn/leds.h>
19#include <asm/sn/shub_mmr.h> 20#include <asm/sn/shub_mmr.h>
@@ -28,9 +29,27 @@ static struct time_interpolator sn2_interpolator = {
28 .source = TIME_SOURCE_MMIO64 29 .source = TIME_SOURCE_MMIO64
29}; 30};
30 31
32/*
33 * sn udelay uses the RTC instead of the ITC because the ITC is not
34 * synchronized across all CPUs, and the thread may migrate to another CPU
35 * if preemption is enabled.
36 */
37static void
38ia64_sn_udelay (unsigned long usecs)
39{
40 unsigned long start = rtc_time();
41 unsigned long end = start +
42 usecs * sn_rtc_cycles_per_second / 1000000;
43
44 while (time_before((unsigned long)rtc_time(), end))
45 cpu_relax();
46}
47
31void __init sn_timer_init(void) 48void __init sn_timer_init(void)
32{ 49{
33 sn2_interpolator.frequency = sn_rtc_cycles_per_second; 50 sn2_interpolator.frequency = sn_rtc_cycles_per_second;
34 sn2_interpolator.addr = RTC_COUNTER_ADDR; 51 sn2_interpolator.addr = RTC_COUNTER_ADDR;
35 register_time_interpolator(&sn2_interpolator); 52 register_time_interpolator(&sn2_interpolator);
53
54 ia64_udelay = &ia64_sn_udelay;
36} 55}
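The new comment explains the clock choice; the delay loop itself is a plain busy-wait against a free-running, globally synchronized counter. A hedged restatement of the idea in kernel context (read_counter() and COUNTER_HZ below are placeholders, not kernel symbols):

extern unsigned long read_counter(void);	/* placeholder for rtc_time() */
#define COUNTER_HZ	50000000UL		/* placeholder for sn_rtc_cycles_per_second */

static void example_udelay(unsigned long usecs)
{
	/* same order of operations as the patch; very large usecs could overflow */
	unsigned long end = read_counter() + usecs * COUNTER_HZ / 1000000UL;

	/* open-coded time_before(): wrap-safe "now is before end" test */
	while ((long)(end - read_counter()) > 0)
		cpu_relax();
}

Installing the routine through the ia64_udelay hook, as the hunk does, keeps udelay() on sn2 correct in the face of ITC drift between CPUs and preemption-driven migration, which is exactly what the comment above calls out.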
diff --git a/arch/ia64/sn/kernel/sn2/timer_interrupt.c b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
index adf5db2e2afe..fa7f69945917 100644
--- a/arch/ia64/sn/kernel/sn2/timer_interrupt.c
+++ b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * 2 *
3 * 3 *
4 * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved. 4 * Copyright (c) 2005, 2006 Silicon Graphics, Inc. All Rights Reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License 7 * under the terms of version 2 of the GNU General Public License
@@ -22,11 +22,6 @@
22 * License along with this program; if not, write the Free Software 22 * License along with this program; if not, write the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
24 * 24 *
25 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
26 * Mountain View, CA 94043, or:
27 *
28 * http://www.sgi.com
29 *
30 * For further information regarding this notice, see: 25 * For further information regarding this notice, see:
31 * 26 *
32 * http://oss.sgi.com/projects/GenInfo/NoticeExplan 27 * http://oss.sgi.com/projects/GenInfo/NoticeExplan
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index d263d3e8fbb9..8a56f8b5ffa2 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -284,12 +284,10 @@ struct sn_irq_info *tiocx_irq_alloc(nasid_t nasid, int widget, int irq,
284 if ((nasid & 1) == 0) 284 if ((nasid & 1) == 0)
285 return NULL; 285 return NULL;
286 286
287 sn_irq_info = kmalloc(sn_irq_size, GFP_KERNEL); 287 sn_irq_info = kzalloc(sn_irq_size, GFP_KERNEL);
288 if (sn_irq_info == NULL) 288 if (sn_irq_info == NULL)
289 return NULL; 289 return NULL;
290 290
291 memset(sn_irq_info, 0x0, sn_irq_size);
292
293 status = tiocx_intr_alloc(nasid, widget, __pa(sn_irq_info), irq, 291 status = tiocx_intr_alloc(nasid, widget, __pa(sn_irq_info), irq,
294 req_nasid, slice); 292 req_nasid, slice);
295 if (status) { 293 if (status) {
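The allocation change above is the stock kmalloc()+memset() to kzalloc() folding; nothing else about the error handling moves. Schematically (p and size are stand-ins):

	/* before: allocate, then zero by hand */
	p = kmalloc(size, GFP_KERNEL);
	if (p == NULL)
		return NULL;
	memset(p, 0, size);

	/* after: kzalloc() allocates and zeroes in one call */
	p = kzalloc(size, GFP_KERNEL);
	if (p == NULL)
		return NULL;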
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 36e5437a0fb6..cdf6856ce089 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -738,7 +738,9 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
738 738
739 /* make sure all activity has settled down first */ 739 /* make sure all activity has settled down first */
740 740
741 if (atomic_read(&ch->references) > 0) { 741 if (atomic_read(&ch->references) > 0 ||
742 ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
743 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE))) {
742 return; 744 return;
743 } 745 }
744 DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); 746 DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
@@ -775,7 +777,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
775 777
776 /* both sides are disconnected now */ 778 /* both sides are disconnected now */
777 779
778 if (ch->flags & XPC_C_CONNECTCALLOUT) { 780 if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
779 spin_unlock_irqrestore(&ch->lock, *irq_flags); 781 spin_unlock_irqrestore(&ch->lock, *irq_flags);
780 xpc_disconnect_callout(ch, xpcDisconnected); 782 xpc_disconnect_callout(ch, xpcDisconnected);
781 spin_lock_irqsave(&ch->lock, *irq_flags); 783 spin_lock_irqsave(&ch->lock, *irq_flags);
@@ -1300,7 +1302,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
1300 "delivered=%d, partid=%d, channel=%d\n", 1302 "delivered=%d, partid=%d, channel=%d\n",
1301 nmsgs_sent, ch->partid, ch->number); 1303 nmsgs_sent, ch->partid, ch->number);
1302 1304
1303 if (ch->flags & XPC_C_CONNECTCALLOUT) { 1305 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
1304 xpc_activate_kthreads(ch, nmsgs_sent); 1306 xpc_activate_kthreads(ch, nmsgs_sent);
1305 } 1307 }
1306 } 1308 }
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 9cd460dfe27e..8cbf16432570 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -750,12 +750,16 @@ xpc_daemonize_kthread(void *args)
750 /* let registerer know that connection has been established */ 750 /* let registerer know that connection has been established */
751 751
752 spin_lock_irqsave(&ch->lock, irq_flags); 752 spin_lock_irqsave(&ch->lock, irq_flags);
753 if (!(ch->flags & XPC_C_CONNECTCALLOUT)) { 753 if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
754 ch->flags |= XPC_C_CONNECTCALLOUT; 754 ch->flags |= XPC_C_CONNECTEDCALLOUT;
755 spin_unlock_irqrestore(&ch->lock, irq_flags); 755 spin_unlock_irqrestore(&ch->lock, irq_flags);
756 756
757 xpc_connected_callout(ch); 757 xpc_connected_callout(ch);
758 758
759 spin_lock_irqsave(&ch->lock, irq_flags);
760 ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
761 spin_unlock_irqrestore(&ch->lock, irq_flags);
762
759 /* 763 /*
760 * It is possible that while the callout was being 764 * It is possible that while the callout was being
761 * made that the remote partition sent some messages. 765 * made that the remote partition sent some messages.
@@ -777,15 +781,17 @@ xpc_daemonize_kthread(void *args)
777 781
778 if (atomic_dec_return(&ch->kthreads_assigned) == 0) { 782 if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
779 spin_lock_irqsave(&ch->lock, irq_flags); 783 spin_lock_irqsave(&ch->lock, irq_flags);
780 if ((ch->flags & XPC_C_CONNECTCALLOUT) && 784 if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
781 !(ch->flags & XPC_C_DISCONNECTCALLOUT)) { 785 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
782 ch->flags |= XPC_C_DISCONNECTCALLOUT; 786 ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
783 spin_unlock_irqrestore(&ch->lock, irq_flags); 787 spin_unlock_irqrestore(&ch->lock, irq_flags);
784 788
785 xpc_disconnect_callout(ch, xpcDisconnecting); 789 xpc_disconnect_callout(ch, xpcDisconnecting);
786 } else { 790
787 spin_unlock_irqrestore(&ch->lock, irq_flags); 791 spin_lock_irqsave(&ch->lock, irq_flags);
792 ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
788 } 793 }
794 spin_unlock_irqrestore(&ch->lock, irq_flags);
789 if (atomic_dec_return(&part->nchannels_engaged) == 0) { 795 if (atomic_dec_return(&part->nchannels_engaged) == 0) {
790 xpc_mark_partition_disengaged(part); 796 xpc_mark_partition_disengaged(part);
791 xpc_IPI_send_disengage(part); 797 xpc_IPI_send_disengage(part);
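The renames above split the old single callout bit into a "callout started" bit and a "callout made" bit, which is what lets the xpc_process_disconnect() change earlier in this patch hold off tearing the channel down until the matching disconnecting callout has actually been made. The shape of the pattern, with placeholder flag names instead of the real XPC_C_* values:

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (!(ch->flags & FLAG_CONNECTEDCALLOUT)) {
		ch->flags |= FLAG_CONNECTEDCALLOUT;	/* callout started */
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		xpc_connected_callout(ch);	/* lock not held across the callout */

		spin_lock_irqsave(&ch->lock, irq_flags);
		ch->flags |= FLAG_CONNECTEDCALLOUT_MADE;	/* callout finished */
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);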
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 5a36292388eb..b4b84c269210 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -335,10 +335,10 @@ int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
335 */ 335 */
336 336
337 SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, 337 SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
338 pci_domain_nr(bus), bus->number, 338 pci_domain_nr(bus), bus->number,
339 0, /* io */ 339 0, /* io */
340 0, /* read */ 340 0, /* read */
341 port, size, __pa(val)); 341 port, size, __pa(val));
342 342
343 if (isrv.status == 0) 343 if (isrv.status == 0)
344 return size; 344 return size;
@@ -381,10 +381,10 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
381 */ 381 */
382 382
383 SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, 383 SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
384 pci_domain_nr(bus), bus->number, 384 pci_domain_nr(bus), bus->number,
385 0, /* io */ 385 0, /* io */
386 1, /* write */ 386 1, /* write */
387 port, size, __pa(&val)); 387 port, size, __pa(&val));
388 388
389 if (isrv.status == 0) 389 if (isrv.status == 0)
390 return size; 390 return size;
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index aa3fa5152a32..1f0253bfe0a0 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9#include <linux/types.h> 9#include <linux/types.h>
@@ -12,7 +12,7 @@
12#include <asm/sn/pcibus_provider_defs.h> 12#include <asm/sn/pcibus_provider_defs.h>
13#include <asm/sn/pcidev.h> 13#include <asm/sn/pcidev.h>
14 14
15int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */ 15int pcibr_invalidate_ate; /* by default don't invalidate ATE on free */
16 16
17/* 17/*
18 * mark_ate: Mark the ate as either free or inuse. 18 * mark_ate: Mark the ate as either free or inuse.
@@ -20,14 +20,12 @@ int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */
20static void mark_ate(struct ate_resource *ate_resource, int start, int number, 20static void mark_ate(struct ate_resource *ate_resource, int start, int number,
21 u64 value) 21 u64 value)
22{ 22{
23
24 u64 *ate = ate_resource->ate; 23 u64 *ate = ate_resource->ate;
25 int index; 24 int index;
26 int length = 0; 25 int length = 0;
27 26
28 for (index = start; length < number; index++, length++) 27 for (index = start; length < number; index++, length++)
29 ate[index] = value; 28 ate[index] = value;
30
31} 29}
32 30
33/* 31/*
@@ -37,7 +35,6 @@ static void mark_ate(struct ate_resource *ate_resource, int start, int number,
37static int find_free_ate(struct ate_resource *ate_resource, int start, 35static int find_free_ate(struct ate_resource *ate_resource, int start,
38 int count) 36 int count)
39{ 37{
40
41 u64 *ate = ate_resource->ate; 38 u64 *ate = ate_resource->ate;
42 int index; 39 int index;
43 int start_free; 40 int start_free;
@@ -70,12 +67,10 @@ static int find_free_ate(struct ate_resource *ate_resource, int start,
70static inline void free_ate_resource(struct ate_resource *ate_resource, 67static inline void free_ate_resource(struct ate_resource *ate_resource,
71 int start) 68 int start)
72{ 69{
73
74 mark_ate(ate_resource, start, ate_resource->ate[start], 0); 70 mark_ate(ate_resource, start, ate_resource->ate[start], 0);
75 if ((ate_resource->lowest_free_index > start) || 71 if ((ate_resource->lowest_free_index > start) ||
76 (ate_resource->lowest_free_index < 0)) 72 (ate_resource->lowest_free_index < 0))
77 ate_resource->lowest_free_index = start; 73 ate_resource->lowest_free_index = start;
78
79} 74}
80 75
81/* 76/*
@@ -84,7 +79,6 @@ static inline void free_ate_resource(struct ate_resource *ate_resource,
84static inline int alloc_ate_resource(struct ate_resource *ate_resource, 79static inline int alloc_ate_resource(struct ate_resource *ate_resource,
85 int ate_needed) 80 int ate_needed)
86{ 81{
87
88 int start_index; 82 int start_index;
89 83
90 /* 84 /*
@@ -118,19 +112,12 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource,
118 */ 112 */
119int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count) 113int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
120{ 114{
121 int status = 0; 115 int status;
122 u64 flag; 116 unsigned long flags;
123 117
124 flag = pcibr_lock(pcibus_info); 118 spin_lock_irqsave(&pcibus_info->pbi_lock, flags);
125 status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count); 119 status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count);
126 120 spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags);
127 if (status < 0) {
128 /* Failed to allocate */
129 pcibr_unlock(pcibus_info, flag);
130 return -1;
131 }
132
133 pcibr_unlock(pcibus_info, flag);
134 121
135 return status; 122 return status;
136} 123}
@@ -182,7 +169,7 @@ void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
182 ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V)); 169 ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V));
183 } 170 }
184 171
185 flags = pcibr_lock(pcibus_info); 172 spin_lock_irqsave(&pcibus_info->pbi_lock, flags);
186 free_ate_resource(&pcibus_info->pbi_int_ate_resource, index); 173 free_ate_resource(&pcibus_info->pbi_int_ate_resource, index);
187 pcibr_unlock(pcibus_info, flags); 174 spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags);
188} 175}
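Two things change in pcibr_ate_alloc() above: the pcibr_lock()/pcibr_unlock() wrappers give way to direct spin_lock_irqsave()/spin_unlock_irqrestore() calls on pbi_lock, and the saved interrupt state moves from a u64 into the unsigned long the irqsave API expects. The simplified allocator, as it reads after the patch:

int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
{
	int status;
	unsigned long flags;	/* irqsave state must be unsigned long */

	spin_lock_irqsave(&pcibus_info->pbi_lock, flags);
	status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count);
	spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags);

	return status;		/* first free index, or negative on failure */
}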
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index 54ce5b7ceed2..9f86bb6519aa 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -137,14 +137,12 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
137 pci_addr |= PCI64_ATTR_VIRTUAL; 137 pci_addr |= PCI64_ATTR_VIRTUAL;
138 138
139 return pci_addr; 139 return pci_addr;
140
141} 140}
142 141
143static dma_addr_t 142static dma_addr_t
144pcibr_dmatrans_direct32(struct pcidev_info * info, 143pcibr_dmatrans_direct32(struct pcidev_info * info,
145 u64 paddr, size_t req_size, u64 flags) 144 u64 paddr, size_t req_size, u64 flags)
146{ 145{
147
148 struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; 146 struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
149 struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> 147 struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
150 pdi_pcibus_info; 148 pdi_pcibus_info;
@@ -171,7 +169,6 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
171 } 169 }
172 170
173 return PCI32_DIRECT_BASE | offset; 171 return PCI32_DIRECT_BASE | offset;
174
175} 172}
176 173
177/* 174/*
@@ -218,9 +215,8 @@ void sn_dma_flush(u64 addr)
218 u64 flags; 215 u64 flags;
219 u64 itte; 216 u64 itte;
220 struct hubdev_info *hubinfo; 217 struct hubdev_info *hubinfo;
221 volatile struct sn_flush_device_kernel *p; 218 struct sn_flush_device_kernel *p;
222 volatile struct sn_flush_device_common *common; 219 struct sn_flush_device_common *common;
223
224 struct sn_flush_nasid_entry *flush_nasid_list; 220 struct sn_flush_nasid_entry *flush_nasid_list;
225 221
226 if (!sn_ioif_inited) 222 if (!sn_ioif_inited)
@@ -310,8 +306,7 @@ void sn_dma_flush(u64 addr)
310 (common->sfdl_slot - 1)); 306 (common->sfdl_slot - 1));
311 } 307 }
312 } else { 308 } else {
313 spin_lock_irqsave((spinlock_t *)&p->sfdl_flush_lock, 309 spin_lock_irqsave(&p->sfdl_flush_lock, flags);
314 flags);
315 *common->sfdl_flush_addr = 0; 310 *common->sfdl_flush_addr = 0;
316 311
317 /* force an interrupt. */ 312 /* force an interrupt. */
@@ -322,8 +317,7 @@ void sn_dma_flush(u64 addr)
322 cpu_relax(); 317 cpu_relax();
323 318
324 /* okay, everything is synched up. */ 319 /* okay, everything is synched up. */
325 spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, 320 spin_unlock_irqrestore(&p->sfdl_flush_lock, flags);
326 flags);
327 } 321 }
328 return; 322 return;
329} 323}
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 2fac27049bf6..98f716bd92f0 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -163,9 +163,12 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
163 /* Setup the PMU ATE map */ 163 /* Setup the PMU ATE map */
164 soft->pbi_int_ate_resource.lowest_free_index = 0; 164 soft->pbi_int_ate_resource.lowest_free_index = 0;
165 soft->pbi_int_ate_resource.ate = 165 soft->pbi_int_ate_resource.ate =
166 kmalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL); 166 kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);
167 memset(soft->pbi_int_ate_resource.ate, 0, 167
168 (soft->pbi_int_ate_size * sizeof(u64))); 168 if (!soft->pbi_int_ate_resource.ate) {
169 kfree(soft);
170 return NULL;
171 }
169 172
170 if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) { 173 if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) {
171 /* TIO PCI Bridge: find nearest node with CPUs */ 174 /* TIO PCI Bridge: find nearest node with CPUs */
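Besides the kzalloc() conversion, the hunk adds the allocation-failure check that was missing before; since the bus soft structure was allocated earlier in pcibr_bus_fixup(), it has to be freed on the error path rather than leaked:

	soft->pbi_int_ate_resource.ate =
		kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);
	if (!soft->pbi_int_ate_resource.ate) {
		kfree(soft);		/* undo the earlier allocation */
		return NULL;
	}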
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b66602ad7b33..b7ca5bf9acfc 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -80,6 +80,10 @@ config HOTPLUG_CPU
80 can be controlled through /sys/devices/system/cpu/cpu#. 80 can be controlled through /sys/devices/system/cpu/cpu#.
81 Say N if you want to disable CPU hotplug. 81 Say N if you want to disable CPU hotplug.
82 82
83config DEFAULT_MIGRATION_COST
84 int
85 default "1000000"
86
83config MATHEMU 87config MATHEMU
84 bool "IEEE FPU emulation" 88 bool "IEEE FPU emulation"
85 depends on MARCH_G5 89 depends on MARCH_G5
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 2d021626c1a6..cc058dc3bc8b 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -905,8 +905,8 @@ asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * sta
905 return ret; 905 return ret;
906} 906}
907 907
908asmlinkage long sys32_fstatat(unsigned int dfd, char __user *filename, 908asmlinkage long sys32_fstatat64(unsigned int dfd, char __user *filename,
909 struct stat64_emu31 __user* statbuf, int flag) 909 struct stat64_emu31 __user* statbuf, int flag)
910{ 910{
911 struct kstat stat; 911 struct kstat stat;
912 int error = -EINVAL; 912 int error = -EINVAL;
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index dd2d6c3e8df8..615964cca15f 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1523,13 +1523,13 @@ compat_sys_futimesat_wrapper:
1523 llgtr %r4,%r4 # struct timeval * 1523 llgtr %r4,%r4 # struct timeval *
1524 jg compat_sys_futimesat 1524 jg compat_sys_futimesat
1525 1525
1526 .globl sys32_fstatat_wrapper 1526 .globl sys32_fstatat64_wrapper
1527sys32_fstatat_wrapper: 1527sys32_fstatat64_wrapper:
1528 llgfr %r2,%r2 # unsigned int 1528 llgfr %r2,%r2 # unsigned int
1529 llgtr %r3,%r3 # char * 1529 llgtr %r3,%r3 # char *
1530 llgtr %r4,%r4 # struct stat64 * 1530 llgtr %r4,%r4 # struct stat64 *
1531 lgfr %r5,%r5 # int 1531 lgfr %r5,%r5 # int
1532 jg sys32_fstatat 1532 jg sys32_fstatat64
1533 1533
1534 .globl sys_unlinkat_wrapper 1534 .globl sys_unlinkat_wrapper
1535sys_unlinkat_wrapper: 1535sys_unlinkat_wrapper:
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 008c74526fd3..da6fbae8df91 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -128,8 +128,10 @@ void default_idle(void)
128 __ctl_set_bit(8, 15); 128 __ctl_set_bit(8, 15);
129 129
130#ifdef CONFIG_HOTPLUG_CPU 130#ifdef CONFIG_HOTPLUG_CPU
131 if (cpu_is_offline(cpu)) 131 if (cpu_is_offline(cpu)) {
132 preempt_enable_no_resched();
132 cpu_die(); 133 cpu_die();
134 }
133#endif 135#endif
134 136
135 local_mcck_disable(); 137 local_mcck_disable();
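cpu_die() never returns, and the idle path holds a preempt reference, so the hotplug branch now rebalances the preempt count with preempt_enable_no_resched() before the CPU is taken down. With the new braces the fragment reads:

#ifdef CONFIG_HOTPLUG_CPU
	if (cpu_is_offline(cpu)) {
		preempt_enable_no_resched();	/* drop the idle loop's preempt
						 * reference; cpu_die() will not
						 * return to this context */
		cpu_die();
	}
#endif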
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index de8784267473..24f62f16c0e5 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -600,6 +600,7 @@ setup_arch(char **cmdline_p)
600 init_mm.brk = (unsigned long) &_end; 600 init_mm.brk = (unsigned long) &_end;
601 601
602 parse_cmdline_early(cmdline_p); 602 parse_cmdline_early(cmdline_p);
603 parse_early_param();
603 604
604 setup_memory(); 605 setup_memory();
605 setup_resources(); 606 setup_resources();
@@ -607,6 +608,7 @@ setup_arch(char **cmdline_p)
607 608
608 cpu_init(); 609 cpu_init();
609 __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; 610 __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
611 smp_setup_cpu_possible_map();
610 612
611 /* 613 /*
612 * Create kernel page tables and switch to virtual addressing. 614 * Create kernel page tables and switch to virtual addressing.
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 0d1ad5dbe2b1..7dbe00c76c6b 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,8 +1,7 @@
1/* 1/*
2 * arch/s390/kernel/smp.c 2 * arch/s390/kernel/smp.c
3 * 3 *
4 * S390 version 4 * Copyright (C) IBM Corp. 1999,2006
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 5 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * Heiko Carstens (heiko.carstens@de.ibm.com) 7 * Heiko Carstens (heiko.carstens@de.ibm.com)
@@ -41,8 +40,6 @@
41#include <asm/cpcmd.h> 40#include <asm/cpcmd.h>
42#include <asm/tlbflush.h> 41#include <asm/tlbflush.h>
43 42
44/* prototypes */
45
46extern volatile int __cpu_logical_map[]; 43extern volatile int __cpu_logical_map[];
47 44
48/* 45/*
@@ -51,13 +48,11 @@ extern volatile int __cpu_logical_map[];
51 48
52struct _lowcore *lowcore_ptr[NR_CPUS]; 49struct _lowcore *lowcore_ptr[NR_CPUS];
53 50
54cpumask_t cpu_online_map; 51cpumask_t cpu_online_map = CPU_MASK_NONE;
55cpumask_t cpu_possible_map = CPU_MASK_ALL; 52cpumask_t cpu_possible_map = CPU_MASK_NONE;
56 53
57static struct task_struct *current_set[NR_CPUS]; 54static struct task_struct *current_set[NR_CPUS];
58 55
59EXPORT_SYMBOL(cpu_online_map);
60
61/* 56/*
62 * Reboot, halt and power_off routines for SMP. 57 * Reboot, halt and power_off routines for SMP.
63 */ 58 */
@@ -490,10 +485,10 @@ void smp_ctl_clear_bit(int cr, int bit) {
490 * Lets check how many CPUs we have. 485 * Lets check how many CPUs we have.
491 */ 486 */
492 487
493void 488static unsigned int
494__init smp_check_cpus(unsigned int max_cpus) 489__init smp_count_cpus(void)
495{ 490{
496 int cpu, num_cpus; 491 unsigned int cpu, num_cpus;
497 __u16 boot_cpu_addr; 492 __u16 boot_cpu_addr;
498 493
499 /* 494 /*
@@ -503,19 +498,20 @@ __init smp_check_cpus(unsigned int max_cpus)
503 boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; 498 boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
504 current_thread_info()->cpu = 0; 499 current_thread_info()->cpu = 0;
505 num_cpus = 1; 500 num_cpus = 1;
506 for (cpu = 0; cpu <= 65535 && num_cpus < max_cpus; cpu++) { 501 for (cpu = 0; cpu <= 65535; cpu++) {
507 if ((__u16) cpu == boot_cpu_addr) 502 if ((__u16) cpu == boot_cpu_addr)
508 continue; 503 continue;
509 __cpu_logical_map[num_cpus] = (__u16) cpu; 504 __cpu_logical_map[1] = (__u16) cpu;
510 if (signal_processor(num_cpus, sigp_sense) == 505 if (signal_processor(1, sigp_sense) ==
511 sigp_not_operational) 506 sigp_not_operational)
512 continue; 507 continue;
513 cpu_set(num_cpus, cpu_present_map);
514 num_cpus++; 508 num_cpus++;
515 } 509 }
516 510
517 printk("Detected %d CPU's\n",(int) num_cpus); 511 printk("Detected %d CPU's\n",(int) num_cpus);
518 printk("Boot cpu address %2X\n", boot_cpu_addr); 512 printk("Boot cpu address %2X\n", boot_cpu_addr);
513
514 return num_cpus;
519} 515}
520 516
521/* 517/*
@@ -676,6 +672,44 @@ __cpu_up(unsigned int cpu)
676 return 0; 672 return 0;
677} 673}
678 674
675static unsigned int __initdata additional_cpus;
676static unsigned int __initdata possible_cpus;
677
678void __init smp_setup_cpu_possible_map(void)
679{
680 unsigned int phy_cpus, pos_cpus, cpu;
681
682 phy_cpus = smp_count_cpus();
683 pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
684
685 if (possible_cpus)
686 pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
687
688 for (cpu = 0; cpu < pos_cpus; cpu++)
689 cpu_set(cpu, cpu_possible_map);
690
691 phy_cpus = min(phy_cpus, pos_cpus);
692
693 for (cpu = 0; cpu < phy_cpus; cpu++)
694 cpu_set(cpu, cpu_present_map);
695}
696
697#ifdef CONFIG_HOTPLUG_CPU
698
699static int __init setup_additional_cpus(char *s)
700{
701 additional_cpus = simple_strtoul(s, NULL, 0);
702 return 0;
703}
704early_param("additional_cpus", setup_additional_cpus);
705
706static int __init setup_possible_cpus(char *s)
707{
708 possible_cpus = simple_strtoul(s, NULL, 0);
709 return 0;
710}
711early_param("possible_cpus", setup_possible_cpus);
712
679int 713int
680__cpu_disable(void) 714__cpu_disable(void)
681{ 715{
@@ -744,6 +778,8 @@ cpu_die(void)
744 for(;;); 778 for(;;);
745} 779}
746 780
781#endif /* CONFIG_HOTPLUG_CPU */
782
747/* 783/*
748 * Cycle through the processors and setup structures. 784 * Cycle through the processors and setup structures.
749 */ 785 */
@@ -757,7 +793,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
757 /* request the 0x1201 emergency signal external interrupt */ 793 /* request the 0x1201 emergency signal external interrupt */
758 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) 794 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
759 panic("Couldn't request external interrupt 0x1201"); 795 panic("Couldn't request external interrupt 0x1201");
760 smp_check_cpus(max_cpus);
761 memset(lowcore_ptr,0,sizeof(lowcore_ptr)); 796 memset(lowcore_ptr,0,sizeof(lowcore_ptr));
762 /* 797 /*
763 * Initialize prefix pages and stacks for all possible cpus 798 * Initialize prefix pages and stacks for all possible cpus
@@ -806,7 +841,6 @@ void __devinit smp_prepare_boot_cpu(void)
806 BUG_ON(smp_processor_id() != 0); 841 BUG_ON(smp_processor_id() != 0);
807 842
808 cpu_set(0, cpu_online_map); 843 cpu_set(0, cpu_online_map);
809 cpu_set(0, cpu_present_map);
810 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 844 S390_lowcore.percpu_offset = __per_cpu_offset[0];
811 current_set[0] = current; 845 current_set[0] = current;
812} 846}
@@ -845,6 +879,7 @@ static int __init topology_init(void)
845 879
846subsys_initcall(topology_init); 880subsys_initcall(topology_init);
847 881
882EXPORT_SYMBOL(cpu_online_map);
848EXPORT_SYMBOL(cpu_possible_map); 883EXPORT_SYMBOL(cpu_possible_map);
849EXPORT_SYMBOL(lowcore_ptr); 884EXPORT_SYMBOL(lowcore_ptr);
850EXPORT_SYMBOL(smp_ctl_set_bit); 885EXPORT_SYMBOL(smp_ctl_set_bit);
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 84921fe8d266..7c88d85c3597 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -301,7 +301,7 @@ SYSCALL(sys_mkdirat,sys_mkdirat,sys_mkdirat_wrapper)
301SYSCALL(sys_mknodat,sys_mknodat,sys_mknodat_wrapper) /* 290 */ 301SYSCALL(sys_mknodat,sys_mknodat,sys_mknodat_wrapper) /* 290 */
302SYSCALL(sys_fchownat,sys_fchownat,sys_fchownat_wrapper) 302SYSCALL(sys_fchownat,sys_fchownat,sys_fchownat_wrapper)
303SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat_wrapper) 303SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat_wrapper)
304SYSCALL(sys_fstatat64,sys_newfstatat,sys32_fstatat_wrapper) 304SYSCALL(sys_fstatat64,sys_newfstatat,sys32_fstatat64_wrapper)
305SYSCALL(sys_unlinkat,sys_unlinkat,sys_unlinkat_wrapper) 305SYSCALL(sys_unlinkat,sys_unlinkat,sys_unlinkat_wrapper)
306SYSCALL(sys_renameat,sys_renameat,sys_renameat_wrapper) /* 295 */ 306SYSCALL(sys_renameat,sys_renameat,sys_renameat_wrapper) /* 295 */
307SYSCALL(sys_linkat,sys_linkat,sys_linkat_wrapper) 307SYSCALL(sys_linkat,sys_linkat,sys_linkat_wrapper)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 504d56f8ca7f..e73621d03a28 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -446,7 +446,7 @@ endmenu
446 446
447config ISA_DMA_API 447config ISA_DMA_API
448 bool 448 bool
449 depends on MPC1211 449 depends on SH_MPC1211
450 default y 450 default y
451 451
452menu "Kernel features" 452menu "Kernel features"
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 56832929a543..b337136f28b6 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.16-rc1-git2 3# Linux kernel version: 2.6.16-rc3
4# Thu Jan 19 10:05:21 2006 4# Mon Feb 13 22:31:24 2006
5# 5#
6CONFIG_X86_64=y 6CONFIG_X86_64=y
7CONFIG_64BIT=y 7CONFIG_64BIT=y
@@ -21,7 +21,6 @@ CONFIG_DMI=y
21# Code maturity level options 21# Code maturity level options
22# 22#
23CONFIG_EXPERIMENTAL=y 23CONFIG_EXPERIMENTAL=y
24CONFIG_CLEAN_COMPILE=y
25CONFIG_LOCK_KERNEL=y 24CONFIG_LOCK_KERNEL=y
26CONFIG_INIT_ENV_ARG_LIMIT=32 25CONFIG_INIT_ENV_ARG_LIMIT=32
27 26
@@ -267,6 +266,7 @@ CONFIG_NET=y
267# 266#
268# Networking options 267# Networking options
269# 268#
269# CONFIG_NETDEBUG is not set
270CONFIG_PACKET=y 270CONFIG_PACKET=y
271# CONFIG_PACKET_MMAP is not set 271# CONFIG_PACKET_MMAP is not set
272CONFIG_UNIX=y 272CONFIG_UNIX=y
@@ -446,7 +446,6 @@ CONFIG_BLK_DEV_PIIX=y
446# CONFIG_BLK_DEV_NS87415 is not set 446# CONFIG_BLK_DEV_NS87415 is not set
447# CONFIG_BLK_DEV_PDC202XX_OLD is not set 447# CONFIG_BLK_DEV_PDC202XX_OLD is not set
448CONFIG_BLK_DEV_PDC202XX_NEW=y 448CONFIG_BLK_DEV_PDC202XX_NEW=y
449# CONFIG_PDC202XX_FORCE is not set
450# CONFIG_BLK_DEV_SVWKS is not set 449# CONFIG_BLK_DEV_SVWKS is not set
451# CONFIG_BLK_DEV_SIIMAGE is not set 450# CONFIG_BLK_DEV_SIIMAGE is not set
452# CONFIG_BLK_DEV_SIS5513 is not set 451# CONFIG_BLK_DEV_SIS5513 is not set
@@ -573,7 +572,33 @@ CONFIG_FUSION_MAX_SGE=128
573# 572#
574# IEEE 1394 (FireWire) support 573# IEEE 1394 (FireWire) support
575# 574#
576# CONFIG_IEEE1394 is not set 575CONFIG_IEEE1394=y
576
577#
578# Subsystem Options
579#
580# CONFIG_IEEE1394_VERBOSEDEBUG is not set
581# CONFIG_IEEE1394_OUI_DB is not set
582# CONFIG_IEEE1394_EXTRA_CONFIG_ROMS is not set
583# CONFIG_IEEE1394_EXPORT_FULL_API is not set
584
585#
586# Device Drivers
587#
588
589#
590# Texas Instruments PCILynx requires I2C
591#
592CONFIG_IEEE1394_OHCI1394=y
593
594#
595# Protocol Drivers
596#
597# CONFIG_IEEE1394_VIDEO1394 is not set
598# CONFIG_IEEE1394_SBP2 is not set
599# CONFIG_IEEE1394_ETH1394 is not set
600# CONFIG_IEEE1394_DV1394 is not set
601CONFIG_IEEE1394_RAWIO=y
577 602
578# 603#
579# I2O device support 604# I2O device support
@@ -772,6 +797,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4
772# 797#
773CONFIG_SERIAL_CORE=y 798CONFIG_SERIAL_CORE=y
774CONFIG_SERIAL_CORE_CONSOLE=y 799CONFIG_SERIAL_CORE_CONSOLE=y
800# CONFIG_SERIAL_JSM is not set
775CONFIG_UNIX98_PTYS=y 801CONFIG_UNIX98_PTYS=y
776CONFIG_LEGACY_PTYS=y 802CONFIG_LEGACY_PTYS=y
777CONFIG_LEGACY_PTY_COUNT=256 803CONFIG_LEGACY_PTY_COUNT=256
@@ -871,6 +897,7 @@ CONFIG_HPET_MMAP=y
871# 897#
872CONFIG_HWMON=y 898CONFIG_HWMON=y
873# CONFIG_HWMON_VID is not set 899# CONFIG_HWMON_VID is not set
900# CONFIG_SENSORS_F71805F is not set
874# CONFIG_SENSORS_HDAPS is not set 901# CONFIG_SENSORS_HDAPS is not set
875# CONFIG_HWMON_DEBUG_CHIP is not set 902# CONFIG_HWMON_DEBUG_CHIP is not set
876 903
@@ -1101,7 +1128,6 @@ CONFIG_USB_MON=y
1101# EDAC - error detection and reporting (RAS) 1128# EDAC - error detection and reporting (RAS)
1102# 1129#
1103# CONFIG_EDAC is not set 1130# CONFIG_EDAC is not set
1104# CONFIG_EDAC_POLL is not set
1105 1131
1106# 1132#
1107# Firmware Drivers 1133# Firmware Drivers
@@ -1291,14 +1317,12 @@ CONFIG_DETECT_SOFTLOCKUP=y
1291# CONFIG_DEBUG_SPINLOCK is not set 1317# CONFIG_DEBUG_SPINLOCK is not set
1292# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1318# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1293# CONFIG_DEBUG_KOBJECT is not set 1319# CONFIG_DEBUG_KOBJECT is not set
1294# CONFIG_DEBUG_INFO is not set 1320CONFIG_DEBUG_INFO=y
1295CONFIG_DEBUG_FS=y 1321CONFIG_DEBUG_FS=y
1296# CONFIG_DEBUG_VM is not set 1322# CONFIG_DEBUG_VM is not set
1297# CONFIG_FRAME_POINTER is not set 1323# CONFIG_FRAME_POINTER is not set
1298# CONFIG_FORCED_INLINING is not set 1324# CONFIG_FORCED_INLINING is not set
1299# CONFIG_UNWIND_INFO is not set
1300# CONFIG_RCU_TORTURE_TEST is not set 1325# CONFIG_RCU_TORTURE_TEST is not set
1301CONFIG_INIT_DEBUG=y
1302# CONFIG_DEBUG_RODATA is not set 1326# CONFIG_DEBUG_RODATA is not set
1303# CONFIG_IOMMU_DEBUG is not set 1327# CONFIG_IOMMU_DEBUG is not set
1304 1328
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 7a0a3e8d5d72..e5b14c57eaa0 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -1152,6 +1152,7 @@ __setup("noapicmaintimer", setup_noapicmaintimer);
1152static __init int setup_apicpmtimer(char *s) 1152static __init int setup_apicpmtimer(char *s)
1153{ 1153{
1154 apic_calibrate_pmtmr = 1; 1154 apic_calibrate_pmtmr = 1;
1155 notsc_setup(NULL);
1155 return setup_apicmaintimer(NULL); 1156 return setup_apicmaintimer(NULL);
1156} 1157}
1157__setup("apicpmtimer", setup_apicpmtimer); 1158__setup("apicpmtimer", setup_apicpmtimer);
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index b150c87a08c6..7c10e9009d61 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -554,6 +554,7 @@ iret_label:
554 /* running with kernel gs */ 554 /* running with kernel gs */
555bad_iret: 555bad_iret:
556 movq $-9999,%rdi /* better code? */ 556 movq $-9999,%rdi /* better code? */
557 sti
557 jmp do_exit 558 jmp do_exit
558 .previous 559 .previous
559 560
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 692c737feddb..02fc7fa0ea28 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -213,6 +213,11 @@ ENTRY(early_idt_handler)
213 cmpl $2,early_recursion_flag(%rip) 213 cmpl $2,early_recursion_flag(%rip)
214 jz 1f 214 jz 1f
215 call dump_stack 215 call dump_stack
216#ifdef CONFIG_KALLSYMS
217 leaq early_idt_ripmsg(%rip),%rdi
218 movq 8(%rsp),%rsi # get rip again
219 call __print_symbol
220#endif
2161: hlt 2211: hlt
217 jmp 1b 222 jmp 1b
218early_recursion_flag: 223early_recursion_flag:
@@ -220,6 +225,8 @@ early_recursion_flag:
220 225
221early_idt_msg: 226early_idt_msg:
222 .asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n" 227 .asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
228early_idt_ripmsg:
229 .asciz "RIP %s\n"
223 230
224.code32 231.code32
225ENTRY(no_long_mode) 232ENTRY(no_long_mode)
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 4282d72b2a26..2585c1d92b26 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -30,6 +30,9 @@
30#include <linux/mc146818rtc.h> 30#include <linux/mc146818rtc.h>
31#include <linux/acpi.h> 31#include <linux/acpi.h>
32#include <linux/sysdev.h> 32#include <linux/sysdev.h>
33#ifdef CONFIG_ACPI
34#include <acpi/acpi_bus.h>
35#endif
33 36
34#include <asm/io.h> 37#include <asm/io.h>
35#include <asm/smp.h> 38#include <asm/smp.h>
@@ -260,6 +263,8 @@ __setup("apic", enable_ioapic_setup);
260 263
261 And another hack to disable the IOMMU on VIA chipsets. 264 And another hack to disable the IOMMU on VIA chipsets.
262 265
266 ... and others. Really should move this somewhere else.
267
263 Kludge-O-Rama. */ 268 Kludge-O-Rama. */
264void __init check_ioapic(void) 269void __init check_ioapic(void)
265{ 270{
@@ -307,6 +312,17 @@ void __init check_ioapic(void)
307 case PCI_VENDOR_ID_ATI: 312 case PCI_VENDOR_ID_ATI:
308 if (apic_runs_main_timer != 0) 313 if (apic_runs_main_timer != 0)
309 break; 314 break;
315#ifdef CONFIG_ACPI
316 /* Don't do this for laptops right
317 right now because their timer
318 doesn't necessarily tick in C2/3 */
319 if (acpi_fadt.revision >= 3 &&
320 (acpi_fadt.plvl2_lat + acpi_fadt.plvl3_lat) < 1100) {
321 printk(KERN_INFO
322"ATI board detected, but seems to be a laptop. Timer might be shakey, sorry\n");
323 break;
324 }
325#endif
310 printk(KERN_INFO 326 printk(KERN_INFO
311 "ATI board detected. Using APIC/PM timer.\n"); 327 "ATI board detected. Using APIC/PM timer.\n");
312 apic_runs_main_timer = 1; 328 apic_runs_main_timer = 1;
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index dc49bfb6db0a..9013a90b5c2e 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -288,9 +288,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
288 288
289 memcpy(str,mpc->mpc_productid,12); 289 memcpy(str,mpc->mpc_productid,12);
290 str[12]=0; 290 str[12]=0;
291 printk(KERN_INFO "Product ID: %s ",str); 291 printk("Product ID: %s ",str);
292 292
293 printk(KERN_INFO "APIC at: 0x%X\n",mpc->mpc_lapic); 293 printk("APIC at: 0x%X\n",mpc->mpc_lapic);
294 294
295 /* save the local APIC address, it might be non-default */ 295 /* save the local APIC address, it might be non-default */
296 if (!acpi_lapic) 296 if (!acpi_lapic)
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 8be407a1f62d..5bf17e41cd2d 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -236,6 +236,7 @@ static void enable_lapic_nmi_watchdog(void)
236{ 236{
237 if (nmi_active < 0) { 237 if (nmi_active < 0) {
238 nmi_watchdog = NMI_LOCAL_APIC; 238 nmi_watchdog = NMI_LOCAL_APIC;
239 touch_nmi_watchdog();
239 setup_apic_nmi_watchdog(); 240 setup_apic_nmi_watchdog();
240 } 241 }
241} 242}
@@ -456,15 +457,17 @@ static DEFINE_PER_CPU(int, nmi_touch);
456 457
457void touch_nmi_watchdog (void) 458void touch_nmi_watchdog (void)
458{ 459{
459 int i; 460 if (nmi_watchdog > 0) {
461 unsigned cpu;
460 462
461 /* 463 /*
462 * Tell other CPUs to reset their alert counters. We cannot 464 * Tell other CPUs to reset their alert counters. We cannot
463 * do it ourselves because the alert count increase is not 465 * do it ourselves because the alert count increase is not
464 * atomic. 466 * atomic.
465 */ 467 */
466 for (i = 0; i < NR_CPUS; i++) 468 for_each_present_cpu (cpu)
467 per_cpu(nmi_touch, i) = 1; 469 per_cpu(nmi_touch, cpu) = 1;
470 }
468 471
469 touch_softlockup_watchdog(); 472 touch_softlockup_watchdog();
470} 473}
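touch_nmi_watchdog() now only walks present CPUs, and only when an NMI watchdog is actually configured; each CPU clears its own alert counter the next time it notices its nmi_touch flag, because (as the existing comment says) the counter increment is not atomic and cannot safely be reset remotely. Condensed, with the per-CPU declaration pulled in for context:

static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog(void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/* ask every present CPU to reset its own alert counter */
		for_each_present_cpu(cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}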
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 3c58c30506a1..67841d11ed1f 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -1327,8 +1327,7 @@ static int __init nohpet_setup(char *s)
1327 1327
1328__setup("nohpet", nohpet_setup); 1328__setup("nohpet", nohpet_setup);
1329 1329
1330 1330int __init notsc_setup(char *s)
1331static int __init notsc_setup(char *s)
1332{ 1331{
1333 notsc = 1; 1332 notsc = 1;
1334 return 0; 1333 return 0;
diff --git a/arch/x86_64/mm/k8topology.c b/arch/x86_64/mm/k8topology.c
index a5663e0bb01c..dd60e71fdba6 100644
--- a/arch/x86_64/mm/k8topology.c
+++ b/arch/x86_64/mm/k8topology.c
@@ -155,7 +155,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
155 if (!found) 155 if (!found)
156 return -1; 156 return -1;
157 157
158 memnode_shift = compute_hash_shift(nodes, numnodes); 158 memnode_shift = compute_hash_shift(nodes, 8);
159 if (memnode_shift < 0) { 159 if (memnode_shift < 0) {
160 printk(KERN_ERR "No NUMA node hash function found. Contact maintainer\n"); 160 printk(KERN_ERR "No NUMA node hash function found. Contact maintainer\n");
161 return -1; 161 return -1;
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 6ef9f9a76235..22e51beee8d3 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -351,7 +351,7 @@ void __init init_cpu_to_node(void)
351 continue; 351 continue;
352 if (apicid_to_node[apicid] == NUMA_NO_NODE) 352 if (apicid_to_node[apicid] == NUMA_NO_NODE)
353 continue; 353 continue;
354 cpu_to_node[i] = apicid_to_node[apicid]; 354 numa_set_node(i,apicid_to_node[apicid]);
355 } 355 }
356} 356}
357 357
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index cd25300726fc..482c25767369 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -228,7 +228,8 @@ static int nodes_cover_memory(void)
228 } 228 }
229 229
230 e820ram = end_pfn - e820_hole_size(0, end_pfn); 230 e820ram = end_pfn - e820_hole_size(0, end_pfn);
231 if (pxmram < e820ram) { 231 /* We seem to lose 3 pages somewhere. Allow a bit of slack. */
232 if ((long)(e820ram - pxmram) >= 1*1024*1024) {
232 printk(KERN_ERR 233 printk(KERN_ERR
233 "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n", 234 "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
234 (pxmram << PAGE_SHIFT) >> 20, 235 (pxmram << PAGE_SHIFT) >> 20,
@@ -270,7 +271,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
270 return -1; 271 return -1;
271 } 272 }
272 273
273 memnode_shift = compute_hash_shift(nodes, nodes_weight(nodes_parsed)); 274 memnode_shift = compute_hash_shift(nodes, MAX_NUMNODES);
274 if (memnode_shift < 0) { 275 if (memnode_shift < 0) {
275 printk(KERN_ERR 276 printk(KERN_ERR
276 "SRAT: No NUMA node hash function found. Contact maintainer\n"); 277 "SRAT: No NUMA node hash function found. Contact maintainer\n");
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index 7d6481d9fbec..4038dbfa63a0 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -391,8 +391,7 @@ acpi_rs_get_list_length(u8 * aml_buffer,
391 * Ensure a 32-bit boundary for the structure 391 * Ensure a 32-bit boundary for the structure
392 */ 392 */
393 extra_struct_bytes = 393 extra_struct_bytes =
394 ACPI_ROUND_UP_to_32_bITS(resource_length) - 394 ACPI_ROUND_UP_to_32_bITS(resource_length);
395 resource_length;
396 break; 395 break;
397 396
398 case ACPI_RESOURCE_NAME_END_TAG: 397 case ACPI_RESOURCE_NAME_END_TAG:
@@ -408,8 +407,7 @@ acpi_rs_get_list_length(u8 * aml_buffer,
408 * Add vendor data and ensure a 32-bit boundary for the structure 407 * Add vendor data and ensure a 32-bit boundary for the structure
409 */ 408 */
410 extra_struct_bytes = 409 extra_struct_bytes =
411 ACPI_ROUND_UP_to_32_bITS(resource_length) - 410 ACPI_ROUND_UP_to_32_bITS(resource_length);
412 resource_length;
413 break; 411 break;
414 412
415 case ACPI_RESOURCE_NAME_ADDRESS32: 413 case ACPI_RESOURCE_NAME_ADDRESS32:
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index ec7590951af5..24095f6ee6da 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -33,6 +33,7 @@
33static int TPM_INF_DATA; 33static int TPM_INF_DATA;
34static int TPM_INF_ADDR; 34static int TPM_INF_ADDR;
35static int TPM_INF_BASE; 35static int TPM_INF_BASE;
36static int TPM_INF_ADDR_LEN;
36static int TPM_INF_PORT_LEN; 37static int TPM_INF_PORT_LEN;
37 38
38/* TPM header definitions */ 39/* TPM header definitions */
@@ -195,6 +196,7 @@ static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count)
195 int i; 196 int i;
196 int ret; 197 int ret;
197 u32 size = 0; 198 u32 size = 0;
199 number_of_wtx = 0;
198 200
199recv_begin: 201recv_begin:
200 /* start receiving header */ 202 /* start receiving header */
@@ -378,24 +380,35 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
378 if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && 380 if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
379 !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) { 381 !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
380 TPM_INF_ADDR = pnp_port_start(dev, 0); 382 TPM_INF_ADDR = pnp_port_start(dev, 0);
383 TPM_INF_ADDR_LEN = pnp_port_len(dev, 0);
381 TPM_INF_DATA = (TPM_INF_ADDR + 1); 384 TPM_INF_DATA = (TPM_INF_ADDR + 1);
382 TPM_INF_BASE = pnp_port_start(dev, 1); 385 TPM_INF_BASE = pnp_port_start(dev, 1);
383 TPM_INF_PORT_LEN = pnp_port_len(dev, 1); 386 TPM_INF_PORT_LEN = pnp_port_len(dev, 1);
384 if (!TPM_INF_PORT_LEN) 387 if ((TPM_INF_PORT_LEN < 4) || (TPM_INF_ADDR_LEN < 2)) {
385 return -EINVAL; 388 rc = -EINVAL;
389 goto err_last;
390 }
386 dev_info(&dev->dev, "Found %s with ID %s\n", 391 dev_info(&dev->dev, "Found %s with ID %s\n",
387 dev->name, dev_id->id); 392 dev->name, dev_id->id);
388 if (!((TPM_INF_BASE >> 8) & 0xff)) 393 if (!((TPM_INF_BASE >> 8) & 0xff)) {
389 return -EINVAL; 394 rc = -EINVAL;
395 goto err_last;
396 }
390 /* publish my base address and request region */ 397 /* publish my base address and request region */
391 tpm_inf.base = TPM_INF_BASE; 398 tpm_inf.base = TPM_INF_BASE;
392 if (request_region 399 if (request_region
393 (tpm_inf.base, TPM_INF_PORT_LEN, "tpm_infineon0") == NULL) { 400 (tpm_inf.base, TPM_INF_PORT_LEN, "tpm_infineon0") == NULL) {
394 release_region(tpm_inf.base, TPM_INF_PORT_LEN); 401 rc = -EINVAL;
395 return -EINVAL; 402 goto err_last;
403 }
404 if (request_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN,
405 "tpm_infineon0") == NULL) {
406 rc = -EINVAL;
407 goto err_last;
396 } 408 }
397 } else { 409 } else {
398 return -EINVAL; 410 rc = -EINVAL;
411 goto err_last;
399 } 412 }
400 413
401 /* query chip for its vendor, its version number a.s.o. */ 414 /* query chip for its vendor, its version number a.s.o. */
@@ -443,8 +456,8 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
443 dev_err(&dev->dev, 456 dev_err(&dev->dev,
444 "Could not set IO-ports to 0x%lx\n", 457 "Could not set IO-ports to 0x%lx\n",
445 tpm_inf.base); 458 tpm_inf.base);
446 release_region(tpm_inf.base, TPM_INF_PORT_LEN); 459 rc = -EIO;
447 return -EIO; 460 goto err_release_region;
448 } 461 }
449 462
450 /* activate register */ 463 /* activate register */
@@ -471,14 +484,21 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
471 484
472 rc = tpm_register_hardware(&dev->dev, &tpm_inf); 485 rc = tpm_register_hardware(&dev->dev, &tpm_inf);
473 if (rc < 0) { 486 if (rc < 0) {
474 release_region(tpm_inf.base, TPM_INF_PORT_LEN); 487 rc = -ENODEV;
475 return -ENODEV; 488 goto err_release_region;
476 } 489 }
477 return 0; 490 return 0;
478 } else { 491 } else {
479 dev_info(&dev->dev, "No Infineon TPM found!\n"); 492 rc = -ENODEV;
480 return -ENODEV; 493 goto err_release_region;
481 } 494 }
495
496err_release_region:
497 release_region(tpm_inf.base, TPM_INF_PORT_LEN);
498 release_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN);
499
500err_last:
501 return rc;
482} 502}
483 503
484static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev) 504static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
@@ -518,5 +538,5 @@ module_exit(cleanup_inf);
518 538
519MODULE_AUTHOR("Marcel Selhorst <selhorst@crypto.rub.de>"); 539MODULE_AUTHOR("Marcel Selhorst <selhorst@crypto.rub.de>");
520MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); 540MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
521MODULE_VERSION("1.6"); 541MODULE_VERSION("1.7");
522MODULE_LICENSE("GPL"); 542MODULE_LICENSE("GPL");
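The probe routine now funnels its failures through goto labels instead of releasing regions inline at every return, and it additionally reserves the control/data port pair at TPM_INF_ADDR. A condensed, slightly regularized sketch of the goto-unwinding idiom (not the literal patch code: do_rest_of_probe() is a placeholder for the vendor query and TPM registration, and this sketch also releases the first region when the second reservation fails):

static int example_probe(void)
{
	int rc;

	if (!request_region(TPM_INF_BASE, TPM_INF_PORT_LEN, "tpm_infineon0"))
		return -EINVAL;

	if (!request_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN, "tpm_infineon0")) {
		rc = -EINVAL;
		goto err_release_port;
	}

	rc = do_rest_of_probe();	/* placeholder: chip detection, registration */
	if (rc < 0)
		goto err_release_all;

	return 0;

err_release_all:
	release_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN);
err_release_port:
	release_region(TPM_INF_BASE, TPM_INF_PORT_LEN);
	return rc;
}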
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 9834dce4e20f..0606bd2f6020 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -34,6 +34,7 @@
34#include <linux/kernel.h> 34#include <linux/kernel.h>
35#include <linux/timer.h> 35#include <linux/timer.h>
36#include <linux/mm.h> 36#include <linux/mm.h>
37#include <linux/sched.h>
37#include <linux/interrupt.h> 38#include <linux/interrupt.h>
38#include <linux/major.h> 39#include <linux/major.h>
39#include <linux/errno.h> 40#include <linux/errno.h>
@@ -314,6 +315,8 @@ static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
314 if (rq->bio) /* fs request */ 315 if (rq->bio) /* fs request */
315 rq->errors = 0; 316 rq->errors = 0;
316 317
318 touch_softlockup_watchdog();
319
317 switch (drive->hwif->data_phase) { 320 switch (drive->hwif->data_phase) {
318 case TASKFILE_MULTI_IN: 321 case TASKFILE_MULTI_IN:
319 case TASKFILE_MULTI_OUT: 322 case TASKFILE_MULTI_OUT:
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 2b286e865163..43b96e298363 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -13,11 +13,6 @@
13 * License along with this program; if not, write the Free Software 13 * License along with this program; if not, write the Free Software
14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
15 * 15 *
16 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
17 * Mountain View, CA 94043, or:
18 *
19 * http://www.sgi.com
20 *
21 * For further information regarding this notice, see: 16 * For further information regarding this notice, see:
22 * 17 *
23 * http://oss.sgi.com/projects/GenInfo/NoticeExplan 18 * http://oss.sgi.com/projects/GenInfo/NoticeExplan
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 6e0afbb22383..2708167ba175 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
11obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o 11obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
12obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o 12obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o
13obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o 13obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
14obj-$(CONFIG_KEYBOARD_98KBD) += 98kbd.o
15obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o 14obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o
16obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o 15obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
17obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o 16obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 184c4129470d..415c49178985 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -7,7 +7,6 @@
7obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o 7obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
8obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o 8obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o
9obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o 9obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
10obj-$(CONFIG_INPUT_98SPKR) += 98spkr.o
11obj-$(CONFIG_INPUT_UINPUT) += uinput.o 10obj-$(CONFIG_INPUT_UINPUT) += uinput.o
12obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o 11obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o
13obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o 12obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index d448bb5e4869..3a6ae85cd69c 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -19,6 +19,7 @@
19#include <linux/input.h> 19#include <linux/input.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/interrupt.h>
22#include <asm/hardware.h> 23#include <asm/hardware.h>
23 24
24MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); 25MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
diff --git a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c
index c88520d3d13c..40333d61093c 100644
--- a/drivers/input/mouse/logips2pp.c
+++ b/drivers/input/mouse/logips2pp.c
@@ -232,6 +232,7 @@ static struct ps2pp_info *get_model_info(unsigned char model)
232 { 88, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, 232 { 88, PS2PP_KIND_WHEEL, PS2PP_WHEEL },
233 { 96, 0, 0 }, 233 { 96, 0, 0 },
234 { 97, PS2PP_KIND_TP3, PS2PP_WHEEL | PS2PP_HWHEEL }, 234 { 97, PS2PP_KIND_TP3, PS2PP_WHEEL | PS2PP_HWHEEL },
235 { 99, PS2PP_KIND_WHEEL, PS2PP_WHEEL },
235 { 100, PS2PP_KIND_MX, /* MX510 */ 236 { 100, PS2PP_KIND_MX, /* MX510 */
236 PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN | 237 PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN |
237 PS2PP_EXTRA_BTN | PS2PP_NAV_BTN }, 238 PS2PP_EXTRA_BTN | PS2PP_NAV_BTN },
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index b4898d8a68e2..6d9ec9ab1b90 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -68,15 +68,19 @@ struct trackpoint_attr_data {
68 size_t field_offset; 68 size_t field_offset;
69 unsigned char command; 69 unsigned char command;
70 unsigned char mask; 70 unsigned char mask;
71 unsigned char inverted;
71}; 72};
72 73
73static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf) 74static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf)
74{ 75{
75 struct trackpoint_data *tp = psmouse->private; 76 struct trackpoint_data *tp = psmouse->private;
76 struct trackpoint_attr_data *attr = data; 77 struct trackpoint_attr_data *attr = data;
77 unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset); 78 unsigned char value = *(unsigned char *)((char *)tp + attr->field_offset);
79
80 if (attr->inverted)
81 value = !value;
78 82
79 return sprintf(buf, "%u\n", *field); 83 return sprintf(buf, "%u\n", value);
80} 84}
81 85
82static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data, 86static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
@@ -120,6 +124,9 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
120 if (*rest || value > 1) 124 if (*rest || value > 1)
121 return -EINVAL; 125 return -EINVAL;
122 126
127 if (attr->inverted)
128 value = !value;
129
123 if (*field != value) { 130 if (*field != value) {
124 *field = value; 131 *field = value;
125 trackpoint_toggle_bit(&psmouse->ps2dev, attr->command, attr->mask); 132 trackpoint_toggle_bit(&psmouse->ps2dev, attr->command, attr->mask);
@@ -129,11 +136,12 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
129} 136}
130 137
131 138
132#define TRACKPOINT_BIT_ATTR(_name, _command, _mask) \ 139#define TRACKPOINT_BIT_ATTR(_name, _command, _mask, _inv) \
133 static struct trackpoint_attr_data trackpoint_attr_##_name = { \ 140 static struct trackpoint_attr_data trackpoint_attr_##_name = { \
134 .field_offset = offsetof(struct trackpoint_data, _name), \ 141 .field_offset = offsetof(struct trackpoint_data, _name), \
135 .command = _command, \ 142 .command = _command, \
136 .mask = _mask, \ 143 .mask = _mask, \
144 .inverted = _inv, \
137 }; \ 145 }; \
138 PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO, \ 146 PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO, \
139 &trackpoint_attr_##_name, \ 147 &trackpoint_attr_##_name, \
@@ -150,9 +158,9 @@ TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH);
150TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME); 158TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME);
151TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV); 159TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV);
152 160
153TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON); 161TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0);
154TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK); 162TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, 0);
155TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV); 163TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, 1);
156 164
157static struct attribute *trackpoint_attrs[] = { 165static struct attribute *trackpoint_attrs[] = {
158 &psmouse_attr_sensitivity.dattr.attr, 166 &psmouse_attr_sensitivity.dattr.attr,
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 9857d8b6ad66..050298b1a09d 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -78,7 +78,7 @@
78 78
79#define TP_TOGGLE_MB 0x23 /* Disable/Enable Middle Button */ 79#define TP_TOGGLE_MB 0x23 /* Disable/Enable Middle Button */
80#define TP_MASK_MB 0x01 80#define TP_MASK_MB 0x01
81#define TP_TOGGLE_EXT_DEV 0x23 /* Toggle external device */ 81#define TP_TOGGLE_EXT_DEV 0x23 /* Disable external device */
82#define TP_MASK_EXT_DEV 0x02 82#define TP_MASK_EXT_DEV 0x02
83#define TP_TOGGLE_DRIFT 0x23 /* Drift Correction */ 83#define TP_TOGGLE_DRIFT 0x23 /* Drift Correction */
84#define TP_MASK_DRIFT 0x80 84#define TP_MASK_DRIFT 0x80
@@ -125,7 +125,7 @@
125#define TP_DEF_MB 0x00 125#define TP_DEF_MB 0x00
126#define TP_DEF_PTSON 0x00 126#define TP_DEF_PTSON 0x00
127#define TP_DEF_SKIPBACK 0x00 127#define TP_DEF_SKIPBACK 0x00
128#define TP_DEF_EXT_DEV 0x01 128#define TP_DEF_EXT_DEV 0x00 /* 0 means enabled */
129 129
130#define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd)) 130#define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd))
131 131
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index 678a8599f9ff..4155197867a3 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_SERIO_RPCKBD) += rpckbd.o
13obj-$(CONFIG_SERIO_SA1111) += sa1111ps2.o 13obj-$(CONFIG_SERIO_SA1111) += sa1111ps2.o
14obj-$(CONFIG_SERIO_AMBAKMI) += ambakmi.o 14obj-$(CONFIG_SERIO_AMBAKMI) += ambakmi.o
15obj-$(CONFIG_SERIO_Q40KBD) += q40kbd.o 15obj-$(CONFIG_SERIO_Q40KBD) += q40kbd.o
16obj-$(CONFIG_SERIO_98KBD) += 98kbd-io.o
17obj-$(CONFIG_SERIO_GSCPS2) += gscps2.o 16obj-$(CONFIG_SERIO_GSCPS2) += gscps2.o
18obj-$(CONFIG_HP_SDC) += hp_sdc.o 17obj-$(CONFIG_HP_SDC) += hp_sdc.o
19obj-$(CONFIG_HIL_MLC) += hp_sdc_mlc.o hil_mlc.o 18obj-$(CONFIG_HIL_MLC) += hp_sdc_mlc.o hil_mlc.o
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index b45a45ca7cc9..8c12a974b411 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -48,10 +48,13 @@
48 48
49#define TS_POLL_PERIOD msecs_to_jiffies(10) 49#define TS_POLL_PERIOD msecs_to_jiffies(10)
50 50
51/* this driver doesn't aim at the peak continuous sample rate */
52#define SAMPLE_BITS (8 /*cmd*/ + 16 /*sample*/ + 2 /* before, after */)
53
51struct ts_event { 54struct ts_event {
52 /* For portability, we can't read 12 bit values using SPI (which 55 /* For portability, we can't read 12 bit values using SPI (which
53 * would make the controller deliver them as native byteorder u16 56 * would make the controller deliver them as native byteorder u16
54 * with msbs zeroed). Instead, we read them as two 8-byte values, 57 * with msbs zeroed). Instead, we read them as two 8-bit values,
55 * which need byteswapping then range adjustment. 58 * which need byteswapping then range adjustment.
56 */ 59 */
57 __be16 x; 60 __be16 x;
@@ -60,7 +63,7 @@ struct ts_event {
60}; 63};
61 64
62struct ads7846 { 65struct ads7846 {
63 struct input_dev input; 66 struct input_dev *input;
64 char phys[32]; 67 char phys[32];
65 68
66 struct spi_device *spi; 69 struct spi_device *spi;
@@ -68,6 +71,7 @@ struct ads7846 {
68 u16 vref_delay_usecs; 71 u16 vref_delay_usecs;
69 u16 x_plate_ohms; 72 u16 x_plate_ohms;
70 73
74 u8 read_x, read_y, read_z1, read_z2;
71 struct ts_event tc; 75 struct ts_event tc;
72 76
73 struct spi_transfer xfer[8]; 77 struct spi_transfer xfer[8];
@@ -117,10 +121,10 @@ struct ads7846 {
117#define READ_12BIT_DFR(x) (ADS_START | ADS_A2A1A0_d_ ## x \ 121#define READ_12BIT_DFR(x) (ADS_START | ADS_A2A1A0_d_ ## x \
118 | ADS_12_BIT | ADS_DFR) 122 | ADS_12_BIT | ADS_DFR)
119 123
120static const u8 read_y = READ_12BIT_DFR(y) | ADS_PD10_ADC_ON; 124#define READ_Y (READ_12BIT_DFR(y) | ADS_PD10_ADC_ON)
121static const u8 read_z1 = READ_12BIT_DFR(z1) | ADS_PD10_ADC_ON; 125#define READ_Z1 (READ_12BIT_DFR(z1) | ADS_PD10_ADC_ON)
122static const u8 read_z2 = READ_12BIT_DFR(z2) | ADS_PD10_ADC_ON; 126#define READ_Z2 (READ_12BIT_DFR(z2) | ADS_PD10_ADC_ON)
123static const u8 read_x = READ_12BIT_DFR(x) | ADS_PD10_PDOWN; /* LAST */ 127#define READ_X (READ_12BIT_DFR(x) | ADS_PD10_PDOWN) /* LAST */
124 128
125/* single-ended samples need to first power up reference voltage; 129/* single-ended samples need to first power up reference voltage;
126 * we leave both ADC and VREF powered 130 * we leave both ADC and VREF powered
@@ -128,8 +132,8 @@ static const u8 read_x = READ_12BIT_DFR(x) | ADS_PD10_PDOWN; /* LAST */
128#define READ_12BIT_SER(x) (ADS_START | ADS_A2A1A0_ ## x \ 132#define READ_12BIT_SER(x) (ADS_START | ADS_A2A1A0_ ## x \
129 | ADS_12_BIT | ADS_SER) 133 | ADS_12_BIT | ADS_SER)
130 134
131static const u8 ref_on = READ_12BIT_DFR(x) | ADS_PD10_ALL_ON; 135#define REF_ON (READ_12BIT_DFR(x) | ADS_PD10_ALL_ON)
132static const u8 ref_off = READ_12BIT_DFR(y) | ADS_PD10_PDOWN; 136#define REF_OFF (READ_12BIT_DFR(y) | ADS_PD10_PDOWN)
133 137
134/*--------------------------------------------------------------------------*/ 138/*--------------------------------------------------------------------------*/
135 139
@@ -138,7 +142,9 @@ static const u8 ref_off = READ_12BIT_DFR(y) | ADS_PD10_PDOWN;
138 */ 142 */
139 143
140struct ser_req { 144struct ser_req {
145 u8 ref_on;
141 u8 command; 146 u8 command;
147 u8 ref_off;
142 u16 scratch; 148 u16 scratch;
143 __be16 sample; 149 __be16 sample;
144 struct spi_message msg; 150 struct spi_message msg;
@@ -152,7 +158,7 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
152 struct ser_req *req = kzalloc(sizeof *req, SLAB_KERNEL); 158 struct ser_req *req = kzalloc(sizeof *req, SLAB_KERNEL);
153 int status; 159 int status;
154 int sample; 160 int sample;
155 int i; 161 int i;
156 162
157 if (!req) 163 if (!req)
158 return -ENOMEM; 164 return -ENOMEM;
@@ -160,7 +166,8 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
160 INIT_LIST_HEAD(&req->msg.transfers); 166 INIT_LIST_HEAD(&req->msg.transfers);
161 167
162 /* activate reference, so it has time to settle; */ 168 /* activate reference, so it has time to settle; */
163 req->xfer[0].tx_buf = &ref_on; 169 req->ref_on = REF_ON;
170 req->xfer[0].tx_buf = &req->ref_on;
164 req->xfer[0].len = 1; 171 req->xfer[0].len = 1;
165 req->xfer[1].rx_buf = &req->scratch; 172 req->xfer[1].rx_buf = &req->scratch;
166 req->xfer[1].len = 2; 173 req->xfer[1].len = 2;
@@ -182,7 +189,8 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
182 /* REVISIT: take a few more samples, and compare ... */ 189 /* REVISIT: take a few more samples, and compare ... */
183 190
184 /* turn off reference */ 191 /* turn off reference */
185 req->xfer[4].tx_buf = &ref_off; 192 req->ref_off = REF_OFF;
193 req->xfer[4].tx_buf = &req->ref_off;
186 req->xfer[4].len = 1; 194 req->xfer[4].len = 1;
187 req->xfer[5].rx_buf = &req->scratch; 195 req->xfer[5].rx_buf = &req->scratch;
188 req->xfer[5].len = 2; 196 req->xfer[5].len = 2;
@@ -236,11 +244,12 @@ SHOW(vbatt)
236 244
237static void ads7846_rx(void *ads) 245static void ads7846_rx(void *ads)
238{ 246{
239 struct ads7846 *ts = ads; 247 struct ads7846 *ts = ads;
240 unsigned Rt; 248 struct input_dev *input_dev = ts->input;
241 unsigned sync = 0; 249 unsigned Rt;
242 u16 x, y, z1, z2; 250 unsigned sync = 0;
243 unsigned long flags; 251 u16 x, y, z1, z2;
252 unsigned long flags;
244 253
245 /* adjust: 12 bit samples (left aligned), built from 254 /* adjust: 12 bit samples (left aligned), built from
246 * two 8 bit values writen msb-first. 255 * two 8 bit values writen msb-first.
@@ -276,21 +285,21 @@ static void ads7846_rx(void *ads)
276 * won't notice that, even if nPENIRQ never fires ... 285 * won't notice that, even if nPENIRQ never fires ...
277 */ 286 */
278 if (!ts->pendown && Rt != 0) { 287 if (!ts->pendown && Rt != 0) {
279 input_report_key(&ts->input, BTN_TOUCH, 1); 288 input_report_key(input_dev, BTN_TOUCH, 1);
280 sync = 1; 289 sync = 1;
281 } else if (ts->pendown && Rt == 0) { 290 } else if (ts->pendown && Rt == 0) {
282 input_report_key(&ts->input, BTN_TOUCH, 0); 291 input_report_key(input_dev, BTN_TOUCH, 0);
283 sync = 1; 292 sync = 1;
284 } 293 }
285 294
286 if (Rt) { 295 if (Rt) {
287 input_report_abs(&ts->input, ABS_X, x); 296 input_report_abs(input_dev, ABS_X, x);
288 input_report_abs(&ts->input, ABS_Y, y); 297 input_report_abs(input_dev, ABS_Y, y);
289 input_report_abs(&ts->input, ABS_PRESSURE, Rt); 298 input_report_abs(input_dev, ABS_PRESSURE, Rt);
290 sync = 1; 299 sync = 1;
291 } 300 }
292 if (sync) 301 if (sync)
293 input_sync(&ts->input); 302 input_sync(input_dev);
294 303
295#ifdef VERBOSE 304#ifdef VERBOSE
296 if (Rt || ts->pendown) 305 if (Rt || ts->pendown)
@@ -396,9 +405,10 @@ static int ads7846_resume(struct spi_device *spi)
396static int __devinit ads7846_probe(struct spi_device *spi) 405static int __devinit ads7846_probe(struct spi_device *spi)
397{ 406{
398 struct ads7846 *ts; 407 struct ads7846 *ts;
408 struct input_dev *input_dev;
399 struct ads7846_platform_data *pdata = spi->dev.platform_data; 409 struct ads7846_platform_data *pdata = spi->dev.platform_data;
400 struct spi_transfer *x; 410 struct spi_transfer *x;
401 int i; 411 int err;
402 412
403 if (!spi->irq) { 413 if (!spi->irq) {
404 dev_dbg(&spi->dev, "no IRQ?\n"); 414 dev_dbg(&spi->dev, "no IRQ?\n");
@@ -411,9 +421,9 @@ static int __devinit ads7846_probe(struct spi_device *spi)
411 } 421 }
412 422
413 /* don't exceed max specified sample rate */ 423 /* don't exceed max specified sample rate */
414 if (spi->max_speed_hz > (125000 * 16)) { 424 if (spi->max_speed_hz > (125000 * SAMPLE_BITS)) {
415 dev_dbg(&spi->dev, "f(sample) %d KHz?\n", 425 dev_dbg(&spi->dev, "f(sample) %d KHz?\n",
416 (spi->max_speed_hz/16)/1000); 426 (spi->max_speed_hz/SAMPLE_BITS)/1000);
417 return -EINVAL; 427 return -EINVAL;
418 } 428 }
419 429
@@ -423,13 +433,18 @@ static int __devinit ads7846_probe(struct spi_device *spi)
423 * to discard the four garbage LSBs. 433 * to discard the four garbage LSBs.
424 */ 434 */
425 435
426 if (!(ts = kzalloc(sizeof(struct ads7846), GFP_KERNEL))) 436 ts = kzalloc(sizeof(struct ads7846), GFP_KERNEL);
427 return -ENOMEM; 437 input_dev = input_allocate_device();
438 if (!ts || !input_dev) {
439 err = -ENOMEM;
440 goto err_free_mem;
441 }
428 442
429 dev_set_drvdata(&spi->dev, ts); 443 dev_set_drvdata(&spi->dev, ts);
444 spi->dev.power.power_state = PMSG_ON;
430 445
431 ts->spi = spi; 446 ts->spi = spi;
432 spi->dev.power.power_state = PMSG_ON; 447 ts->input = input_dev;
433 448
434 init_timer(&ts->timer); 449 init_timer(&ts->timer);
435 ts->timer.data = (unsigned long) ts; 450 ts->timer.data = (unsigned long) ts;
@@ -439,70 +454,80 @@ static int __devinit ads7846_probe(struct spi_device *spi)
439 ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100; 454 ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100;
440 ts->x_plate_ohms = pdata->x_plate_ohms ? : 400; 455 ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
441 456
442 init_input_dev(&ts->input); 457 snprintf(ts->phys, sizeof(ts->phys), "%s/input0", spi->dev.bus_id);
443 458
444 ts->input.dev = &spi->dev; 459 input_dev->name = "ADS784x Touchscreen";
445 ts->input.name = "ADS784x Touchscreen"; 460 input_dev->phys = ts->phys;
446 snprintf(ts->phys, sizeof ts->phys, "%s/input0", spi->dev.bus_id); 461 input_dev->cdev.dev = &spi->dev;
447 ts->input.phys = ts->phys;
448 462
449 ts->input.evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); 463 input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
450 ts->input.keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH); 464 input_dev->keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH);
451 input_set_abs_params(&ts->input, ABS_X, 465 input_set_abs_params(input_dev, ABS_X,
452 pdata->x_min ? : 0, 466 pdata->x_min ? : 0,
453 pdata->x_max ? : MAX_12BIT, 467 pdata->x_max ? : MAX_12BIT,
454 0, 0); 468 0, 0);
455 input_set_abs_params(&ts->input, ABS_Y, 469 input_set_abs_params(input_dev, ABS_Y,
456 pdata->y_min ? : 0, 470 pdata->y_min ? : 0,
457 pdata->y_max ? : MAX_12BIT, 471 pdata->y_max ? : MAX_12BIT,
458 0, 0); 472 0, 0);
459 input_set_abs_params(&ts->input, ABS_PRESSURE, 473 input_set_abs_params(input_dev, ABS_PRESSURE,
460 pdata->pressure_min, pdata->pressure_max, 0, 0); 474 pdata->pressure_min, pdata->pressure_max, 0, 0);
461 475
462 input_register_device(&ts->input);
463
464 /* set up the transfers to read touchscreen state; this assumes we 476 /* set up the transfers to read touchscreen state; this assumes we
465 * use formula #2 for pressure, not #3. 477 * use formula #2 for pressure, not #3.
466 */ 478 */
479 INIT_LIST_HEAD(&ts->msg.transfers);
467 x = ts->xfer; 480 x = ts->xfer;
468 481
469 /* y- still on; turn on only y+ (and ADC) */ 482 /* y- still on; turn on only y+ (and ADC) */
470 x->tx_buf = &read_y; 483 ts->read_y = READ_Y;
484 x->tx_buf = &ts->read_y;
471 x->len = 1; 485 x->len = 1;
486 spi_message_add_tail(x, &ts->msg);
487
472 x++; 488 x++;
473 x->rx_buf = &ts->tc.y; 489 x->rx_buf = &ts->tc.y;
474 x->len = 2; 490 x->len = 2;
475 x++; 491 spi_message_add_tail(x, &ts->msg);
476 492
477 /* turn y+ off, x- on; we'll use formula #2 */ 493 /* turn y+ off, x- on; we'll use formula #2 */
478 if (ts->model == 7846) { 494 if (ts->model == 7846) {
479 x->tx_buf = &read_z1; 495 x++;
496 ts->read_z1 = READ_Z1;
497 x->tx_buf = &ts->read_z1;
480 x->len = 1; 498 x->len = 1;
499 spi_message_add_tail(x, &ts->msg);
500
481 x++; 501 x++;
482 x->rx_buf = &ts->tc.z1; 502 x->rx_buf = &ts->tc.z1;
483 x->len = 2; 503 x->len = 2;
484 x++; 504 spi_message_add_tail(x, &ts->msg);
485 505
486 x->tx_buf = &read_z2; 506 x++;
507 ts->read_z2 = READ_Z2;
508 x->tx_buf = &ts->read_z2;
487 x->len = 1; 509 x->len = 1;
510 spi_message_add_tail(x, &ts->msg);
511
488 x++; 512 x++;
489 x->rx_buf = &ts->tc.z2; 513 x->rx_buf = &ts->tc.z2;
490 x->len = 2; 514 x->len = 2;
491 x++; 515 spi_message_add_tail(x, &ts->msg);
492 } 516 }
493 517
494 /* turn y- off, x+ on, then leave in lowpower */ 518 /* turn y- off, x+ on, then leave in lowpower */
495 x->tx_buf = &read_x; 519 x++;
520 ts->read_x = READ_X;
521 x->tx_buf = &ts->read_x;
496 x->len = 1; 522 x->len = 1;
523 spi_message_add_tail(x, &ts->msg);
524
497 x++; 525 x++;
498 x->rx_buf = &ts->tc.x; 526 x->rx_buf = &ts->tc.x;
499 x->len = 2; 527 x->len = 2;
500 x++; 528 CS_CHANGE(*x);
501 529 spi_message_add_tail(x, &ts->msg);
502 CS_CHANGE(x[-1]);
503 530
504 for (i = 0; i < x - ts->xfer; i++)
505 spi_message_add_tail(&ts->xfer[i], &ts->msg);
506 ts->msg.complete = ads7846_rx; 531 ts->msg.complete = ads7846_rx;
507 ts->msg.context = ts; 532 ts->msg.context = ts;
508 533
@@ -510,9 +535,8 @@ static int __devinit ads7846_probe(struct spi_device *spi)
510 SA_SAMPLE_RANDOM | SA_TRIGGER_FALLING, 535 SA_SAMPLE_RANDOM | SA_TRIGGER_FALLING,
511 spi->dev.bus_id, ts)) { 536 spi->dev.bus_id, ts)) {
512 dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq); 537 dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq);
513 input_unregister_device(&ts->input); 538 err = -EBUSY;
514 kfree(ts); 539 goto err_free_mem;
515 return -EBUSY;
516 } 540 }
517 541
518 dev_info(&spi->dev, "touchscreen, irq %d\n", spi->irq); 542 dev_info(&spi->dev, "touchscreen, irq %d\n", spi->irq);
@@ -534,7 +558,18 @@ static int __devinit ads7846_probe(struct spi_device *spi)
534 device_create_file(&spi->dev, &dev_attr_vbatt); 558 device_create_file(&spi->dev, &dev_attr_vbatt);
535 device_create_file(&spi->dev, &dev_attr_vaux); 559 device_create_file(&spi->dev, &dev_attr_vaux);
536 560
561 err = input_register_device(input_dev);
562 if (err)
563 goto err_free_irq;
564
537 return 0; 565 return 0;
566
567 err_free_irq:
568 free_irq(spi->irq, ts);
569 err_free_mem:
570 input_free_device(input_dev);
571 kfree(ts);
572 return err;
538} 573}
539 574
540static int __devexit ads7846_remove(struct spi_device *spi) 575static int __devexit ads7846_remove(struct spi_device *spi)
@@ -554,7 +589,7 @@ static int __devexit ads7846_remove(struct spi_device *spi)
554 device_remove_file(&spi->dev, &dev_attr_vbatt); 589 device_remove_file(&spi->dev, &dev_attr_vbatt);
555 device_remove_file(&spi->dev, &dev_attr_vaux); 590 device_remove_file(&spi->dev, &dev_attr_vaux);
556 591
557 input_unregister_device(&ts->input); 592 input_unregister_device(ts->input);
558 kfree(ts); 593 kfree(ts);
559 594
560 dev_dbg(&spi->dev, "unregistered touchscreen\n"); 595 dev_dbg(&spi->dev, "unregistered touchscreen\n");
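The ads7846 conversion above does two things: it switches to the dynamic input_allocate_device()/input_register_device() API with a proper unwind path on failure, and it moves the one-byte SPI command words out of static const objects into the kzalloc'd per-device struct, because spi_transfer buffers handed to the SPI core must be DMA-safe memory, not .rodata. A reduced sketch of that layout (field and function names invented, READ_Y/READ_X as defined in the patch; the patch itself uses INIT_LIST_HEAD on msg.transfers where this uses spi_message_init()):

struct ts_priv {
	u8 read_y, read_x;		/* command bytes, DMA-safe because kzalloc'd */
	__be16 sample_y, sample_x;	/* raw big-endian replies */
	struct spi_transfer xfer[4];
	struct spi_message msg;
};

static void ts_build_message(struct ts_priv *ts)
{
	struct spi_transfer *x = ts->xfer;

	spi_message_init(&ts->msg);

	ts->read_y = READ_Y;
	x->tx_buf = &ts->read_y;	/* points into the heap-allocated struct */
	x->len = 1;
	spi_message_add_tail(x, &ts->msg);

	x++;
	x->rx_buf = &ts->sample_y;
	x->len = 2;
	spi_message_add_tail(x, &ts->msg);

	/* the x/z1/z2 transfers are chained the same way */
}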
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 3a32c59494f2..24e51d5e97fc 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -151,6 +151,7 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
151 kfree(buf); 151 kfree(buf);
152 return NULL; 152 return NULL;
153} 153}
154EXPORT_SYMBOL_GPL(smu_sat_get_sdb_partition);
154 155
155/* refresh the cache */ 156/* refresh the cache */
156static int wf_sat_read_cache(struct wf_sat *sat) 157static int wf_sat_read_cache(struct wf_sat *sat)
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 9a2c7605d49c..642a61b6d0a4 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -452,8 +452,7 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
452 } else if (func == MPI_FUNCTION_EVENT_ACK) { 452 } else if (func == MPI_FUNCTION_EVENT_ACK) {
453 dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, EventAck reply received\n", 453 dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, EventAck reply received\n",
454 ioc->name)); 454 ioc->name));
455 } else if (func == MPI_FUNCTION_CONFIG || 455 } else if (func == MPI_FUNCTION_CONFIG) {
456 func == MPI_FUNCTION_TOOLBOX) {
457 CONFIGPARMS *pCfg; 456 CONFIGPARMS *pCfg;
458 unsigned long flags; 457 unsigned long flags;
459 458
@@ -5327,115 +5326,6 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5327} 5326}
5328 5327
5329/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5328/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5330/**
5331 * mpt_toolbox - Generic function to issue toolbox message
5332 * @ioc - Pointer to an adapter structure
5333 * @cfg - Pointer to a toolbox structure. Struct contains
5334 * action, page address, direction, physical address
5335 * and pointer to a configuration page header
5336 * Page header is updated.
5337 *
5338 * Returns 0 for success
5339 * -EPERM if not allowed due to ISR context
5340 * -EAGAIN if no msg frames currently available
5341 * -EFAULT for non-successful reply or no reply (timeout)
5342 */
5343int
5344mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5345{
5346 ToolboxIstwiReadWriteRequest_t *pReq;
5347 MPT_FRAME_HDR *mf;
5348 struct pci_dev *pdev;
5349 unsigned long flags;
5350 int rc;
5351 u32 flagsLength;
5352 int in_isr;
5353
5354 /* Prevent calling wait_event() (below), if caller happens
5355 * to be in ISR context, because that is fatal!
5356 */
5357 in_isr = in_interrupt();
5358 if (in_isr) {
5359 dcprintk((MYIOC_s_WARN_FMT "toobox request not allowed in ISR context!\n",
5360 ioc->name));
5361 return -EPERM;
5362 }
5363
5364 /* Get and Populate a free Frame
5365 */
5366 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
5367 dcprintk((MYIOC_s_WARN_FMT "mpt_toolbox: no msg frames!\n",
5368 ioc->name));
5369 return -EAGAIN;
5370 }
5371 pReq = (ToolboxIstwiReadWriteRequest_t *)mf;
5372 pReq->Tool = pCfg->action;
5373 pReq->Reserved = 0;
5374 pReq->ChainOffset = 0;
5375 pReq->Function = MPI_FUNCTION_TOOLBOX;
5376 pReq->Reserved1 = 0;
5377 pReq->Reserved2 = 0;
5378 pReq->MsgFlags = 0;
5379 pReq->Flags = pCfg->dir;
5380 pReq->BusNum = 0;
5381 pReq->Reserved3 = 0;
5382 pReq->NumAddressBytes = 0x01;
5383 pReq->Reserved4 = 0;
5384 pReq->DataLength = cpu_to_le16(0x04);
5385 pdev = ioc->pcidev;
5386 if (pdev->devfn & 1)
5387 pReq->DeviceAddr = 0xB2;
5388 else
5389 pReq->DeviceAddr = 0xB0;
5390 pReq->Addr1 = 0;
5391 pReq->Addr2 = 0;
5392 pReq->Addr3 = 0;
5393 pReq->Reserved5 = 0;
5394
5395 /* Add a SGE to the config request.
5396 */
5397
5398 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | 4;
5399
5400 mpt_add_sge((char *)&pReq->SGL, flagsLength, pCfg->physAddr);
5401
5402 dcprintk((MYIOC_s_INFO_FMT "Sending Toolbox request, Tool=%x\n",
5403 ioc->name, pReq->Tool));
5404
5405 /* Append pCfg pointer to end of mf
5406 */
5407 *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg;
5408
5409 /* Initalize the timer
5410 */
5411 init_timer(&pCfg->timer);
5412 pCfg->timer.data = (unsigned long) ioc;
5413 pCfg->timer.function = mpt_timer_expired;
5414 pCfg->wait_done = 0;
5415
5416 /* Set the timer; ensure 10 second minimum */
5417 if (pCfg->timeout < 10)
5418 pCfg->timer.expires = jiffies + HZ*10;
5419 else
5420 pCfg->timer.expires = jiffies + HZ*pCfg->timeout;
5421
5422 /* Add to end of Q, set timer and then issue this command */
5423 spin_lock_irqsave(&ioc->FreeQlock, flags);
5424 list_add_tail(&pCfg->linkage, &ioc->configQ);
5425 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
5426
5427 add_timer(&pCfg->timer);
5428 mpt_put_msg_frame(mpt_base_index, ioc, mf);
5429 wait_event(mpt_waitq, pCfg->wait_done);
5430
5431 /* mf has been freed - do not access */
5432
5433 rc = pCfg->status;
5434
5435 return rc;
5436}
5437
5438/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5439/* 5329/*
5440 * mpt_timer_expired - Call back for timer process. 5330 * mpt_timer_expired - Call back for timer process.
5441 * Used only internal config functionality. 5331 * Used only internal config functionality.
@@ -6142,7 +6032,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
6142 if (ioc->events && (ioc->eventTypes & ( 1 << event))) { 6032 if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
6143 int idx; 6033 int idx;
6144 6034
6145 idx = ioc->eventContext % ioc->eventLogSize; 6035 idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE;
6146 6036
6147 ioc->events[idx].event = event; 6037 ioc->events[idx].event = event;
6148 ioc->events[idx].eventContext = ioc->eventContext; 6038 ioc->events[idx].eventContext = ioc->eventContext;
@@ -6540,7 +6430,6 @@ EXPORT_SYMBOL(mpt_lan_index);
6540EXPORT_SYMBOL(mpt_stm_index); 6430EXPORT_SYMBOL(mpt_stm_index);
6541EXPORT_SYMBOL(mpt_HardResetHandler); 6431EXPORT_SYMBOL(mpt_HardResetHandler);
6542EXPORT_SYMBOL(mpt_config); 6432EXPORT_SYMBOL(mpt_config);
6543EXPORT_SYMBOL(mpt_toolbox);
6544EXPORT_SYMBOL(mpt_findImVolumes); 6433EXPORT_SYMBOL(mpt_findImVolumes);
6545EXPORT_SYMBOL(mpt_read_ioc_pg_3); 6434EXPORT_SYMBOL(mpt_read_ioc_pg_3);
6546EXPORT_SYMBOL(mpt_alloc_fw_memory); 6435EXPORT_SYMBOL(mpt_alloc_fw_memory);
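Two themes in the mptbase.c hunks: mpt_toolbox() and its export go away because the ioctl layer now builds the TOOLBOX ISTWI request itself (see the mptctl_hp_hostinfo changes below), and the event log is indexed with the fixed MPTCTL_EVENT_LOG_SIZE constant instead of the per-IOC eventLogSize field. The log is a plain ring buffer; an illustrative standalone restatement of the indexing (size and names are placeholders, not the driver's):

#define EVENT_LOG_SIZE 50

struct event_entry {
	unsigned int event;
	unsigned int context;
};

static void log_event(struct event_entry *log, unsigned int *next_context,
		      unsigned int event)
{
	unsigned int idx = *next_context % EVENT_LOG_SIZE;	/* wrap in the fixed ring */

	log[idx].event = event;
	log[idx].context = (*next_context)++;
}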
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index ea2649ecad1f..723d54300953 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -616,6 +616,7 @@ typedef struct _MPT_ADAPTER
616 * increments by 32 bytes 616 * increments by 32 bytes
617 */ 617 */
618 int errata_flag_1064; 618 int errata_flag_1064;
619 int aen_event_read_flag; /* flag to indicate event log was read*/
619 u8 FirstWhoInit; 620 u8 FirstWhoInit;
620 u8 upload_fw; /* If set, do a fw upload */ 621 u8 upload_fw; /* If set, do a fw upload */
621 u8 reload_fw; /* Force a FW Reload on next reset */ 622 u8 reload_fw; /* Force a FW Reload on next reset */
@@ -1026,7 +1027,6 @@ extern u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
1026extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan); 1027extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan);
1027extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); 1028extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
1028extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg); 1029extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
1029extern int mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
1030extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size); 1030extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
1031extern void mpt_free_fw_memory(MPT_ADAPTER *ioc); 1031extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
1032extern int mpt_findImVolumes(MPT_ADAPTER *ioc); 1032extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index bdf709987982..9b64e07400da 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -136,6 +136,12 @@ static void mptctl_free_tm_flags(MPT_ADAPTER *ioc);
136 */ 136 */
137static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase); 137static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
138 138
139/*
140 * Event Handler function
141 */
142static int mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
143struct fasync_struct *async_queue=NULL;
144
139/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 145/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
140/* 146/*
141 * Scatter gather list (SGL) sizes and limits... 147 * Scatter gather list (SGL) sizes and limits...
@@ -385,18 +391,18 @@ static int mptctl_bus_reset(MPT_IOCTL *ioctl)
385 } 391 }
386 392
387 /* Now wait for the command to complete */ 393 /* Now wait for the command to complete */
388 ii = wait_event_interruptible_timeout(mptctl_wait, 394 ii = wait_event_timeout(mptctl_wait,
389 ioctl->wait_done == 1, 395 ioctl->wait_done == 1,
390 HZ*5 /* 5 second timeout */); 396 HZ*5 /* 5 second timeout */);
391 397
392 if(ii <=0 && (ioctl->wait_done != 1 )) { 398 if(ii <=0 && (ioctl->wait_done != 1 )) {
399 mpt_free_msg_frame(hd->ioc, mf);
393 ioctl->wait_done = 0; 400 ioctl->wait_done = 0;
394 retval = -1; /* return failure */ 401 retval = -1; /* return failure */
395 } 402 }
396 403
397mptctl_bus_reset_done: 404mptctl_bus_reset_done:
398 405
399 mpt_free_msg_frame(hd->ioc, mf);
400 mptctl_free_tm_flags(ioctl->ioc); 406 mptctl_free_tm_flags(ioctl->ioc);
401 return retval; 407 return retval;
402} 408}
@@ -472,6 +478,69 @@ mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
472} 478}
473 479
474/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 480/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
481/* ASYNC Event Notification Support */
482static int
483mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
484{
485 u8 event;
486
487 event = le32_to_cpu(pEvReply->Event) & 0xFF;
488
489 dctlprintk(("%s() called\n", __FUNCTION__));
490 if(async_queue == NULL)
491 return 1;
492
493 /* Raise SIGIO for persistent events.
494 * TODO - this define is not in MPI spec yet,
495 * but they plan to set it to 0x21
496 */
497 if (event == 0x21 ) {
498 ioc->aen_event_read_flag=1;
499 dctlprintk(("Raised SIGIO to application\n"));
500 devtprintk(("Raised SIGIO to application\n"));
501 kill_fasync(&async_queue, SIGIO, POLL_IN);
502 return 1;
503 }
504
505 /* This flag is set after SIGIO was raised, and
506 * remains set until the application has read
507 * the event log via ioctl=MPTEVENTREPORT
508 */
509 if(ioc->aen_event_read_flag)
510 return 1;
511
512 /* Signal only for the events that are
513 * requested for by the application
514 */
515 if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
516 ioc->aen_event_read_flag=1;
517 dctlprintk(("Raised SIGIO to application\n"));
518 devtprintk(("Raised SIGIO to application\n"));
519 kill_fasync(&async_queue, SIGIO, POLL_IN);
520 }
521 return 1;
522}
523
524static int
525mptctl_fasync(int fd, struct file *filep, int mode)
526{
527 MPT_ADAPTER *ioc;
528
529 list_for_each_entry(ioc, &ioc_list, list)
530 ioc->aen_event_read_flag=0;
531
532 dctlprintk(("%s() called\n", __FUNCTION__));
533 return fasync_helper(fd, filep, mode, &async_queue);
534}
535
536static int
537mptctl_release(struct inode *inode, struct file *filep)
538{
539 dctlprintk(("%s() called\n", __FUNCTION__));
540 return fasync_helper(-1, filep, 0, &async_queue);
541}
542
543/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
475/* 544/*
476 * MPT ioctl handler 545 * MPT ioctl handler
477 * cmd - specify the particular IOCTL command to be issued 546 * cmd - specify the particular IOCTL command to be issued
@@ -674,22 +743,23 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
674 u16 iocstat; 743 u16 iocstat;
675 pFWDownloadReply_t ReplyMsg = NULL; 744 pFWDownloadReply_t ReplyMsg = NULL;
676 745
677 dctlprintk((KERN_INFO "mptctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id)); 746 dctlprintk(("mptctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id));
678 747
679 dctlprintk((KERN_INFO "DbG: kfwdl.bufp = %p\n", ufwbuf)); 748 dctlprintk(("DbG: kfwdl.bufp = %p\n", ufwbuf));
680 dctlprintk((KERN_INFO "DbG: kfwdl.fwlen = %d\n", (int)fwlen)); 749 dctlprintk(("DbG: kfwdl.fwlen = %d\n", (int)fwlen));
681 dctlprintk((KERN_INFO "DbG: kfwdl.ioc = %04xh\n", ioc)); 750 dctlprintk(("DbG: kfwdl.ioc = %04xh\n", ioc));
682 751
683 if ((ioc = mpt_verify_adapter(ioc, &iocp)) < 0) { 752 if (mpt_verify_adapter(ioc, &iocp) < 0) {
684 dctlprintk(("%s@%d::_ioctl_fwdl - ioc%d not found!\n", 753 dctlprintk(("ioctl_fwdl - ioc%d not found!\n",
685 __FILE__, __LINE__, ioc)); 754 ioc));
686 return -ENODEV; /* (-6) No such device or address */ 755 return -ENODEV; /* (-6) No such device or address */
687 } 756 } else {
688 757
689 /* Valid device. Get a message frame and construct the FW download message. 758 /* Valid device. Get a message frame and construct the FW download message.
690 */ 759 */
691 if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) 760 if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
692 return -EAGAIN; 761 return -EAGAIN;
762 }
693 dlmsg = (FWDownload_t*) mf; 763 dlmsg = (FWDownload_t*) mf;
694 ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; 764 ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
695 sgOut = (char *) (ptsge + 1); 765 sgOut = (char *) (ptsge + 1);
@@ -702,7 +772,11 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
702 dlmsg->ChainOffset = 0; 772 dlmsg->ChainOffset = 0;
703 dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD; 773 dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD;
704 dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0; 774 dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0;
705 dlmsg->MsgFlags = 0; 775 if (iocp->facts.MsgVersion >= MPI_VERSION_01_05)
776 dlmsg->MsgFlags = MPI_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT;
777 else
778 dlmsg->MsgFlags = 0;
779
706 780
707 /* Set up the Transaction SGE. 781 /* Set up the Transaction SGE.
708 */ 782 */
@@ -754,7 +828,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
754 goto fwdl_out; 828 goto fwdl_out;
755 } 829 }
756 830
757 dctlprintk((KERN_INFO "DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags)); 831 dctlprintk(("DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags));
758 832
759 /* 833 /*
760 * Parse SG list, copying sgl itself, 834 * Parse SG list, copying sgl itself,
@@ -803,11 +877,11 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
803 /* 877 /*
804 * Finally, perform firmware download. 878 * Finally, perform firmware download.
805 */ 879 */
806 iocp->ioctl->wait_done = 0; 880 ReplyMsg = NULL;
807 mpt_put_msg_frame(mptctl_id, iocp, mf); 881 mpt_put_msg_frame(mptctl_id, iocp, mf);
808 882
809 /* Now wait for the command to complete */ 883 /* Now wait for the command to complete */
810 ret = wait_event_interruptible_timeout(mptctl_wait, 884 ret = wait_event_timeout(mptctl_wait,
811 iocp->ioctl->wait_done == 1, 885 iocp->ioctl->wait_done == 1,
812 HZ*60); 886 HZ*60);
813 887
@@ -1145,7 +1219,9 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
1145 /* Fill in the data and return the structure to the calling 1219 /* Fill in the data and return the structure to the calling
1146 * program 1220 * program
1147 */ 1221 */
1148 if (ioc->bus_type == FC) 1222 if (ioc->bus_type == SAS)
1223 karg->adapterType = MPT_IOCTL_INTERFACE_SAS;
1224 else if (ioc->bus_type == FC)
1149 karg->adapterType = MPT_IOCTL_INTERFACE_FC; 1225 karg->adapterType = MPT_IOCTL_INTERFACE_FC;
1150 else 1226 else
1151 karg->adapterType = MPT_IOCTL_INTERFACE_SCSI; 1227 karg->adapterType = MPT_IOCTL_INTERFACE_SCSI;
@@ -1170,12 +1246,11 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
1170 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); 1246 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn );
1171 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); 1247 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
1172 } else if (cim_rev == 2) { 1248 } else if (cim_rev == 2) {
1173 /* Get the PCI bus, device, function and segment ID numbers 1249 /* Get the PCI bus, device, function and segment ID numbers
1174 for the IOC */ 1250 for the IOC */
1175 karg->pciInfo.u.bits.busNumber = pdev->bus->number; 1251 karg->pciInfo.u.bits.busNumber = pdev->bus->number;
1176 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); 1252 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn );
1177 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); 1253 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
1178 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
1179 karg->pciInfo.segmentID = pci_domain_nr(pdev->bus); 1254 karg->pciInfo.segmentID = pci_domain_nr(pdev->bus);
1180 } 1255 }
1181 1256
@@ -1500,7 +1575,7 @@ mptctl_eventquery (unsigned long arg)
1500 return -ENODEV; 1575 return -ENODEV;
1501 } 1576 }
1502 1577
1503 karg.eventEntries = ioc->eventLogSize; 1578 karg.eventEntries = MPTCTL_EVENT_LOG_SIZE;
1504 karg.eventTypes = ioc->eventTypes; 1579 karg.eventTypes = ioc->eventTypes;
1505 1580
1506 /* Copy the data from kernel memory to user memory 1581 /* Copy the data from kernel memory to user memory
@@ -1550,7 +1625,6 @@ mptctl_eventenable (unsigned long arg)
1550 memset(ioc->events, 0, sz); 1625 memset(ioc->events, 0, sz);
1551 ioc->alloc_total += sz; 1626 ioc->alloc_total += sz;
1552 1627
1553 ioc->eventLogSize = MPTCTL_EVENT_LOG_SIZE;
1554 ioc->eventContext = 0; 1628 ioc->eventContext = 0;
1555 } 1629 }
1556 1630
@@ -1590,7 +1664,7 @@ mptctl_eventreport (unsigned long arg)
1590 maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS); 1664 maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS);
1591 1665
1592 1666
1593 max = ioc->eventLogSize < maxEvents ? ioc->eventLogSize : maxEvents; 1667 max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents;
1594 1668
1595 /* If fewer than 1 event is requested, there must have 1669 /* If fewer than 1 event is requested, there must have
1596 * been some type of error. 1670 * been some type of error.
@@ -1598,6 +1672,9 @@ mptctl_eventreport (unsigned long arg)
1598 if ((max < 1) || !ioc->events) 1672 if ((max < 1) || !ioc->events)
1599 return -ENODATA; 1673 return -ENODATA;
1600 1674
1675 /* reset this flag so SIGIO can restart */
1676 ioc->aen_event_read_flag=0;
1677
1601 /* Copy the data from kernel memory to user memory 1678 /* Copy the data from kernel memory to user memory
1602 */ 1679 */
1603 numBytes = max * sizeof(MPT_IOCTL_EVENTS); 1680 numBytes = max * sizeof(MPT_IOCTL_EVENTS);
@@ -1817,6 +1894,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1817 case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR: 1894 case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
1818 case MPI_FUNCTION_FW_DOWNLOAD: 1895 case MPI_FUNCTION_FW_DOWNLOAD:
1819 case MPI_FUNCTION_FC_PRIMITIVE_SEND: 1896 case MPI_FUNCTION_FC_PRIMITIVE_SEND:
1897 case MPI_FUNCTION_TOOLBOX:
1898 case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
1820 break; 1899 break;
1821 1900
1822 case MPI_FUNCTION_SCSI_IO_REQUEST: 1901 case MPI_FUNCTION_SCSI_IO_REQUEST:
@@ -1837,7 +1916,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1837 goto done_free_mem; 1916 goto done_free_mem;
1838 } 1917 }
1839 1918
1840 pScsiReq->MsgFlags = mpt_msg_flags(); 1919 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
1920 pScsiReq->MsgFlags |= mpt_msg_flags();
1921
1841 1922
1842 /* verify that app has not requested 1923 /* verify that app has not requested
1843 * more sense data than driver 1924 * more sense data than driver
@@ -1888,6 +1969,25 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1888 } 1969 }
1889 break; 1970 break;
1890 1971
1972 case MPI_FUNCTION_SMP_PASSTHROUGH:
1973 /* Check mf->PassthruFlags to determine if
1974 * transfer is ImmediateMode or not.
1975 * Immediate mode returns data in the ReplyFrame.
1976 * Else, we are sending request and response data
1977 * in two SGLs at the end of the mf.
1978 */
1979 break;
1980
1981 case MPI_FUNCTION_SATA_PASSTHROUGH:
1982 if (!ioc->sh) {
1983 printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
1984 "SCSI driver is not loaded. \n",
1985 __FILE__, __LINE__);
1986 rc = -EFAULT;
1987 goto done_free_mem;
1988 }
1989 break;
1990
1891 case MPI_FUNCTION_RAID_ACTION: 1991 case MPI_FUNCTION_RAID_ACTION:
1892 /* Just add a SGE 1992 /* Just add a SGE
1893 */ 1993 */
@@ -1900,7 +2000,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1900 int scsidir = MPI_SCSIIO_CONTROL_READ; 2000 int scsidir = MPI_SCSIIO_CONTROL_READ;
1901 int dataSize; 2001 int dataSize;
1902 2002
1903 pScsiReq->MsgFlags = mpt_msg_flags(); 2003 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
2004 pScsiReq->MsgFlags |= mpt_msg_flags();
2005
1904 2006
1905 /* verify that app has not requested 2007 /* verify that app has not requested
1906 * more sense data than driver 2008 * more sense data than driver
@@ -2130,7 +2232,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2130 2232
2131 /* Now wait for the command to complete */ 2233 /* Now wait for the command to complete */
2132 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; 2234 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT;
2133 timeout = wait_event_interruptible_timeout(mptctl_wait, 2235 timeout = wait_event_timeout(mptctl_wait,
2134 ioc->ioctl->wait_done == 1, 2236 ioc->ioctl->wait_done == 1,
2135 HZ*timeout); 2237 HZ*timeout);
2136 2238
@@ -2246,13 +2348,16 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2246 hp_host_info_t __user *uarg = (void __user *) arg; 2348 hp_host_info_t __user *uarg = (void __user *) arg;
2247 MPT_ADAPTER *ioc; 2349 MPT_ADAPTER *ioc;
2248 struct pci_dev *pdev; 2350 struct pci_dev *pdev;
2249 char *pbuf; 2351 char *pbuf=NULL;
2250 dma_addr_t buf_dma; 2352 dma_addr_t buf_dma;
2251 hp_host_info_t karg; 2353 hp_host_info_t karg;
2252 CONFIGPARMS cfg; 2354 CONFIGPARMS cfg;
2253 ConfigPageHeader_t hdr; 2355 ConfigPageHeader_t hdr;
2254 int iocnum; 2356 int iocnum;
2255 int rc, cim_rev; 2357 int rc, cim_rev;
2358 ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
2359 MPT_FRAME_HDR *mf = NULL;
2360 MPIHeader_t *mpi_hdr;
2256 2361
2257 dctlprintk((": mptctl_hp_hostinfo called.\n")); 2362 dctlprintk((": mptctl_hp_hostinfo called.\n"));
2258 /* Reset long to int. Should affect IA64 and SPARC only 2363 /* Reset long to int. Should affect IA64 and SPARC only
@@ -2370,7 +2475,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2370 2475
2371 karg.base_io_addr = pci_resource_start(pdev, 0); 2476 karg.base_io_addr = pci_resource_start(pdev, 0);
2372 2477
2373 if (ioc->bus_type == FC) 2478 if ((ioc->bus_type == SAS) || (ioc->bus_type == FC))
2374 karg.bus_phys_width = HP_BUS_WIDTH_UNK; 2479 karg.bus_phys_width = HP_BUS_WIDTH_UNK;
2375 else 2480 else
2376 karg.bus_phys_width = HP_BUS_WIDTH_16; 2481 karg.bus_phys_width = HP_BUS_WIDTH_16;
@@ -2388,20 +2493,67 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2388 } 2493 }
2389 } 2494 }
2390 2495
2391 cfg.pageAddr = 0; 2496 /*
2392 cfg.action = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL; 2497 * Gather ISTWI(Industry Standard Two Wire Interface) Data
2393 cfg.dir = MPI_TB_ISTWI_FLAGS_READ; 2498 */
2394 cfg.timeout = 10; 2499 if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
2500 dfailprintk((MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
2501 ioc->name,__FUNCTION__));
2502 goto out;
2503 }
2504
2505 IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf;
2506 mpi_hdr = (MPIHeader_t *) mf;
2507 memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t));
2508 IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX;
2509 IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL;
2510 IstwiRWRequest->MsgContext = mpi_hdr->MsgContext;
2511 IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ;
2512 IstwiRWRequest->NumAddressBytes = 0x01;
2513 IstwiRWRequest->DataLength = cpu_to_le16(0x04);
2514 if (pdev->devfn & 1)
2515 IstwiRWRequest->DeviceAddr = 0xB2;
2516 else
2517 IstwiRWRequest->DeviceAddr = 0xB0;
2518
2395 pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); 2519 pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
2396 if (pbuf) { 2520 if (!pbuf)
2397 cfg.physAddr = buf_dma; 2521 goto out;
2398 if ((mpt_toolbox(ioc, &cfg)) == 0) { 2522 mpt_add_sge((char *)&IstwiRWRequest->SGL,
2399 karg.rsvd = *(u32 *)pbuf; 2523 (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma);
2400 } 2524
2401 pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); 2525 ioc->ioctl->wait_done = 0;
2402 pbuf = NULL; 2526 mpt_put_msg_frame(mptctl_id, ioc, mf);
2527
2528 rc = wait_event_timeout(mptctl_wait,
2529 ioc->ioctl->wait_done == 1,
2530 HZ*MPT_IOCTL_DEFAULT_TIMEOUT /* 10 sec */);
2531
2532 if(rc <=0 && (ioc->ioctl->wait_done != 1 )) {
2533 /*
2534 * Now we need to reset the board
2535 */
2536 mpt_free_msg_frame(ioc, mf);
2537 mptctl_timeout_expired(ioc->ioctl);
2538 goto out;
2403 } 2539 }
2404 2540
2541 /*
2542 *ISTWI Data Definition
2543 * pbuf[0] = FW_VERSION = 0x4
2544 * pbuf[1] = Bay Count = 6 or 4 or 2, depending on
2545 * the config, you should be seeing one out of these three values
2546 * pbuf[2] = Drive Installed Map = bit pattern depend on which
2547 * bays have drives in them
2548 * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3)
2549 */
2550 if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID)
2551 karg.rsvd = *(u32 *)pbuf;
2552
2553 out:
2554 if (pbuf)
2555 pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);
2556
2405 /* Copy the data from kernel memory to user memory 2557 /* Copy the data from kernel memory to user memory
2406 */ 2558 */
2407 if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) { 2559 if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) {
@@ -2459,7 +2611,7 @@ mptctl_hp_targetinfo(unsigned long arg)
2459 2611
2460 /* There is nothing to do for FCP parts. 2612 /* There is nothing to do for FCP parts.
2461 */ 2613 */
2462 if (ioc->bus_type == FC) 2614 if ((ioc->bus_type == SAS) || (ioc->bus_type == FC))
2463 return 0; 2615 return 0;
2464 2616
2465 if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL)) 2617 if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL))
@@ -2569,6 +2721,8 @@ mptctl_hp_targetinfo(unsigned long arg)
2569static struct file_operations mptctl_fops = { 2721static struct file_operations mptctl_fops = {
2570 .owner = THIS_MODULE, 2722 .owner = THIS_MODULE,
2571 .llseek = no_llseek, 2723 .llseek = no_llseek,
2724 .release = mptctl_release,
2725 .fasync = mptctl_fasync,
2572 .unlocked_ioctl = mptctl_ioctl, 2726 .unlocked_ioctl = mptctl_ioctl,
2573#ifdef CONFIG_COMPAT 2727#ifdef CONFIG_COMPAT
2574 .compat_ioctl = compat_mpctl_ioctl, 2728 .compat_ioctl = compat_mpctl_ioctl,
@@ -2813,6 +2967,11 @@ static int __init mptctl_init(void)
2813 /* FIXME! */ 2967 /* FIXME! */
2814 } 2968 }
2815 2969
2970 if (mpt_event_register(mptctl_id, mptctl_event_process) == 0) {
2971 devtprintk((KERN_INFO MYNAM
2972 ": Registered for IOC event notifications\n"));
2973 }
2974
2816 return 0; 2975 return 0;
2817 2976
2818out_fail: 2977out_fail:
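The bulk of the mptctl.c change is asynchronous event notification: the driver registers mptctl_event_process() with the fusion core, raises SIGIO through the fasync queue when a subscribed event (or the not-yet-official 0x21 persistent event) arrives, and suppresses further signals via aen_event_read_flag until the application drains the log with MPTEVENTREPORT. Reduced to the generic character-device pattern (only async_queue, SIGIO and POLL_IN are the real identifiers here, the rest is illustrative):

static struct fasync_struct *async_queue;

static int my_fasync(int fd, struct file *filp, int on)
{
	/* wire the caller into (or out of) the SIGIO notification list */
	return fasync_helper(fd, filp, on, &async_queue);
}

static int my_release(struct inode *inode, struct file *filp)
{
	/* drop this reader from the async list on close */
	return fasync_helper(-1, filp, 0, &async_queue);
}

static void notify_readers(void)
{
	/* called from the event handler when a subscribed event arrives */
	if (async_queue)
		kill_fasync(&async_queue, SIGIO, POLL_IN);
}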
diff --git a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h
index 518996e03481..a2f8a97992e6 100644
--- a/drivers/message/fusion/mptctl.h
+++ b/drivers/message/fusion/mptctl.h
@@ -169,8 +169,10 @@ struct mpt_ioctl_pci_info2 {
169 * Read only. 169 * Read only.
170 * Data starts at offset 0xC 170 * Data starts at offset 0xC
171 */ 171 */
172#define MPT_IOCTL_INTERFACE_FC (0x01)
173#define MPT_IOCTL_INTERFACE_SCSI (0x00) 172#define MPT_IOCTL_INTERFACE_SCSI (0x00)
173#define MPT_IOCTL_INTERFACE_FC (0x01)
174#define MPT_IOCTL_INTERFACE_FC_IP (0x02)
175#define MPT_IOCTL_INTERFACE_SAS (0x03)
174#define MPT_IOCTL_VERSION_LENGTH (32) 176#define MPT_IOCTL_VERSION_LENGTH (32)
175 177
176struct mpt_ioctl_iocinfo { 178struct mpt_ioctl_iocinfo {
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 05789e505464..4fee6befc93d 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2489,7 +2489,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
2489 int idx; 2489 int idx;
2490 MPT_ADAPTER *ioc = hd->ioc; 2490 MPT_ADAPTER *ioc = hd->ioc;
2491 2491
2492 idx = ioc->eventContext % ioc->eventLogSize; 2492 idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE;
2493 ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE; 2493 ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE;
2494 ioc->events[idx].eventContext = ioc->eventContext; 2494 ioc->events[idx].eventContext = ioc->eventContext;
2495 2495
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index 37ee7f8dc82f..9fef29d978b5 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -97,6 +97,13 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
97 if (data->flags & MMC_DATA_READ) { 97 if (data->flags & MMC_DATA_READ) {
98 datactrl |= MCI_DPSM_DIRECTION; 98 datactrl |= MCI_DPSM_DIRECTION;
99 irqmask = MCI_RXFIFOHALFFULLMASK; 99 irqmask = MCI_RXFIFOHALFFULLMASK;
100
101 /*
102 * If we have less than a FIFOSIZE of bytes to transfer,
103 * trigger a PIO interrupt as soon as any data is available.
104 */
105 if (host->size < MCI_FIFOSIZE)
106 irqmask |= MCI_RXDATAAVLBLMASK;
100 } else { 107 } else {
101 /* 108 /*
102 * We don't actually need to include "FIFO empty" here 109 * We don't actually need to include "FIFO empty" here
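The new mmci hunk targets small PIO reads: a transfer shorter than a FIFO's worth of data should be drained as soon as any bytes show up rather than waiting for the RX-half-full threshold, so the data-available interrupt is unmasked in that case. A hedged restatement of the decision (MCI_* names as in the driver, the helper itself is invented):

static u32 pick_rx_irqmask(unsigned int bytes, unsigned int fifosize)
{
	u32 mask = MCI_RXFIFOHALFFULLMASK;

	if (bytes < fifosize)	/* less than a FIFO to move: interrupt on any data */
		mask |= MCI_RXDATAAVLBLMASK;
	return mask;
}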
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 47c72a63dfe1..e45a8f959719 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2020,8 +2020,8 @@ config SIS190
2020 will be called sis190. This is recommended. 2020 will be called sis190. This is recommended.
2021 2021
2022config SKGE 2022config SKGE
2023 tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)" 2023 tristate "New SysKonnect GigaEthernet support"
2024 depends on PCI && EXPERIMENTAL 2024 depends on PCI
2025 select CRC32 2025 select CRC32
2026 ---help--- 2026 ---help---
2027 This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx 2027 This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
@@ -2082,7 +2082,6 @@ config SK98LIN
2082 - Allied Telesyn AT-2971SX Gigabit Ethernet Adapter 2082 - Allied Telesyn AT-2971SX Gigabit Ethernet Adapter
2083 - Allied Telesyn AT-2971T Gigabit Ethernet Adapter 2083 - Allied Telesyn AT-2971T Gigabit Ethernet Adapter
2084 - Belkin Gigabit Desktop Card 10/100/1000Base-T Adapter, Copper RJ-45 2084 - Belkin Gigabit Desktop Card 10/100/1000Base-T Adapter, Copper RJ-45
2085 - DGE-530T Gigabit Ethernet Adapter
2086 - EG1032 v2 Instant Gigabit Network Adapter 2085 - EG1032 v2 Instant Gigabit Network Adapter
2087 - EG1064 v2 Instant Gigabit Network Adapter 2086 - EG1064 v2 Instant Gigabit Network Adapter
2088 - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Abit) 2087 - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Abit)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e0f51afec778..bcf9f17daf0d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1581,6 +1581,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1581 printk(KERN_INFO DRV_NAME 1581 printk(KERN_INFO DRV_NAME
1582 ": %s: %s not enslaved\n", 1582 ": %s: %s not enslaved\n",
1583 bond_dev->name, slave_dev->name); 1583 bond_dev->name, slave_dev->name);
1584 write_unlock_bh(&bond->lock);
1584 return -EINVAL; 1585 return -EINVAL;
1585 } 1586 }
1586 1587
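The one-line bonding fix closes a lock leak: bond_release() takes bond->lock for writing (with bottom halves disabled) earlier in the function, and the "not enslaved" early return used to leave it held. A generic sketch of the invariant the fix restores, with illustrative names:

static int do_release(rwlock_t *lock, int enslaved)
{
	int err = 0;

	write_lock_bh(lock);

	if (!enslaved) {
		err = -EINVAL;
		goto out_unlock;	/* this path previously returned with the lock held */
	}

	/* ... detach the slave ... */

out_unlock:
	write_unlock_bh(lock);
	return err;
}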
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index b420182eec4b..ed4bc91638d2 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1791,6 +1791,8 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1791 goto out; 1791 goto out;
1792 } 1792 }
1793 1793
1794 pci_set_drvdata(pdev, dev);
1795
1794 tp = netdev_priv(dev); 1796 tp = netdev_priv(dev);
1795 ioaddr = tp->mmio_addr; 1797 ioaddr = tp->mmio_addr;
1796 1798
@@ -1827,8 +1829,6 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1827 if (rc < 0) 1829 if (rc < 0)
1828 goto err_remove_mii; 1830 goto err_remove_mii;
1829 1831
1830 pci_set_drvdata(pdev, dev);
1831
1832 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), " 1832 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1833 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", 1833 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1834 pci_name(pdev), sis_chip_info[ent->driver_data].name, 1834 pci_name(pdev), sis_chip_info[ent->driver_data].name,
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index bf55a4cfb3d2..67fb19b8fde9 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -1697,6 +1697,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1697 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); 1697 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
1698 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); 1698 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
1699 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); 1699 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
1700
1700 if (skge->autoneg == AUTONEG_DISABLE) { 1701 if (skge->autoneg == AUTONEG_DISABLE) {
1701 reg = GM_GPCR_AU_ALL_DIS; 1702 reg = GM_GPCR_AU_ALL_DIS;
1702 gma_write16(hw, port, GM_GP_CTRL, 1703 gma_write16(hw, port, GM_GP_CTRL,
@@ -1704,16 +1705,23 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1704 1705
1705 switch (skge->speed) { 1706 switch (skge->speed) {
1706 case SPEED_1000: 1707 case SPEED_1000:
1708 reg &= ~GM_GPCR_SPEED_100;
1707 reg |= GM_GPCR_SPEED_1000; 1709 reg |= GM_GPCR_SPEED_1000;
1708 /* fallthru */ 1710 break;
1709 case SPEED_100: 1711 case SPEED_100:
1712 reg &= ~GM_GPCR_SPEED_1000;
1710 reg |= GM_GPCR_SPEED_100; 1713 reg |= GM_GPCR_SPEED_100;
1714 break;
1715 case SPEED_10:
1716 reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
1717 break;
1711 } 1718 }
1712 1719
1713 if (skge->duplex == DUPLEX_FULL) 1720 if (skge->duplex == DUPLEX_FULL)
1714 reg |= GM_GPCR_DUP_FULL; 1721 reg |= GM_GPCR_DUP_FULL;
1715 } else 1722 } else
1716 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; 1723 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
1724
1717 switch (skge->flow_control) { 1725 switch (skge->flow_control) {
1718 case FLOW_MODE_NONE: 1726 case FLOW_MODE_NONE:
1719 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 1727 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index cae2edf23004..bfeba5b9cd7a 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -520,10 +520,16 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
520 520
521 switch (sky2->speed) { 521 switch (sky2->speed) {
522 case SPEED_1000: 522 case SPEED_1000:
523 reg &= ~GM_GPCR_SPEED_100;
523 reg |= GM_GPCR_SPEED_1000; 524 reg |= GM_GPCR_SPEED_1000;
524 /* fallthru */ 525 break;
525 case SPEED_100: 526 case SPEED_100:
527 reg &= ~GM_GPCR_SPEED_1000;
526 reg |= GM_GPCR_SPEED_100; 528 reg |= GM_GPCR_SPEED_100;
529 break;
530 case SPEED_10:
531 reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
532 break;
527 } 533 }
528 534
529 if (sky2->duplex == DUPLEX_FULL) 535 if (sky2->duplex == DUPLEX_FULL)
@@ -1446,6 +1452,29 @@ static void sky2_link_up(struct sky2_port *sky2)
1446 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); 1452 sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
1447 1453
1448 reg = gma_read16(hw, port, GM_GP_CTRL); 1454 reg = gma_read16(hw, port, GM_GP_CTRL);
1455 if (sky2->autoneg == AUTONEG_DISABLE) {
1456 reg |= GM_GPCR_AU_ALL_DIS;
1457
1458 /* Is write/read necessary? Copied from sky2_mac_init */
1459 gma_write16(hw, port, GM_GP_CTRL, reg);
1460 gma_read16(hw, port, GM_GP_CTRL);
1461
1462 switch (sky2->speed) {
1463 case SPEED_1000:
1464 reg &= ~GM_GPCR_SPEED_100;
1465 reg |= GM_GPCR_SPEED_1000;
1466 break;
1467 case SPEED_100:
1468 reg &= ~GM_GPCR_SPEED_1000;
1469 reg |= GM_GPCR_SPEED_100;
1470 break;
1471 case SPEED_10:
1472 reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
1473 break;
1474 }
1475 } else
1476 reg &= ~GM_GPCR_AU_ALL_DIS;
1477
1449 if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE) 1478 if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE)
1450 reg |= GM_GPCR_DUP_FULL; 1479 reg |= GM_GPCR_DUP_FULL;
1451 1480
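Both the skge and sky2 hunks drop the old switch fallthrough when programming the GMAC speed bits: each case now clears the other speed bit explicitly, and SPEED_10 clears both, so the register always encodes exactly one forced speed. A small helper, written only to illustrate the pattern and assuming the GM_GPCR_* and SPEED_* macros from the driver headers:

static u16 gm_gpcr_apply_speed(u16 reg, int speed)
{
	reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);	/* start from a clean slate */

	switch (speed) {
	case SPEED_1000:
		reg |= GM_GPCR_SPEED_1000;
		break;
	case SPEED_100:
		reg |= GM_GPCR_SPEED_100;
		break;
	case SPEED_10:
	default:
		break;		/* both bits clear, as in the hunks above */
	}
	return reg;
}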
diff --git a/drivers/net/tokenring/smctr.h b/drivers/net/tokenring/smctr.h
index b306c7e4c793..88dfa2e01d6e 100644
--- a/drivers/net/tokenring/smctr.h
+++ b/drivers/net/tokenring/smctr.h
@@ -1042,7 +1042,7 @@ typedef struct net_local {
1042 __u16 functional_address[2]; 1042 __u16 functional_address[2];
1043 __u16 bitwise_group_address[2]; 1043 __u16 bitwise_group_address[2];
1044 1044
1045 __u8 *ptr_ucode; 1045 const __u8 *ptr_ucode;
1046 1046
1047 __u8 cleanup; 1047 __u8 cleanup;
1048 1048
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 98a76f10a0f7..dfc24016ba81 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1872,7 +1872,7 @@ static int atmel_set_encodeext(struct net_device *dev,
1872 struct atmel_private *priv = netdev_priv(dev); 1872 struct atmel_private *priv = netdev_priv(dev);
1873 struct iw_point *encoding = &wrqu->encoding; 1873 struct iw_point *encoding = &wrqu->encoding;
1874 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 1874 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1875 int idx, key_len; 1875 int idx, key_len, alg = ext->alg, set_key = 1;
1876 1876
1877 /* Determine and validate the key index */ 1877 /* Determine and validate the key index */
1878 idx = encoding->flags & IW_ENCODE_INDEX; 1878 idx = encoding->flags & IW_ENCODE_INDEX;
@@ -1883,39 +1883,42 @@ static int atmel_set_encodeext(struct net_device *dev,
1883 } else 1883 } else
1884 idx = priv->default_key; 1884 idx = priv->default_key;
1885 1885
1886 if ((encoding->flags & IW_ENCODE_DISABLED) || 1886 if (encoding->flags & IW_ENCODE_DISABLED)
1887 ext->alg == IW_ENCODE_ALG_NONE) { 1887 alg = IW_ENCODE_ALG_NONE;
1888 priv->wep_is_on = 0;
1889 priv->encryption_level = 0;
1890 priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
1891 }
1892 1888
1893 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) 1889 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
1894 priv->default_key = idx; 1890 priv->default_key = idx;
1891 set_key = ext->key_len > 0 ? 1 : 0;
1892 }
1895 1893
1896 /* Set the requested key */ 1894 if (set_key) {
1897 switch (ext->alg) { 1895 /* Set the requested key first */
1898 case IW_ENCODE_ALG_NONE: 1896 switch (alg) {
1899 break; 1897 case IW_ENCODE_ALG_NONE:
1900 case IW_ENCODE_ALG_WEP: 1898 priv->wep_is_on = 0;
1901 if (ext->key_len > 5) { 1899 priv->encryption_level = 0;
1902 priv->wep_key_len[idx] = 13; 1900 priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
1903 priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128; 1901 break;
1904 priv->encryption_level = 2; 1902 case IW_ENCODE_ALG_WEP:
1905 } else if (ext->key_len > 0) { 1903 if (ext->key_len > 5) {
1906 priv->wep_key_len[idx] = 5; 1904 priv->wep_key_len[idx] = 13;
1907 priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64; 1905 priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
1908 priv->encryption_level = 1; 1906 priv->encryption_level = 2;
1909 } else { 1907 } else if (ext->key_len > 0) {
1908 priv->wep_key_len[idx] = 5;
1909 priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
1910 priv->encryption_level = 1;
1911 } else {
1912 return -EINVAL;
1913 }
1914 priv->wep_is_on = 1;
1915 memset(priv->wep_keys[idx], 0, 13);
1916 key_len = min ((int)ext->key_len, priv->wep_key_len[idx]);
1917 memcpy(priv->wep_keys[idx], ext->key, key_len);
1918 break;
1919 default:
1910 return -EINVAL; 1920 return -EINVAL;
1911 } 1921 }
1912 priv->wep_is_on = 1;
1913 memset(priv->wep_keys[idx], 0, 13);
1914 key_len = min ((int)ext->key_len, priv->wep_key_len[idx]);
1915 memcpy(priv->wep_keys[idx], ext->key, key_len);
1916 break;
1917 default:
1918 return -EINVAL;
1919 } 1922 }
1920 1923
1921 return -EINPROGRESS; 1924 return -EINPROGRESS;
@@ -3061,17 +3064,26 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
3061 } 3064 }
3062 3065
3063 if (status == C80211_MGMT_SC_Success && priv->wep_is_on) { 3066 if (status == C80211_MGMT_SC_Success && priv->wep_is_on) {
3067 int should_associate = 0;
3064 /* WEP */ 3068 /* WEP */
3065 if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum) 3069 if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum)
3066 return; 3070 return;
3067 3071
3068 if (trans_seq_no == 0x0002 && 3072 if (system == C80211_MGMT_AAN_OPENSYSTEM) {
3069 auth->el_id == C80211_MGMT_ElementID_ChallengeText) { 3073 if (trans_seq_no == 0x0002) {
3070 send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len); 3074 should_associate = 1;
3071 return; 3075 }
3076 } else if (system == C80211_MGMT_AAN_SHAREDKEY) {
3077 if (trans_seq_no == 0x0002 &&
3078 auth->el_id == C80211_MGMT_ElementID_ChallengeText) {
3079 send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len);
3080 return;
3081 } else if (trans_seq_no == 0x0004) {
3082 should_associate = 1;
3083 }
3072 } 3084 }
3073 3085
3074 if (trans_seq_no == 0x0004) { 3086 if (should_associate) {
3075 if(priv->station_was_associated) { 3087 if(priv->station_was_associated) {
3076 atmel_enter_state(priv, STATION_STATE_REASSOCIATING); 3088 atmel_enter_state(priv, STATION_STATE_REASSOCIATING);
3077 send_association_request(priv, 1); 3089 send_association_request(priv, 1);
@@ -3084,11 +3096,13 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
3084 } 3096 }
3085 } 3097 }
3086 3098
3087 if (status == C80211_MGMT_SC_AuthAlgNotSupported) { 3099 if (status == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) {
3088 /* Do opensystem first, then try sharedkey */ 3100 /* Do opensystem first, then try sharedkey */
3089 if (system == C80211_MGMT_AAN_OPENSYSTEM) { 3101 if (system == WLAN_AUTH_OPEN) {
3090 priv->CurrentAuthentTransactionSeqNum = 0x001; 3102 priv->CurrentAuthentTransactionSeqNum = 0x001;
3091 send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0); 3103 priv->exclude_unencrypted = 1;
3104 send_authentication_request(priv, WLAN_AUTH_SHARED_KEY, NULL, 0);
3105 return;
3092 } else if (priv->connect_to_any_BSS) { 3106 } else if (priv->connect_to_any_BSS) {
3093 int bss_index; 3107 int bss_index;
3094 3108
@@ -3439,10 +3453,13 @@ static void atmel_management_timer(u_long a)
3439 priv->AuthenticationRequestRetryCnt = 0; 3453 priv->AuthenticationRequestRetryCnt = 0;
3440 restart_search(priv); 3454 restart_search(priv);
3441 } else { 3455 } else {
3456 int auth = C80211_MGMT_AAN_OPENSYSTEM;
3442 priv->AuthenticationRequestRetryCnt++; 3457 priv->AuthenticationRequestRetryCnt++;
3443 priv->CurrentAuthentTransactionSeqNum = 0x0001; 3458 priv->CurrentAuthentTransactionSeqNum = 0x0001;
3444 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); 3459 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
3445 send_authentication_request(priv, C80211_MGMT_AAN_OPENSYSTEM, NULL, 0); 3460 if (priv->wep_is_on && priv->exclude_unencrypted)
3461 auth = C80211_MGMT_AAN_SHAREDKEY;
3462 send_authentication_request(priv, auth, NULL, 0);
3446 } 3463 }
3447 break; 3464 break;
3448 3465
@@ -3541,12 +3558,15 @@ static void atmel_command_irq(struct atmel_private *priv)
3541 priv->station_was_associated = priv->station_is_associated; 3558 priv->station_was_associated = priv->station_is_associated;
3542 atmel_enter_state(priv, STATION_STATE_READY); 3559 atmel_enter_state(priv, STATION_STATE_READY);
3543 } else { 3560 } else {
3561 int auth = C80211_MGMT_AAN_OPENSYSTEM;
3544 priv->AuthenticationRequestRetryCnt = 0; 3562 priv->AuthenticationRequestRetryCnt = 0;
3545 atmel_enter_state(priv, STATION_STATE_AUTHENTICATING); 3563 atmel_enter_state(priv, STATION_STATE_AUTHENTICATING);
3546 3564
3547 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES); 3565 mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
3548 priv->CurrentAuthentTransactionSeqNum = 0x0001; 3566 priv->CurrentAuthentTransactionSeqNum = 0x0001;
3549 send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0); 3567 if (priv->wep_is_on && priv->exclude_unencrypted)
3568 auth = C80211_MGMT_AAN_SHAREDKEY;
3569 send_authentication_request(priv, auth, NULL, 0);
3550 } 3570 }
3551 return; 3571 return;
3552 } 3572 }
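
The authenticate() hunks above split the WEP handshake by algorithm: an open-system exchange associates after transaction sequence 0x0002, while a shared-key exchange first answers the challenge text and only associates after sequence 0x0004. A standalone sketch that distils that branch (illustrative names and return values, not the driver's own):

#include <stdio.h>

/* Distilled from the authenticate() branch added above; standalone
 * illustration only, the names and codes are not the driver's. */
enum auth_action { AUTH_IGNORE, AUTH_ANSWER_CHALLENGE, AUTH_ASSOCIATE };

static enum auth_action next_step(int shared_key, int trans_seq_no,
                                  int got_challenge_text)
{
        if (!shared_key)                        /* open system */
                return trans_seq_no == 2 ? AUTH_ASSOCIATE : AUTH_IGNORE;
        if (trans_seq_no == 2 && got_challenge_text)
                return AUTH_ANSWER_CHALLENGE;   /* frame 3 carries the reply */
        if (trans_seq_no == 4)
                return AUTH_ASSOCIATE;          /* shared-key handshake done */
        return AUTH_IGNORE;
}

int main(void)
{
        printf("%d %d %d\n",
               next_step(0, 2, 0),      /* open, seq 2    -> associate        */
               next_step(1, 2, 1),      /* shared, seq 2  -> answer challenge */
               next_step(1, 4, 0));     /* shared, seq 4  -> associate        */
        return 0;
}

Running it prints "2 1 2" (associate / answer challenge / associate), matching the three cases the patch distinguishes.
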
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index cf373625fc70..98122f3a4bc2 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -950,16 +950,8 @@ wv_82593_cmd(struct net_device * dev,
950static inline int 950static inline int
951wv_diag(struct net_device * dev) 951wv_diag(struct net_device * dev)
952{ 952{
953 int ret = FALSE; 953 return(wv_82593_cmd(dev, "wv_diag(): diagnose",
954 954 OP0_DIAGNOSE, SR0_DIAGNOSE_PASSED));
955 if(wv_82593_cmd(dev, "wv_diag(): diagnose",
956 OP0_DIAGNOSE, SR0_DIAGNOSE_PASSED))
957 ret = TRUE;
958
959#ifdef DEBUG_CONFIG_ERRORS
960 printk(KERN_INFO "wavelan_cs: i82593 Self Test failed!\n");
961#endif
962 return(ret);
963} /* wv_diag */ 955} /* wv_diag */
964 956
965/*------------------------------------------------------------------*/ 957/*------------------------------------------------------------------*/
@@ -3604,8 +3596,8 @@ wv_82593_config(struct net_device * dev)
3604 cfblk.lin_prio = 0; /* conform to 802.3 backoff algoritm */ 3596 cfblk.lin_prio = 0; /* conform to 802.3 backoff algoritm */
3605 cfblk.exp_prio = 5; /* conform to 802.3 backoff algoritm */ 3597 cfblk.exp_prio = 5; /* conform to 802.3 backoff algoritm */
3606 cfblk.bof_met = 1; /* conform to 802.3 backoff algoritm */ 3598 cfblk.bof_met = 1; /* conform to 802.3 backoff algoritm */
3607 cfblk.ifrm_spc = 0x20; /* 32 bit times interframe spacing */ 3599 cfblk.ifrm_spc = 0x20 >> 4; /* 32 bit times interframe spacing */
3608 cfblk.slottim_low = 0x20; /* 32 bit times slot time */ 3600 cfblk.slottim_low = 0x20 >> 5; /* 32 bit times slot time */
3609 cfblk.slottim_hi = 0x0; 3601 cfblk.slottim_hi = 0x0;
3610 cfblk.max_retr = 15; 3602 cfblk.max_retr = 15;
3611 cfblk.prmisc = ((lp->promiscuous) ? TRUE: FALSE); /* Promiscuous mode */ 3603 cfblk.prmisc = ((lp->promiscuous) ? TRUE: FALSE); /* Promiscuous mode */
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 062fb100d94c..afc4e88551ad 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -359,7 +359,7 @@ ccw_device_set_online(struct ccw_device *cdev)
359 else 359 else
360 pr_debug("ccw_device_offline returned %d, device %s\n", 360 pr_debug("ccw_device_offline returned %d, device %s\n",
361 ret, cdev->dev.bus_id); 361 ret, cdev->dev.bus_id);
362 return (ret = 0) ? -ENODEV : ret; 362 return (ret == 0) ? -ENODEV : ret;
363} 363}
364 364
365static ssize_t 365static ssize_t
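
The one-character fix above matters because "(ret = 0)" assigns rather than compares: the expression evaluates to 0, the conditional always takes its false branch, and the function returns the freshly zeroed ret, so a failure here was reported as success. A minimal userspace sketch of the bug class (MY_ENODEV is just a stand-in constant, not the driver code):

#include <stdio.h>

#define MY_ENODEV 19    /* stand-in for the errno constant */

/* Illustrative only -- not the driver function. */
static int report(int ret)
{
        /* Typo'd version: "(ret = 0)" assigns 0, evaluates false, and the
         * function returns the freshly zeroed ret, i.e. success:
         *      return (ret = 0) ? -MY_ENODEV : ret;
         */
        return (ret == 0) ? -MY_ENODEV : ret;   /* fixed comparison */
}

int main(void)
{
        printf("%d\n", report(0));      /* -19: map "nothing happened" to -ENODEV */
        printf("%d\n", report(-16));    /* -16: pass a real error through         */
        return 0;
}

The explicit parentheses around the assignment typically keep compilers from flagging it; one common defensive habit is to write the constant first, "(0 == ret)", which turns the same typo into a hard compile error.
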
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index d2a5b04d7cba..85b1020a1fcc 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -405,7 +405,7 @@ __ccw_device_disband_start(struct ccw_device *cdev)
405 cdev->private->iretry = 5; 405 cdev->private->iretry = 5;
406 cdev->private->imask >>= 1; 406 cdev->private->imask >>= 1;
407 } 407 }
408 ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV); 408 ccw_device_disband_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
409} 409}
410 410
411/* 411/*
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index dad4dd9887c9..6c762b43f921 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -317,7 +317,6 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
317 /* 317 /*
318 * We have ending status but no sense information. Do a basic sense. 318 * We have ending status but no sense information. Do a basic sense.
319 */ 319 */
320 sch = to_subchannel(cdev->dev.parent);
321 sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE; 320 sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE;
322 sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw); 321 sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw);
323 sch->sense_ccw.count = SENSE_MAX_COUNT; 322 sch->sense_ccw.count = SENSE_MAX_COUNT;
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 6229ba4995ad..9cf88d7201d3 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -98,9 +98,9 @@ lcs_register_debug_facility(void)
98 return -ENOMEM; 98 return -ENOMEM;
99 } 99 }
100 debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view); 100 debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
101 debug_set_level(lcs_dbf_setup, 4); 101 debug_set_level(lcs_dbf_setup, 2);
102 debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view); 102 debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
103 debug_set_level(lcs_dbf_trace, 4); 103 debug_set_level(lcs_dbf_trace, 2);
104 return 0; 104 return 0;
105} 105}
106 106
@@ -1292,9 +1292,8 @@ lcs_set_multicast_list(struct net_device *dev)
1292 LCS_DBF_TEXT(4, trace, "setmulti"); 1292 LCS_DBF_TEXT(4, trace, "setmulti");
1293 card = (struct lcs_card *) dev->priv; 1293 card = (struct lcs_card *) dev->priv;
1294 1294
1295 if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) { 1295 if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
1296 schedule_work(&card->kernel_thread_starter); 1296 schedule_work(&card->kernel_thread_starter);
1297 }
1298} 1297}
1299 1298
1300#endif /* CONFIG_IP_MULTICAST */ 1299#endif /* CONFIG_IP_MULTICAST */
@@ -1459,6 +1458,8 @@ lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
1459 lcs_release_buffer(channel, buffer); 1458 lcs_release_buffer(channel, buffer);
1460 card = (struct lcs_card *) 1459 card = (struct lcs_card *)
1461 ((char *) channel - offsetof(struct lcs_card, write)); 1460 ((char *) channel - offsetof(struct lcs_card, write));
1461 if (netif_queue_stopped(card->dev))
1462 netif_wake_queue(card->dev);
1462 spin_lock(&card->lock); 1463 spin_lock(&card->lock);
1463 card->tx_emitted--; 1464 card->tx_emitted--;
1464 if (card->tx_emitted <= 0 && card->tx_buffer != NULL) 1465 if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
@@ -1478,6 +1479,7 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
1478 struct net_device *dev) 1479 struct net_device *dev)
1479{ 1480{
1480 struct lcs_header *header; 1481 struct lcs_header *header;
1482 int rc = 0;
1481 1483
1482 LCS_DBF_TEXT(5, trace, "hardxmit"); 1484 LCS_DBF_TEXT(5, trace, "hardxmit");
1483 if (skb == NULL) { 1485 if (skb == NULL) {
@@ -1492,10 +1494,8 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
1492 card->stats.tx_carrier_errors++; 1494 card->stats.tx_carrier_errors++;
1493 return 0; 1495 return 0;
1494 } 1496 }
1495 if (netif_queue_stopped(dev) ) { 1497 netif_stop_queue(card->dev);
1496 card->stats.tx_dropped++; 1498 spin_lock(&card->lock);
1497 return -EBUSY;
1498 }
1499 if (card->tx_buffer != NULL && 1499 if (card->tx_buffer != NULL &&
1500 card->tx_buffer->count + sizeof(struct lcs_header) + 1500 card->tx_buffer->count + sizeof(struct lcs_header) +
1501 skb->len + sizeof(u16) > LCS_IOBUFFERSIZE) 1501 skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
@@ -1506,7 +1506,8 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
1506 card->tx_buffer = lcs_get_buffer(&card->write); 1506 card->tx_buffer = lcs_get_buffer(&card->write);
1507 if (card->tx_buffer == NULL) { 1507 if (card->tx_buffer == NULL) {
1508 card->stats.tx_dropped++; 1508 card->stats.tx_dropped++;
1509 return -EBUSY; 1509 rc = -EBUSY;
1510 goto out;
1510 } 1511 }
1511 card->tx_buffer->callback = lcs_txbuffer_cb; 1512 card->tx_buffer->callback = lcs_txbuffer_cb;
1512 card->tx_buffer->count = 0; 1513 card->tx_buffer->count = 0;
@@ -1518,13 +1519,18 @@ __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
1518 header->type = card->lan_type; 1519 header->type = card->lan_type;
1519 header->slot = card->portno; 1520 header->slot = card->portno;
1520 memcpy(header + 1, skb->data, skb->len); 1521 memcpy(header + 1, skb->data, skb->len);
1522 spin_unlock(&card->lock);
1521 card->stats.tx_bytes += skb->len; 1523 card->stats.tx_bytes += skb->len;
1522 card->stats.tx_packets++; 1524 card->stats.tx_packets++;
1523 dev_kfree_skb(skb); 1525 dev_kfree_skb(skb);
1524 if (card->tx_emitted <= 0) 1526 netif_wake_queue(card->dev);
1527 spin_lock(&card->lock);
1528 if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
1525 /* If this is the first tx buffer emit it immediately. */ 1529 /* If this is the first tx buffer emit it immediately. */
1526 __lcs_emit_txbuffer(card); 1530 __lcs_emit_txbuffer(card);
1527 return 0; 1531out:
1532 spin_unlock(&card->lock);
1533 return rc;
1528} 1534}
1529 1535
1530static int 1536static int
@@ -1535,9 +1541,7 @@ lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
1535 1541
1536 LCS_DBF_TEXT(5, trace, "pktxmit"); 1542 LCS_DBF_TEXT(5, trace, "pktxmit");
1537 card = (struct lcs_card *) dev->priv; 1543 card = (struct lcs_card *) dev->priv;
1538 spin_lock(&card->lock);
1539 rc = __lcs_start_xmit(card, skb, dev); 1544 rc = __lcs_start_xmit(card, skb, dev);
1540 spin_unlock(&card->lock);
1541 return rc; 1545 return rc;
1542} 1546}
1543 1547
@@ -2319,7 +2323,6 @@ __init lcs_init_module(void)
2319 PRINT_ERR("Initialization failed\n"); 2323 PRINT_ERR("Initialization failed\n");
2320 return rc; 2324 return rc;
2321 } 2325 }
2322
2323 return 0; 2326 return 0;
2324} 2327}
2325 2328
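
The lcs transmit rework above moves flow control onto the netdev queue: the driver stops the queue up front, drops the old -EBUSY-on-stopped-queue path, wakes the queue again once the skb has been copied into a buffer, and also wakes it from the buffer-completion callback. A schematic of that stop/wake pattern, with made-up my_* names around the real netif_* helpers (a sketch of the shape only, not a buildable driver):

/* Schematic of the stop/wake flow-control pattern used above; my_card,
 * my_xmit() and my_tx_done() are illustrative names. */
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_card {
        struct net_device *dev;
        int buffers_free;               /* protected by the driver's own lock */
};

static int my_xmit(struct my_card *card, struct sk_buff *skb)
{
        netif_stop_queue(card->dev);    /* assume we are full until proven otherwise */
        if (!card->buffers_free)
                return -EBUSY;          /* genuinely out of buffers; stay stopped */
        card->buffers_free--;
        /* ... copy the skb into the hardware buffer and kick the channel ... */
        dev_kfree_skb(skb);
        netif_wake_queue(card->dev);    /* still room, let the stack continue */
        return 0;
}

/* Buffer-completion callback: a buffer came back, so transmission may resume. */
static void my_tx_done(struct my_card *card)
{
        card->buffers_free++;
        if (netif_queue_stopped(card->dev))
                netif_wake_queue(card->dev);
}
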
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 08e60ad43916..2fad5e40c2e4 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -95,7 +95,7 @@ do { \
95 */ 95 */
96#define LCS_ILLEGAL_OFFSET 0xffff 96#define LCS_ILLEGAL_OFFSET 0xffff
97#define LCS_IOBUFFERSIZE 0x5000 97#define LCS_IOBUFFERSIZE 0x5000
98#define LCS_NUM_BUFFS 8 /* needs to be power of 2 */ 98#define LCS_NUM_BUFFS 32 /* needs to be power of 2 */
99#define LCS_MAC_LENGTH 6 99#define LCS_MAC_LENGTH 6
100#define LCS_INVALID_PORT_NO -1 100#define LCS_INVALID_PORT_NO -1
101#define LCS_LANCMD_TIMEOUT_DEFAULT 5 101#define LCS_LANCMD_TIMEOUT_DEFAULT 5
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 9a064d4727ad..4df0fcd7b10b 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -1076,16 +1076,6 @@ qeth_get_qdio_q_format(struct qeth_card *card)
1076} 1076}
1077 1077
1078static inline int 1078static inline int
1079qeth_isdigit(char * buf)
1080{
1081 while (*buf) {
1082 if (!isdigit(*buf++))
1083 return 0;
1084 }
1085 return 1;
1086}
1087
1088static inline int
1089qeth_isxdigit(char * buf) 1079qeth_isxdigit(char * buf)
1090{ 1080{
1091 while (*buf) { 1081 while (*buf) {
@@ -1104,33 +1094,17 @@ qeth_ipaddr4_to_string(const __u8 *addr, char *buf)
1104static inline int 1094static inline int
1105qeth_string_to_ipaddr4(const char *buf, __u8 *addr) 1095qeth_string_to_ipaddr4(const char *buf, __u8 *addr)
1106{ 1096{
1107 const char *start, *end; 1097 int count = 0, rc = 0;
1108 char abuf[4]; 1098 int in[4];
1109 char *tmp; 1099
1110 int len; 1100 rc = sscanf(buf, "%d.%d.%d.%d%n",
1111 int i; 1101 &in[0], &in[1], &in[2], &in[3], &count);
1112 1102 if (rc != 4 || count)
1113 start = buf; 1103 return -EINVAL;
1114 for (i = 0; i < 4; i++) { 1104 for (count = 0; count < 4; count++) {
1115 if (i == 3) { 1105 if (in[count] > 255)
1116 end = strchr(start,0xa);
1117 if (end)
1118 len = end - start;
1119 else
1120 len = strlen(start);
1121 }
1122 else {
1123 end = strchr(start, '.');
1124 len = end - start;
1125 }
1126 if ((len <= 0) || (len > 3))
1127 return -EINVAL;
1128 memset(abuf, 0, 4);
1129 strncpy(abuf, start, len);
1130 if (!qeth_isdigit(abuf))
1131 return -EINVAL; 1106 return -EINVAL;
1132 addr[i] = simple_strtoul(abuf, &tmp, 10); 1107 addr[count] = in[count];
1133 start = end + 1;
1134 } 1108 }
1135 return 0; 1109 return 0;
1136} 1110}
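
qeth_string_to_ipaddr4() above swaps the hand-rolled dot splitting for a single sscanf(). The same idea in standalone userspace form, with an explicit range check and a trailing-junk check via %n (plain libc illustration, not the kernel helper):

#include <stdio.h>

/* Parse "a.b.c.d" into addr[4]; returns 0 on success, -1 on bad input. */
static int parse_ipv4(const char *buf, unsigned char *addr)
{
        int in[4];
        int count = 0;
        int i;

        /* %n records how many characters were consumed. */
        if (sscanf(buf, "%d.%d.%d.%d%n",
                   &in[0], &in[1], &in[2], &in[3], &count) != 4)
                return -1;
        if (buf[count] != '\0' && buf[count] != '\n')
                return -1;              /* trailing junk after the address */
        for (i = 0; i < 4; i++) {
                if (in[i] < 0 || in[i] > 255)
                        return -1;
                addr[i] = (unsigned char)in[i];
        }
        return 0;
}

int main(void)
{
        unsigned char a[4];

        if (parse_ipv4("192.168.0.1", a) == 0)
                printf("%u.%u.%u.%u\n", a[0], a[1], a[2], a[3]);
        return 0;
}

parse_ipv4("192.168.0.1", a) fills a[] and returns 0; inputs such as "1.2.3" or "1.2.3.999" are rejected.
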
@@ -1149,36 +1123,44 @@ qeth_ipaddr6_to_string(const __u8 *addr, char *buf)
1149static inline int 1123static inline int
1150qeth_string_to_ipaddr6(const char *buf, __u8 *addr) 1124qeth_string_to_ipaddr6(const char *buf, __u8 *addr)
1151{ 1125{
1152 const char *start, *end; 1126 char *end, *start;
1153 u16 *tmp_addr; 1127 __u16 *in;
1154 char abuf[5]; 1128 char num[5];
1155 char *tmp; 1129 int num2, cnt, out, found, save_cnt;
1156 int len; 1130 unsigned short in_tmp[8] = {0, };
1157 int i; 1131
1158 1132 cnt = out = found = save_cnt = num2 = 0;
1159 tmp_addr = (u16 *)addr; 1133 end = start = (char *) buf;
1160 start = buf; 1134 in = (__u16 *) addr;
1161 for (i = 0; i < 8; i++) { 1135 memset(in, 0, 16);
1162 if (i == 7) { 1136 while (end) {
1163 end = strchr(start,0xa); 1137 end = strchr(end,':');
1164 if (end) 1138 if (end == NULL) {
1165 len = end - start; 1139 end = (char *)buf + (strlen(buf));
1166 else 1140 out = 1;
1167 len = strlen(start); 1141 }
1168 } 1142 if ((end - start)) {
1169 else { 1143 memset(num, 0, 5);
1170 end = strchr(start, ':'); 1144 memcpy(num, start, end - start);
1171 len = end - start; 1145 if (!qeth_isxdigit(num))
1146 return -EINVAL;
1147 sscanf(start, "%x", &num2);
1148 if (found)
1149 in_tmp[save_cnt++] = num2;
1150 else
1151 in[cnt++] = num2;
1152 if (out)
1153 break;
1154 } else {
1155 if (found)
1156 return -EINVAL;
1157 found = 1;
1172 } 1158 }
1173 if ((len <= 0) || (len > 4)) 1159 start = ++end;
1174 return -EINVAL; 1160 }
1175 memset(abuf, 0, 5); 1161 cnt = 7;
1176 strncpy(abuf, start, len); 1162 while (save_cnt)
1177 if (!qeth_isxdigit(abuf)) 1163 in[cnt--] = in_tmp[--save_cnt];
1178 return -EINVAL;
1179 tmp_addr[i] = simple_strtoul(abuf, &tmp, 16);
1180 start = end + 1;
1181 }
1182 return 0; 1164 return 0;
1183} 1165}
1184 1166
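
The new qeth_string_to_ipaddr6() collects the groups before a "::" into the address, buffers the groups after it, and then copies that buffer into the tail so the gap stays zero-filled. A tiny standalone demonstration of that back-fill step (hard-coded groups for "fe80::1", not the driver code):

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned short in[8] = { 0 };
        unsigned short before[] = { 0xfe80 };   /* groups left of "::"  */
        unsigned short after[]  = { 0x0001 };   /* groups right of "::" */
        int nb = 1, na = 1, cnt = 7, i;

        memcpy(in, before, nb * sizeof(in[0])); /* front of the address */
        while (na)                              /* tail, filled from the back */
                in[cnt--] = after[--na];

        for (i = 0; i < 8; i++)
                printf("%04x%c", in[i], i == 7 ? '\n' : ':');
        return 0;
}

Running it prints fe80:0000:0000:0000:0000:0000:0000:0001.
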
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index b02313127780..82cb4af2f0e7 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -59,8 +59,7 @@ qeth_eddp_free_context(struct qeth_eddp_context *ctx)
59 for (i = 0; i < ctx->num_pages; ++i) 59 for (i = 0; i < ctx->num_pages; ++i)
60 free_page((unsigned long)ctx->pages[i]); 60 free_page((unsigned long)ctx->pages[i]);
61 kfree(ctx->pages); 61 kfree(ctx->pages);
62 if (ctx->elements != NULL) 62 kfree(ctx->elements);
63 kfree(ctx->elements);
64 kfree(ctx); 63 kfree(ctx);
65} 64}
66 65
@@ -413,6 +412,13 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
413 412
414 QETH_DBF_TEXT(trace, 5, "eddpftcp"); 413 QETH_DBF_TEXT(trace, 5, "eddpftcp");
415 eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl; 414 eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
415 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
416 eddp->skb_offset += sizeof(struct ethhdr);
417#ifdef CONFIG_QETH_VLAN
418 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
419 eddp->skb_offset += VLAN_HLEN;
420#endif /* CONFIG_QETH_VLAN */
421 }
416 tcph = eddp->skb->h.th; 422 tcph = eddp->skb->h.th;
417 while (eddp->skb_offset < eddp->skb->len) { 423 while (eddp->skb_offset < eddp->skb->len) {
418 data_len = min((int)skb_shinfo(eddp->skb)->tso_size, 424 data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
@@ -483,6 +489,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
483 return -ENOMEM; 489 return -ENOMEM;
484 } 490 }
485 if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 491 if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
492 skb->mac.raw = (skb->data) + sizeof(struct qeth_hdr);
486 memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN); 493 memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
487#ifdef CONFIG_QETH_VLAN 494#ifdef CONFIG_QETH_VLAN
488 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { 495 if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
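
The qeth_eddp change above grows skb_offset for layer-2 cards because those frames carry an Ethernet header (and possibly a VLAN tag) in front of the IP/TCP headers that EDDP segmentation must skip. A quick worked calculation (ETH_HLEN and VLAN_HLEN are the real 14- and 4-byte constants; the qeth/IP/TCP header sizes are only example values):

#include <stdio.h>

int main(void)
{
        int qeth_hdr_len = 32, nhl = 20, thl = 20;      /* example sizes */
        int eth_hlen = 14, vlan_hlen = 4;               /* ETH_HLEN, VLAN_HLEN */

        int offset_l3      = qeth_hdr_len + nhl + thl;  /* layer-3 card */
        int offset_l2      = offset_l3 + eth_hlen;      /* layer-2 card */
        int offset_l2_vlan = offset_l2 + vlan_hlen;     /* tagged frame */

        printf("l3=%d l2=%d l2+vlan=%d\n", offset_l3, offset_l2, offset_l2_vlan);
        return 0;
}
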
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 410abeada6c4..dba7f7f02e79 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -516,7 +516,8 @@ __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
516 QETH_DBF_TEXT(setup, 3, "setoffl"); 516 QETH_DBF_TEXT(setup, 3, "setoffl");
517 QETH_DBF_HEX(setup, 3, &card, sizeof(void *)); 517 QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
518 518
519 netif_carrier_off(card->dev); 519 if (card->dev && netif_carrier_ok(card->dev))
520 netif_carrier_off(card->dev);
520 recover_flag = card->state; 521 recover_flag = card->state;
521 if (qeth_stop_card(card, recovery_mode) == -ERESTARTSYS){ 522 if (qeth_stop_card(card, recovery_mode) == -ERESTARTSYS){
522 PRINT_WARN("Stopping card %s interrupted by user!\n", 523 PRINT_WARN("Stopping card %s interrupted by user!\n",
@@ -1679,6 +1680,7 @@ qeth_cmd_timeout(unsigned long data)
1679 spin_unlock_irqrestore(&reply->card->lock, flags); 1680 spin_unlock_irqrestore(&reply->card->lock, flags);
1680} 1681}
1681 1682
1683
1682static struct qeth_ipa_cmd * 1684static struct qeth_ipa_cmd *
1683qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) 1685qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1684{ 1686{
@@ -1699,7 +1701,8 @@ qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1699 QETH_CARD_IFNAME(card), 1701 QETH_CARD_IFNAME(card),
1700 card->info.chpid); 1702 card->info.chpid);
1701 card->lan_online = 0; 1703 card->lan_online = 0;
1702 netif_carrier_off(card->dev); 1704 if (card->dev && netif_carrier_ok(card->dev))
1705 netif_carrier_off(card->dev);
1703 return NULL; 1706 return NULL;
1704 case IPA_CMD_STARTLAN: 1707 case IPA_CMD_STARTLAN:
1705 PRINT_INFO("Link reestablished on %s " 1708 PRINT_INFO("Link reestablished on %s "
@@ -5562,7 +5565,7 @@ qeth_set_multicast_list(struct net_device *dev)
5562 if (card->info.type == QETH_CARD_TYPE_OSN) 5565 if (card->info.type == QETH_CARD_TYPE_OSN)
5563 return ; 5566 return ;
5564 5567
5565 QETH_DBF_TEXT(trace,3,"setmulti"); 5568 QETH_DBF_TEXT(trace, 3, "setmulti");
5566 qeth_delete_mc_addresses(card); 5569 qeth_delete_mc_addresses(card);
5567 if (card->options.layer2) { 5570 if (card->options.layer2) {
5568 qeth_layer2_add_multicast(card); 5571 qeth_layer2_add_multicast(card);
@@ -5579,7 +5582,6 @@ out:
5579 return; 5582 return;
5580 if (qeth_set_thread_start_bit(card, QETH_SET_PROMISC_MODE_THREAD)==0) 5583 if (qeth_set_thread_start_bit(card, QETH_SET_PROMISC_MODE_THREAD)==0)
5581 schedule_work(&card->kernel_thread_starter); 5584 schedule_work(&card->kernel_thread_starter);
5582
5583} 5585}
5584 5586
5585static int 5587static int
@@ -7452,6 +7454,7 @@ qeth_softsetup_card(struct qeth_card *card)
7452 card->lan_online = 1; 7454 card->lan_online = 1;
7453 if (card->info.type==QETH_CARD_TYPE_OSN) 7455 if (card->info.type==QETH_CARD_TYPE_OSN)
7454 goto out; 7456 goto out;
7457 qeth_set_large_send(card, card->options.large_send);
7455 if (card->options.layer2) { 7458 if (card->options.layer2) {
7456 card->dev->features |= 7459 card->dev->features |=
7457 NETIF_F_HW_VLAN_FILTER | 7460 NETIF_F_HW_VLAN_FILTER |
@@ -7468,12 +7471,6 @@ qeth_softsetup_card(struct qeth_card *card)
7468#endif 7471#endif
7469 goto out; 7472 goto out;
7470 } 7473 }
7471 if ((card->options.large_send == QETH_LARGE_SEND_EDDP) ||
7472 (card->options.large_send == QETH_LARGE_SEND_TSO))
7473 card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
7474 else
7475 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
7476
7477 if ((rc = qeth_setadapter_parms(card))) 7474 if ((rc = qeth_setadapter_parms(card)))
7478 QETH_DBF_TEXT_(setup, 2, "2err%d", rc); 7475 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
7479 if ((rc = qeth_start_ipassists(card))) 7476 if ((rc = qeth_start_ipassists(card)))
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 4d7d47cf2394..a5f2ba9a8fdb 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -710,10 +710,9 @@ static inline void
710_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, 710_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
711 struct zfcp_adapter *adapter, 711 struct zfcp_adapter *adapter,
712 struct scsi_cmnd *scsi_cmnd, 712 struct scsi_cmnd *scsi_cmnd,
713 struct zfcp_fsf_req *new_fsf_req) 713 struct zfcp_fsf_req *fsf_req,
714 struct zfcp_fsf_req *old_fsf_req)
714{ 715{
715 struct zfcp_fsf_req *fsf_req =
716 (struct zfcp_fsf_req *)scsi_cmnd->host_scribble;
717 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; 716 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
718 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; 717 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
719 unsigned long flags; 718 unsigned long flags;
@@ -727,19 +726,20 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
727 if (offset == 0) { 726 if (offset == 0) {
728 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); 727 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
729 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); 728 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
730 if (scsi_cmnd->device) { 729 if (scsi_cmnd != NULL) {
731 rec->scsi_id = scsi_cmnd->device->id; 730 if (scsi_cmnd->device) {
732 rec->scsi_lun = scsi_cmnd->device->lun; 731 rec->scsi_id = scsi_cmnd->device->id;
732 rec->scsi_lun = scsi_cmnd->device->lun;
733 }
734 rec->scsi_result = scsi_cmnd->result;
735 rec->scsi_cmnd = (unsigned long)scsi_cmnd;
736 rec->scsi_serial = scsi_cmnd->serial_number;
737 memcpy(rec->scsi_opcode, &scsi_cmnd->cmnd,
738 min((int)scsi_cmnd->cmd_len,
739 ZFCP_DBF_SCSI_OPCODE));
740 rec->scsi_retries = scsi_cmnd->retries;
741 rec->scsi_allowed = scsi_cmnd->allowed;
733 } 742 }
734 rec->scsi_result = scsi_cmnd->result;
735 rec->scsi_cmnd = (unsigned long)scsi_cmnd;
736 rec->scsi_serial = scsi_cmnd->serial_number;
737 memcpy(rec->scsi_opcode,
738 &scsi_cmnd->cmnd,
739 min((int)scsi_cmnd->cmd_len,
740 ZFCP_DBF_SCSI_OPCODE));
741 rec->scsi_retries = scsi_cmnd->retries;
742 rec->scsi_allowed = scsi_cmnd->allowed;
743 if (fsf_req != NULL) { 743 if (fsf_req != NULL) {
744 fcp_rsp = (struct fcp_rsp_iu *) 744 fcp_rsp = (struct fcp_rsp_iu *)
745 &(fsf_req->qtcb->bottom.io.fcp_rsp); 745 &(fsf_req->qtcb->bottom.io.fcp_rsp);
@@ -772,15 +772,8 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
772 rec->fsf_seqno = fsf_req->seq_no; 772 rec->fsf_seqno = fsf_req->seq_no;
773 rec->fsf_issued = fsf_req->issued; 773 rec->fsf_issued = fsf_req->issued;
774 } 774 }
775 if (new_fsf_req != NULL) { 775 rec->type.old_fsf_reqid =
776 rec->type.new_fsf_req.fsf_reqid = 776 (unsigned long) old_fsf_req;
777 (unsigned long)
778 new_fsf_req;
779 rec->type.new_fsf_req.fsf_seqno =
780 new_fsf_req->seq_no;
781 rec->type.new_fsf_req.fsf_issued =
782 new_fsf_req->issued;
783 }
784 } else { 777 } else {
785 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); 778 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
786 dump->total_size = buflen; 779 dump->total_size = buflen;
@@ -801,19 +794,21 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
801inline void 794inline void
802zfcp_scsi_dbf_event_result(const char *tag, int level, 795zfcp_scsi_dbf_event_result(const char *tag, int level,
803 struct zfcp_adapter *adapter, 796 struct zfcp_adapter *adapter,
804 struct scsi_cmnd *scsi_cmnd) 797 struct scsi_cmnd *scsi_cmnd,
798 struct zfcp_fsf_req *fsf_req)
805{ 799{
806 _zfcp_scsi_dbf_event_common("rslt", 800 _zfcp_scsi_dbf_event_common("rslt", tag, level,
807 tag, level, adapter, scsi_cmnd, NULL); 801 adapter, scsi_cmnd, fsf_req, NULL);
808} 802}
809 803
810inline void 804inline void
811zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, 805zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
812 struct scsi_cmnd *scsi_cmnd, 806 struct scsi_cmnd *scsi_cmnd,
813 struct zfcp_fsf_req *new_fsf_req) 807 struct zfcp_fsf_req *new_fsf_req,
808 struct zfcp_fsf_req *old_fsf_req)
814{ 809{
815 _zfcp_scsi_dbf_event_common("abrt", 810 _zfcp_scsi_dbf_event_common("abrt", tag, 1,
816 tag, 1, adapter, scsi_cmnd, new_fsf_req); 811 adapter, scsi_cmnd, new_fsf_req, old_fsf_req);
817} 812}
818 813
819inline void 814inline void
@@ -823,7 +818,7 @@ zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
823 struct zfcp_adapter *adapter = unit->port->adapter; 818 struct zfcp_adapter *adapter = unit->port->adapter;
824 819
825 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst", 820 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst",
826 tag, 1, adapter, scsi_cmnd, NULL); 821 tag, 1, adapter, scsi_cmnd, NULL, NULL);
827} 822}
828 823
829static int 824static int
@@ -856,6 +851,10 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
856 rec->scsi_retries); 851 rec->scsi_retries);
857 len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x", 852 len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x",
858 rec->scsi_allowed); 853 rec->scsi_allowed);
854 if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) {
855 len += zfcp_dbf_view(out_buf + len, "old_fsf_reqid", "0x%0Lx",
856 rec->type.old_fsf_reqid);
857 }
859 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx", 858 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
860 rec->fsf_reqid); 859 rec->fsf_reqid);
861 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x", 860 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
@@ -883,21 +882,6 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
883 min((int)rec->type.fcp.sns_info_len, 882 min((int)rec->type.fcp.sns_info_len,
884 ZFCP_DBF_SCSI_FCP_SNS_INFO), 0, 883 ZFCP_DBF_SCSI_FCP_SNS_INFO), 0,
885 rec->type.fcp.sns_info_len); 884 rec->type.fcp.sns_info_len);
886 } else if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) {
887 len += zfcp_dbf_view(out_buf + len, "fsf_reqid_abort", "0x%0Lx",
888 rec->type.new_fsf_req.fsf_reqid);
889 len += zfcp_dbf_view(out_buf + len, "fsf_seqno_abort", "0x%08x",
890 rec->type.new_fsf_req.fsf_seqno);
891 len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
892 rec->type.new_fsf_req.fsf_issued);
893 } else if ((strncmp(rec->tag, "trst", ZFCP_DBF_TAG_SIZE) == 0) ||
894 (strncmp(rec->tag, "lrst", ZFCP_DBF_TAG_SIZE) == 0)) {
895 len += zfcp_dbf_view(out_buf + len, "fsf_reqid_reset", "0x%0Lx",
896 rec->type.new_fsf_req.fsf_reqid);
897 len += zfcp_dbf_view(out_buf + len, "fsf_seqno_reset", "0x%08x",
898 rec->type.new_fsf_req.fsf_seqno);
899 len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
900 rec->type.new_fsf_req.fsf_issued);
901 } 885 }
902 886
903 len += sprintf(out_buf + len, "\n"); 887 len += sprintf(out_buf + len, "\n");
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index e260d19fa717..7f551d66f47f 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -152,11 +152,6 @@ typedef u32 scsi_lun_t;
152#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100 152#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100
153#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7 153#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7
154 154
155/* Retry 5 times every 2 second, then every minute */
156#define ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES 5
157#define ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP 200
158#define ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP 6000
159
160/* timeout value for "default timer" for fsf requests */ 155/* timeout value for "default timer" for fsf requests */
161#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ); 156#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ);
162 157
@@ -429,11 +424,7 @@ struct zfcp_scsi_dbf_record {
429 u32 fsf_seqno; 424 u32 fsf_seqno;
430 u64 fsf_issued; 425 u64 fsf_issued;
431 union { 426 union {
432 struct { 427 u64 old_fsf_reqid;
433 u64 fsf_reqid;
434 u32 fsf_seqno;
435 u64 fsf_issued;
436 } new_fsf_req;
437 struct { 428 struct {
438 u8 rsp_validity; 429 u8 rsp_validity;
439 u8 rsp_scsi_status; 430 u8 rsp_scsi_status;
@@ -915,8 +906,6 @@ struct zfcp_adapter {
915 wwn_t peer_wwnn; /* P2P peer WWNN */ 906 wwn_t peer_wwnn; /* P2P peer WWNN */
916 wwn_t peer_wwpn; /* P2P peer WWPN */ 907 wwn_t peer_wwpn; /* P2P peer WWPN */
917 u32 peer_d_id; /* P2P peer D_ID */ 908 u32 peer_d_id; /* P2P peer D_ID */
918 wwn_t physical_wwpn; /* WWPN of physical port */
919 u32 physical_s_id; /* local FC port ID */
920 struct ccw_device *ccw_device; /* S/390 ccw device */ 909 struct ccw_device *ccw_device; /* S/390 ccw device */
921 u8 fc_service_class; 910 u8 fc_service_class;
922 u32 hydra_version; /* Hydra version */ 911 u32 hydra_version; /* Hydra version */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index da947e662031..e3c4bdd29a60 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -2246,15 +2246,6 @@ zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
2246{ 2246{
2247 int retval; 2247 int retval;
2248 2248
2249 if ((atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
2250 &erp_action->adapter->status)) &&
2251 (erp_action->adapter->adapter_features &
2252 FSF_FEATURE_HBAAPI_MANAGEMENT)) {
2253 zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
2254 atomic_set(&erp_action->adapter->erp_counter, 0);
2255 return ZFCP_ERP_FAILED;
2256 }
2257
2258 retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); 2249 retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
2259 if (retval == ZFCP_ERP_FAILED) 2250 if (retval == ZFCP_ERP_FAILED)
2260 return ZFCP_ERP_FAILED; 2251 return ZFCP_ERP_FAILED;
@@ -2266,13 +2257,6 @@ zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
2266 return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action); 2257 return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
2267} 2258}
2268 2259
2269/*
2270 * function:
2271 *
2272 * purpose:
2273 *
2274 * returns:
2275 */
2276static int 2260static int
2277zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action) 2261zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2278{ 2262{
@@ -2350,48 +2334,40 @@ static int
2350zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) 2334zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2351{ 2335{
2352 int ret; 2336 int ret;
2353 int retries; 2337 struct zfcp_adapter *adapter;
2354 int sleep;
2355 struct zfcp_adapter *adapter = erp_action->adapter;
2356 2338
2339 adapter = erp_action->adapter;
2357 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2340 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2358 2341
2359 retries = 0; 2342 write_lock(&adapter->erp_lock);
2360 do { 2343 zfcp_erp_action_to_running(erp_action);
2361 write_lock(&adapter->erp_lock); 2344 write_unlock(&adapter->erp_lock);
2362 zfcp_erp_action_to_running(erp_action);
2363 write_unlock(&adapter->erp_lock);
2364 zfcp_erp_timeout_init(erp_action);
2365 ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL);
2366 if (ret == -EOPNOTSUPP) {
2367 debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp");
2368 return ZFCP_ERP_SUCCEEDED;
2369 } else if (ret) {
2370 debug_text_event(adapter->erp_dbf, 3, "a_xport_failed");
2371 return ZFCP_ERP_FAILED;
2372 }
2373 debug_text_event(adapter->erp_dbf, 6, "a_xport_ok");
2374 2345
2375 down(&adapter->erp_ready_sem); 2346 zfcp_erp_timeout_init(erp_action);
2376 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { 2347 ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL);
2377 ZFCP_LOG_INFO("error: exchange of port data " 2348 if (ret == -EOPNOTSUPP) {
2378 "for adapter %s timed out\n", 2349 debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp");
2379 zfcp_get_busid_by_adapter(adapter)); 2350 return ZFCP_ERP_SUCCEEDED;
2380 break; 2351 } else if (ret) {
2381 } 2352 debug_text_event(adapter->erp_dbf, 3, "a_xport_failed");
2382 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 2353 return ZFCP_ERP_FAILED;
2383 &adapter->status)) 2354 }
2384 break; 2355 debug_text_event(adapter->erp_dbf, 6, "a_xport_ok");
2385 2356
2386 if (retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES) { 2357 ret = ZFCP_ERP_SUCCEEDED;
2387 sleep = ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP; 2358 down(&adapter->erp_ready_sem);
2388 retries++; 2359 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
2389 } else 2360 ZFCP_LOG_INFO("error: exchange port data timed out (adapter "
2390 sleep = ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP; 2361 "%s)\n", zfcp_get_busid_by_adapter(adapter));
2391 schedule_timeout(sleep); 2362 ret = ZFCP_ERP_FAILED;
2392 } while (1); 2363 }
2364 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) {
2365 ZFCP_LOG_INFO("error: exchange port data failed (adapter "
2366 "%s\n", zfcp_get_busid_by_adapter(adapter));
2367 ret = ZFCP_ERP_FAILED;
2368 }
2393 2369
2394 return ZFCP_ERP_SUCCEEDED; 2370 return ret;
2395} 2371}
2396 2372
2397/* 2373/*
@@ -3439,6 +3415,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3439 "(adapter %s, wwpn=0x%016Lx)\n", 3415 "(adapter %s, wwpn=0x%016Lx)\n",
3440 zfcp_get_busid_by_port(port), 3416 zfcp_get_busid_by_port(port),
3441 port->wwpn); 3417 port->wwpn);
3418 else
3419 scsi_flush_work(adapter->scsi_host);
3442 } 3420 }
3443 zfcp_port_put(port); 3421 zfcp_port_put(port);
3444 break; 3422 break;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index c1ba7cf1b496..700f5402a978 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -194,9 +194,10 @@ extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *);
194extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); 194extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
195 195
196extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *, 196extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
197 struct scsi_cmnd *); 197 struct scsi_cmnd *,
198 struct zfcp_fsf_req *);
198extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, 199extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
199 struct scsi_cmnd *, 200 struct scsi_cmnd *, struct zfcp_fsf_req *,
200 struct zfcp_fsf_req *); 201 struct zfcp_fsf_req *);
201extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, 202extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
202 struct scsi_cmnd *); 203 struct scsi_cmnd *);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 9f0cb3d820c0..662ec571d73b 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -388,6 +388,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
388 case FSF_PROT_LINK_DOWN: 388 case FSF_PROT_LINK_DOWN:
389 zfcp_fsf_link_down_info_eval(adapter, 389 zfcp_fsf_link_down_info_eval(adapter,
390 &prot_status_qual->link_down_info); 390 &prot_status_qual->link_down_info);
391 zfcp_erp_adapter_reopen(adapter, 0);
391 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 392 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
392 break; 393 break;
393 394
@@ -558,10 +559,8 @@ zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter,
558 559
559 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 560 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
560 561
561 if (link_down == NULL) { 562 if (link_down == NULL)
562 zfcp_erp_adapter_reopen(adapter, 0); 563 goto out;
563 return;
564 }
565 564
566 switch (link_down->error_code) { 565 switch (link_down->error_code) {
567 case FSF_PSQ_LINK_NO_LIGHT: 566 case FSF_PSQ_LINK_NO_LIGHT:
@@ -643,16 +642,8 @@ zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter,
643 link_down->explanation_code, 642 link_down->explanation_code,
644 link_down->vendor_specific_code); 643 link_down->vendor_specific_code);
645 644
646 switch (link_down->error_code) { 645 out:
647 case FSF_PSQ_LINK_NO_LIGHT: 646 zfcp_erp_adapter_failed(adapter);
648 case FSF_PSQ_LINK_WRAP_PLUG:
649 case FSF_PSQ_LINK_NO_FCP:
650 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
651 zfcp_erp_adapter_reopen(adapter, 0);
652 break;
653 default:
654 zfcp_erp_adapter_failed(adapter);
655 }
656} 647}
657 648
658/* 649/*
@@ -2304,6 +2295,35 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2304 return retval; 2295 return retval;
2305} 2296}
2306 2297
2298/**
2299 * zfcp_fsf_exchange_port_evaluate
2300 * @fsf_req: fsf_req which belongs to xchg port data request
2301 * @xchg_ok: specifies if xchg port data was incomplete or complete (0/1)
2302 */
2303static void
2304zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
2305{
2306 struct zfcp_adapter *adapter;
2307 struct fsf_qtcb *qtcb;
2308 struct fsf_qtcb_bottom_port *bottom, *data;
2309 struct Scsi_Host *shost;
2310
2311 adapter = fsf_req->adapter;
2312 qtcb = fsf_req->qtcb;
2313 bottom = &qtcb->bottom.port;
2314 shost = adapter->scsi_host;
2315
2316 data = (struct fsf_qtcb_bottom_port*) fsf_req->data;
2317 if (data)
2318 memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port));
2319
2320 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
2321 fc_host_permanent_port_name(shost) = bottom->wwpn;
2322 else
2323 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
2324 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
2325 fc_host_supported_speeds(shost) = bottom->supported_speed;
2326}
2307 2327
2308/** 2328/**
2309 * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request 2329 * zfcp_fsf_exchange_port_data_handler - handler for exchange_port_data request
@@ -2312,38 +2332,26 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2312static void 2332static void
2313zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req) 2333zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req)
2314{ 2334{
2315 struct zfcp_adapter *adapter = fsf_req->adapter; 2335 struct zfcp_adapter *adapter;
2316 struct Scsi_Host *shost = adapter->scsi_host; 2336 struct fsf_qtcb *qtcb;
2317 struct fsf_qtcb *qtcb = fsf_req->qtcb; 2337
2318 struct fsf_qtcb_bottom_port *bottom, *data; 2338 adapter = fsf_req->adapter;
2339 qtcb = fsf_req->qtcb;
2319 2340
2320 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) 2341 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2321 return; 2342 return;
2322 2343
2323 switch (qtcb->header.fsf_status) { 2344 switch (qtcb->header.fsf_status) {
2324 case FSF_GOOD: 2345 case FSF_GOOD:
2346 zfcp_fsf_exchange_port_evaluate(fsf_req, 1);
2325 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2347 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2326
2327 bottom = &qtcb->bottom.port;
2328 data = (struct fsf_qtcb_bottom_port*) fsf_req->data;
2329 if (data)
2330 memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port));
2331 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
2332 fc_host_permanent_port_name(shost) = bottom->wwpn;
2333 else
2334 fc_host_permanent_port_name(shost) =
2335 fc_host_port_name(shost);
2336 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
2337 fc_host_supported_speeds(shost) = bottom->supported_speed;
2338 break; 2348 break;
2339
2340 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 2349 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
2350 zfcp_fsf_exchange_port_evaluate(fsf_req, 0);
2341 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2351 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2342
2343 zfcp_fsf_link_down_info_eval(adapter, 2352 zfcp_fsf_link_down_info_eval(adapter,
2344 &qtcb->header.fsf_status_qual.link_down_info); 2353 &qtcb->header.fsf_status_qual.link_down_info);
2345 break; 2354 break;
2346
2347 default: 2355 default:
2348 debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng"); 2356 debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng");
2349 debug_event(adapter->erp_dbf, 0, 2357 debug_event(adapter->erp_dbf, 0,
@@ -4203,11 +4211,11 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4203 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result); 4211 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result);
4204 4212
4205 if (scpnt->result != 0) 4213 if (scpnt->result != 0)
4206 zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt); 4214 zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt, fsf_req);
4207 else if (scpnt->retries > 0) 4215 else if (scpnt->retries > 0)
4208 zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt); 4216 zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt, fsf_req);
4209 else 4217 else
4210 zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt); 4218 zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt, fsf_req);
4211 4219
4212 /* cleanup pointer (need this especially for abort) */ 4220 /* cleanup pointer (need this especially for abort) */
4213 scpnt->host_scribble = NULL; 4221 scpnt->host_scribble = NULL;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index e0803757c0fa..9f6b4d7a46f3 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -242,7 +242,7 @@ zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
242 if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) 242 if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
243 zfcp_scsi_dbf_event_result("fail", 4, 243 zfcp_scsi_dbf_event_result("fail", 4,
244 (struct zfcp_adapter*) scpnt->device->host->hostdata[0], 244 (struct zfcp_adapter*) scpnt->device->host->hostdata[0],
245 scpnt); 245 scpnt, NULL);
246 /* return directly */ 246 /* return directly */
247 scpnt->scsi_done(scpnt); 247 scpnt->scsi_done(scpnt);
248} 248}
@@ -446,7 +446,7 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
446 old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble; 446 old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble;
447 if (!old_fsf_req) { 447 if (!old_fsf_req) {
448 write_unlock_irqrestore(&adapter->abort_lock, flags); 448 write_unlock_irqrestore(&adapter->abort_lock, flags);
449 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, new_fsf_req); 449 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, NULL);
450 retval = SUCCESS; 450 retval = SUCCESS;
451 goto out; 451 goto out;
452 } 452 }
@@ -460,6 +460,8 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
460 adapter, unit, 0); 460 adapter, unit, 0);
461 if (!new_fsf_req) { 461 if (!new_fsf_req) {
462 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n"); 462 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n");
463 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
464 old_fsf_req);
463 retval = FAILED; 465 retval = FAILED;
464 goto out; 466 goto out;
465 } 467 }
@@ -470,13 +472,16 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
470 472
471 /* status should be valid since signals were not permitted */ 473 /* status should be valid since signals were not permitted */
472 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { 474 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
473 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req); 475 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req,
476 NULL);
474 retval = SUCCESS; 477 retval = SUCCESS;
475 } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { 478 } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
476 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req); 479 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req,
480 NULL);
477 retval = SUCCESS; 481 retval = SUCCESS;
478 } else { 482 } else {
479 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req); 483 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req,
484 NULL);
480 retval = FAILED; 485 retval = FAILED;
481 } 486 }
482 zfcp_fsf_req_free(new_fsf_req); 487 zfcp_fsf_req_free(new_fsf_req);
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c
index dfc07370f412..b29ac25e07f3 100644
--- a/drivers/s390/scsi/zfcp_sysfs_adapter.c
+++ b/drivers/s390/scsi/zfcp_sysfs_adapter.c
@@ -55,8 +55,6 @@ ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
55ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn); 55ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn);
56ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn); 56ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn);
57ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id); 57ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
58ZFCP_DEFINE_ADAPTER_ATTR(physical_wwpn, "0x%016llx\n", adapter->physical_wwpn);
59ZFCP_DEFINE_ADAPTER_ATTR(physical_s_id, "0x%06x\n", adapter->physical_s_id);
60ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version); 58ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
61ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version); 59ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
62ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n", 60ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n",
@@ -241,8 +239,6 @@ static struct attribute *zfcp_adapter_attrs[] = {
241 &dev_attr_peer_wwnn.attr, 239 &dev_attr_peer_wwnn.attr,
242 &dev_attr_peer_wwpn.attr, 240 &dev_attr_peer_wwpn.attr,
243 &dev_attr_peer_d_id.attr, 241 &dev_attr_peer_d_id.attr,
244 &dev_attr_physical_wwpn.attr,
245 &dev_attr_physical_s_id.attr,
246 &dev_attr_card_version.attr, 242 &dev_attr_card_version.attr,
247 &dev_attr_lic_version.attr, 243 &dev_attr_lic_version.attr,
248 &dev_attr_status.attr, 244 &dev_attr_status.attr,
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 31c497542272..d9152d02088c 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -61,6 +61,7 @@
61 Add support for embedded firmware error strings. 61 Add support for embedded firmware error strings.
62 2.26.02.003 - Correctly handle single sgl's with use_sg=1. 62 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
63 2.26.02.004 - Add support for 9550SX controllers. 63 2.26.02.004 - Add support for 9550SX controllers.
64 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
64*/ 65*/
65 66
66#include <linux/module.h> 67#include <linux/module.h>
@@ -84,7 +85,7 @@
84#include "3w-9xxx.h" 85#include "3w-9xxx.h"
85 86
86/* Globals */ 87/* Globals */
87#define TW_DRIVER_VERSION "2.26.02.004" 88#define TW_DRIVER_VERSION "2.26.02.005"
88static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; 89static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
89static unsigned int twa_device_extension_count; 90static unsigned int twa_device_extension_count;
90static int twa_major = -1; 91static int twa_major = -1;
@@ -1408,7 +1409,7 @@ static dma_addr_t twa_map_scsi_single_data(TW_Device_Extension *tw_dev, int requ
1408 dma_addr_t mapping; 1409 dma_addr_t mapping;
1409 struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 1410 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1410 struct pci_dev *pdev = tw_dev->tw_pci_dev; 1411 struct pci_dev *pdev = tw_dev->tw_pci_dev;
1411 int retval = 0; 1412 dma_addr_t retval = 0;
1412 1413
1413 if (cmd->request_bufflen == 0) { 1414 if (cmd->request_bufflen == 0) {
1414 retval = 0; 1415 retval = 0;
@@ -1798,7 +1799,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1798 int i, sg_count; 1799 int i, sg_count;
1799 struct scsi_cmnd *srb = NULL; 1800 struct scsi_cmnd *srb = NULL;
1800 struct scatterlist *sglist = NULL; 1801 struct scatterlist *sglist = NULL;
1801 u32 buffaddr = 0x0; 1802 dma_addr_t buffaddr = 0x0;
1802 int retval = 1; 1803 int retval = 1;
1803 1804
1804 if (tw_dev->srb[request_id]) { 1805 if (tw_dev->srb[request_id]) {
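
The 3w-9xxx hunks above widen retval and buffaddr from int/u32 to dma_addr_t; on machines with 4GB or more, a bus address handed back by the PCI DMA mapping layer may not fit in 32 bits, and storing it in a narrower variable silently corrupts the mapping. A userspace sketch of that truncation (the address value is made up; dma_addr_t itself is a kernel typedef sized for the platform):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* A bus address above 4GB, as a DMA mapping might return on a
         * large-memory machine (value is illustrative). */
        uint64_t bus_addr = 0x100200300ULL;

        uint32_t truncated = (uint32_t)bus_addr;        /* old u32/int field  */
        uint64_t preserved = bus_addr;                  /* dma_addr_t-sized   */

        printf("truncated: 0x%08llx\n", (unsigned long long)truncated);
        printf("preserved: 0x%llx\n", (unsigned long long)preserved);
        return 0;
}

The truncated value comes out as 0x00200300, pointing the controller at the wrong memory; keeping the full dma_addr_t preserves 0x100200300.
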
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 7139659dd952..a16f8ded8f1d 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -173,10 +173,10 @@ int aac_get_config_status(struct aac_dev *dev)
173 int status = 0; 173 int status = 0;
174 struct fib * fibptr; 174 struct fib * fibptr;
175 175
176 if (!(fibptr = fib_alloc(dev))) 176 if (!(fibptr = aac_fib_alloc(dev)))
177 return -ENOMEM; 177 return -ENOMEM;
178 178
179 fib_init(fibptr); 179 aac_fib_init(fibptr);
180 { 180 {
181 struct aac_get_config_status *dinfo; 181 struct aac_get_config_status *dinfo;
182 dinfo = (struct aac_get_config_status *) fib_data(fibptr); 182 dinfo = (struct aac_get_config_status *) fib_data(fibptr);
@@ -186,7 +186,7 @@ int aac_get_config_status(struct aac_dev *dev)
186 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data)); 186 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
187 } 187 }
188 188
189 status = fib_send(ContainerCommand, 189 status = aac_fib_send(ContainerCommand,
190 fibptr, 190 fibptr,
191 sizeof (struct aac_get_config_status), 191 sizeof (struct aac_get_config_status),
192 FsaNormal, 192 FsaNormal,
@@ -209,30 +209,30 @@ int aac_get_config_status(struct aac_dev *dev)
209 status = -EINVAL; 209 status = -EINVAL;
210 } 210 }
211 } 211 }
212 fib_complete(fibptr); 212 aac_fib_complete(fibptr);
213 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ 213 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
214 if (status >= 0) { 214 if (status >= 0) {
215 if (commit == 1) { 215 if (commit == 1) {
216 struct aac_commit_config * dinfo; 216 struct aac_commit_config * dinfo;
217 fib_init(fibptr); 217 aac_fib_init(fibptr);
218 dinfo = (struct aac_commit_config *) fib_data(fibptr); 218 dinfo = (struct aac_commit_config *) fib_data(fibptr);
219 219
220 dinfo->command = cpu_to_le32(VM_ContainerConfig); 220 dinfo->command = cpu_to_le32(VM_ContainerConfig);
221 dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG); 221 dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
222 222
223 status = fib_send(ContainerCommand, 223 status = aac_fib_send(ContainerCommand,
224 fibptr, 224 fibptr,
225 sizeof (struct aac_commit_config), 225 sizeof (struct aac_commit_config),
226 FsaNormal, 226 FsaNormal,
227 1, 1, 227 1, 1,
228 NULL, NULL); 228 NULL, NULL);
229 fib_complete(fibptr); 229 aac_fib_complete(fibptr);
230 } else if (commit == 0) { 230 } else if (commit == 0) {
231 printk(KERN_WARNING 231 printk(KERN_WARNING
232 "aac_get_config_status: Foreign device configurations are being ignored\n"); 232 "aac_get_config_status: Foreign device configurations are being ignored\n");
233 } 233 }
234 } 234 }
235 fib_free(fibptr); 235 aac_fib_free(fibptr);
236 return status; 236 return status;
237} 237}
238 238
@@ -255,15 +255,15 @@ int aac_get_containers(struct aac_dev *dev)
255 255
256 instance = dev->scsi_host_ptr->unique_id; 256 instance = dev->scsi_host_ptr->unique_id;
257 257
258 if (!(fibptr = fib_alloc(dev))) 258 if (!(fibptr = aac_fib_alloc(dev)))
259 return -ENOMEM; 259 return -ENOMEM;
260 260
261 fib_init(fibptr); 261 aac_fib_init(fibptr);
262 dinfo = (struct aac_get_container_count *) fib_data(fibptr); 262 dinfo = (struct aac_get_container_count *) fib_data(fibptr);
263 dinfo->command = cpu_to_le32(VM_ContainerConfig); 263 dinfo->command = cpu_to_le32(VM_ContainerConfig);
264 dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT); 264 dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
265 265
266 status = fib_send(ContainerCommand, 266 status = aac_fib_send(ContainerCommand,
267 fibptr, 267 fibptr,
268 sizeof (struct aac_get_container_count), 268 sizeof (struct aac_get_container_count),
269 FsaNormal, 269 FsaNormal,
@@ -272,7 +272,7 @@ int aac_get_containers(struct aac_dev *dev)
272 if (status >= 0) { 272 if (status >= 0) {
273 dresp = (struct aac_get_container_count_resp *)fib_data(fibptr); 273 dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
274 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); 274 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
275 fib_complete(fibptr); 275 aac_fib_complete(fibptr);
276 } 276 }
277 277
278 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) 278 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
@@ -280,7 +280,7 @@ int aac_get_containers(struct aac_dev *dev)
280 fsa_dev_ptr = (struct fsa_dev_info *) kmalloc( 280 fsa_dev_ptr = (struct fsa_dev_info *) kmalloc(
281 sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL); 281 sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL);
282 if (!fsa_dev_ptr) { 282 if (!fsa_dev_ptr) {
283 fib_free(fibptr); 283 aac_fib_free(fibptr);
284 return -ENOMEM; 284 return -ENOMEM;
285 } 285 }
286 memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers); 286 memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);
@@ -294,14 +294,14 @@ int aac_get_containers(struct aac_dev *dev)
294 294
295 fsa_dev_ptr[index].devname[0] = '\0'; 295 fsa_dev_ptr[index].devname[0] = '\0';
296 296
297 fib_init(fibptr); 297 aac_fib_init(fibptr);
298 dinfo = (struct aac_query_mount *) fib_data(fibptr); 298 dinfo = (struct aac_query_mount *) fib_data(fibptr);
299 299
300 dinfo->command = cpu_to_le32(VM_NameServe); 300 dinfo->command = cpu_to_le32(VM_NameServe);
301 dinfo->count = cpu_to_le32(index); 301 dinfo->count = cpu_to_le32(index);
302 dinfo->type = cpu_to_le32(FT_FILESYS); 302 dinfo->type = cpu_to_le32(FT_FILESYS);
303 303
304 status = fib_send(ContainerCommand, 304 status = aac_fib_send(ContainerCommand,
305 fibptr, 305 fibptr,
306 sizeof (struct aac_query_mount), 306 sizeof (struct aac_query_mount),
307 FsaNormal, 307 FsaNormal,
@@ -319,7 +319,7 @@ int aac_get_containers(struct aac_dev *dev)
319 dinfo->count = cpu_to_le32(index); 319 dinfo->count = cpu_to_le32(index);
320 dinfo->type = cpu_to_le32(FT_FILESYS); 320 dinfo->type = cpu_to_le32(FT_FILESYS);
321 321
322 if (fib_send(ContainerCommand, 322 if (aac_fib_send(ContainerCommand,
323 fibptr, 323 fibptr,
324 sizeof(struct aac_query_mount), 324 sizeof(struct aac_query_mount),
325 FsaNormal, 325 FsaNormal,
@@ -347,7 +347,7 @@ int aac_get_containers(struct aac_dev *dev)
347 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) 347 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
348 fsa_dev_ptr[index].ro = 1; 348 fsa_dev_ptr[index].ro = 1;
349 } 349 }
350 fib_complete(fibptr); 350 aac_fib_complete(fibptr);
351 /* 351 /*
352 * If there are no more containers, then stop asking. 352 * If there are no more containers, then stop asking.
353 */ 353 */
@@ -355,7 +355,7 @@ int aac_get_containers(struct aac_dev *dev)
355 break; 355 break;
356 } 356 }
357 } 357 }
358 fib_free(fibptr); 358 aac_fib_free(fibptr);
359 return status; 359 return status;
360} 360}
361 361
@@ -413,8 +413,8 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
413 413
414 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 414 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
415 415
416 fib_complete(fibptr); 416 aac_fib_complete(fibptr);
417 fib_free(fibptr); 417 aac_fib_free(fibptr);
418 scsicmd->scsi_done(scsicmd); 418 scsicmd->scsi_done(scsicmd);
419} 419}
420 420
@@ -430,10 +430,10 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
430 430
431 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 431 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
432 432
433 if (!(cmd_fibcontext = fib_alloc(dev))) 433 if (!(cmd_fibcontext = aac_fib_alloc(dev)))
434 return -ENOMEM; 434 return -ENOMEM;
435 435
436 fib_init(cmd_fibcontext); 436 aac_fib_init(cmd_fibcontext);
437 dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); 437 dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
438 438
439 dinfo->command = cpu_to_le32(VM_ContainerConfig); 439 dinfo->command = cpu_to_le32(VM_ContainerConfig);
@@ -441,7 +441,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
441 dinfo->cid = cpu_to_le32(cid); 441 dinfo->cid = cpu_to_le32(cid);
442 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data)); 442 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
443 443
444 status = fib_send(ContainerCommand, 444 status = aac_fib_send(ContainerCommand,
445 cmd_fibcontext, 445 cmd_fibcontext,
446 sizeof (struct aac_get_name), 446 sizeof (struct aac_get_name),
447 FsaNormal, 447 FsaNormal,
@@ -455,14 +455,14 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
455 if (status == -EINPROGRESS) 455 if (status == -EINPROGRESS)
456 return 0; 456 return 0;
457 457
458 printk(KERN_WARNING "aac_get_container_name: fib_send failed with status: %d.\n", status); 458 printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
459 fib_complete(cmd_fibcontext); 459 aac_fib_complete(cmd_fibcontext);
460 fib_free(cmd_fibcontext); 460 aac_fib_free(cmd_fibcontext);
461 return -1; 461 return -1;
462} 462}
463 463
464/** 464/**
465 * probe_container - query a logical volume 465 * aac_probe_container - query a logical volume
466 * @dev: device to query 466 * @dev: device to query
467 * @cid: container identifier 467 * @cid: container identifier
468 * 468 *
@@ -470,7 +470,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
470 * is updated in the struct fsa_dev_info structure rather than returned. 470 * is updated in the struct fsa_dev_info structure rather than returned.
471 */ 471 */
472 472
473int probe_container(struct aac_dev *dev, int cid) 473int aac_probe_container(struct aac_dev *dev, int cid)
474{ 474{
475 struct fsa_dev_info *fsa_dev_ptr; 475 struct fsa_dev_info *fsa_dev_ptr;
476 int status; 476 int status;
@@ -482,10 +482,10 @@ int probe_container(struct aac_dev *dev, int cid)
482 fsa_dev_ptr = dev->fsa_dev; 482 fsa_dev_ptr = dev->fsa_dev;
483 instance = dev->scsi_host_ptr->unique_id; 483 instance = dev->scsi_host_ptr->unique_id;
484 484
485 if (!(fibptr = fib_alloc(dev))) 485 if (!(fibptr = aac_fib_alloc(dev)))
486 return -ENOMEM; 486 return -ENOMEM;
487 487
488 fib_init(fibptr); 488 aac_fib_init(fibptr);
489 489
490 dinfo = (struct aac_query_mount *)fib_data(fibptr); 490 dinfo = (struct aac_query_mount *)fib_data(fibptr);
491 491
@@ -493,14 +493,14 @@ int probe_container(struct aac_dev *dev, int cid)
493 dinfo->count = cpu_to_le32(cid); 493 dinfo->count = cpu_to_le32(cid);
494 dinfo->type = cpu_to_le32(FT_FILESYS); 494 dinfo->type = cpu_to_le32(FT_FILESYS);
495 495
496 status = fib_send(ContainerCommand, 496 status = aac_fib_send(ContainerCommand,
497 fibptr, 497 fibptr,
498 sizeof(struct aac_query_mount), 498 sizeof(struct aac_query_mount),
499 FsaNormal, 499 FsaNormal,
500 1, 1, 500 1, 1,
501 NULL, NULL); 501 NULL, NULL);
502 if (status < 0) { 502 if (status < 0) {
503 printk(KERN_WARNING "aacraid: probe_container query failed.\n"); 503 printk(KERN_WARNING "aacraid: aac_probe_container query failed.\n");
504 goto error; 504 goto error;
505 } 505 }
506 506
@@ -512,7 +512,7 @@ int probe_container(struct aac_dev *dev, int cid)
512 dinfo->count = cpu_to_le32(cid); 512 dinfo->count = cpu_to_le32(cid);
513 dinfo->type = cpu_to_le32(FT_FILESYS); 513 dinfo->type = cpu_to_le32(FT_FILESYS);
514 514
515 if (fib_send(ContainerCommand, 515 if (aac_fib_send(ContainerCommand,
516 fibptr, 516 fibptr,
517 sizeof(struct aac_query_mount), 517 sizeof(struct aac_query_mount),
518 FsaNormal, 518 FsaNormal,
@@ -535,8 +535,8 @@ int probe_container(struct aac_dev *dev, int cid)
535 } 535 }
536 536
537error: 537error:
538 fib_complete(fibptr); 538 aac_fib_complete(fibptr);
539 fib_free(fibptr); 539 aac_fib_free(fibptr);
540 540
541 return status; 541 return status;
542} 542}
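Editor's note: the container queries above (aac_probe_container here, and aac_send_shutdown later in comminit.c) all follow the same blocking FIB round trip. The sketch below is purely illustrative and not part of the patch; aac_sketch_query is a hypothetical helper name, and the code assumes the aacraid driver header and the renamed API shown in this diff.

#include "aacraid.h"

static int aac_sketch_query(struct aac_dev *dev, int cid)
{
	struct fib *fibptr;
	struct aac_query_mount *dinfo;
	int status;

	fibptr = aac_fib_alloc(dev);		/* take a FIB from the pool */
	if (!fibptr)
		return -ENOMEM;
	aac_fib_init(fibptr);			/* reset the header for a host-owned FIB */

	dinfo = (struct aac_query_mount *)fib_data(fibptr);
	dinfo->command = cpu_to_le32(VM_NameServe);
	dinfo->count = cpu_to_le32(cid);
	dinfo->type = cpu_to_le32(FT_FILESYS);

	/* wait=1 with a NULL callback: aac_fib_send blocks until the adapter replies */
	status = aac_fib_send(ContainerCommand, fibptr,
			      sizeof(struct aac_query_mount),
			      FsaNormal, 1, 1, NULL, NULL);
	if (status >= 0)
		aac_fib_complete(fibptr);	/* hand the hardware FIB back */
	aac_fib_free(fibptr);			/* return the FIB to the pool */
	return status;
}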
@@ -700,14 +700,14 @@ int aac_get_adapter_info(struct aac_dev* dev)
700 struct aac_bus_info *command; 700 struct aac_bus_info *command;
701 struct aac_bus_info_response *bus_info; 701 struct aac_bus_info_response *bus_info;
702 702
703 if (!(fibptr = fib_alloc(dev))) 703 if (!(fibptr = aac_fib_alloc(dev)))
704 return -ENOMEM; 704 return -ENOMEM;
705 705
706 fib_init(fibptr); 706 aac_fib_init(fibptr);
707 info = (struct aac_adapter_info *) fib_data(fibptr); 707 info = (struct aac_adapter_info *) fib_data(fibptr);
708 memset(info,0,sizeof(*info)); 708 memset(info,0,sizeof(*info));
709 709
710 rcode = fib_send(RequestAdapterInfo, 710 rcode = aac_fib_send(RequestAdapterInfo,
711 fibptr, 711 fibptr,
712 sizeof(*info), 712 sizeof(*info),
713 FsaNormal, 713 FsaNormal,
@@ -716,8 +716,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
716 NULL); 716 NULL);
717 717
718 if (rcode < 0) { 718 if (rcode < 0) {
719 fib_complete(fibptr); 719 aac_fib_complete(fibptr);
720 fib_free(fibptr); 720 aac_fib_free(fibptr);
721 return rcode; 721 return rcode;
722 } 722 }
723 memcpy(&dev->adapter_info, info, sizeof(*info)); 723 memcpy(&dev->adapter_info, info, sizeof(*info));
@@ -725,13 +725,13 @@ int aac_get_adapter_info(struct aac_dev* dev)
725 if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) { 725 if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
726 struct aac_supplement_adapter_info * info; 726 struct aac_supplement_adapter_info * info;
727 727
728 fib_init(fibptr); 728 aac_fib_init(fibptr);
729 729
730 info = (struct aac_supplement_adapter_info *) fib_data(fibptr); 730 info = (struct aac_supplement_adapter_info *) fib_data(fibptr);
731 731
732 memset(info,0,sizeof(*info)); 732 memset(info,0,sizeof(*info));
733 733
734 rcode = fib_send(RequestSupplementAdapterInfo, 734 rcode = aac_fib_send(RequestSupplementAdapterInfo,
735 fibptr, 735 fibptr,
736 sizeof(*info), 736 sizeof(*info),
737 FsaNormal, 737 FsaNormal,
@@ -748,7 +748,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
748 * GetBusInfo 748 * GetBusInfo
749 */ 749 */
750 750
751 fib_init(fibptr); 751 aac_fib_init(fibptr);
752 752
753 bus_info = (struct aac_bus_info_response *) fib_data(fibptr); 753 bus_info = (struct aac_bus_info_response *) fib_data(fibptr);
754 754
@@ -761,7 +761,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
761 command->MethodId = cpu_to_le32(1); 761 command->MethodId = cpu_to_le32(1);
762 command->CtlCmd = cpu_to_le32(GetBusInfo); 762 command->CtlCmd = cpu_to_le32(GetBusInfo);
763 763
764 rcode = fib_send(ContainerCommand, 764 rcode = aac_fib_send(ContainerCommand,
765 fibptr, 765 fibptr,
766 sizeof (*bus_info), 766 sizeof (*bus_info),
767 FsaNormal, 767 FsaNormal,
@@ -891,8 +891,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
891 } 891 }
892 } 892 }
893 893
894 fib_complete(fibptr); 894 aac_fib_complete(fibptr);
895 fib_free(fibptr); 895 aac_fib_free(fibptr);
896 896
897 return rcode; 897 return rcode;
898} 898}
@@ -976,8 +976,8 @@ static void io_callback(void *context, struct fib * fibptr)
976 ? sizeof(scsicmd->sense_buffer) 976 ? sizeof(scsicmd->sense_buffer)
977 : sizeof(dev->fsa_dev[cid].sense_data)); 977 : sizeof(dev->fsa_dev[cid].sense_data));
978 } 978 }
979 fib_complete(fibptr); 979 aac_fib_complete(fibptr);
980 fib_free(fibptr); 980 aac_fib_free(fibptr);
981 981
982 scsicmd->scsi_done(scsicmd); 982 scsicmd->scsi_done(scsicmd);
983} 983}
@@ -1062,11 +1062,11 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1062 /* 1062 /*
1063 * Alocate and initialize a Fib 1063 * Alocate and initialize a Fib
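(typo in the kernel comment above: "Alocate" should read "Allocate"; the patch leaves the context line untouched)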
1064 */ 1064 */
1065 if (!(cmd_fibcontext = fib_alloc(dev))) { 1065 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
1066 return -1; 1066 return -1;
1067 } 1067 }
1068 1068
1069 fib_init(cmd_fibcontext); 1069 aac_fib_init(cmd_fibcontext);
1070 1070
1071 if (dev->raw_io_interface) { 1071 if (dev->raw_io_interface) {
1072 struct aac_raw_io *readcmd; 1072 struct aac_raw_io *readcmd;
@@ -1086,7 +1086,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1086 /* 1086 /*
1087 * Now send the Fib to the adapter 1087 * Now send the Fib to the adapter
1088 */ 1088 */
1089 status = fib_send(ContainerRawIo, 1089 status = aac_fib_send(ContainerRawIo,
1090 cmd_fibcontext, 1090 cmd_fibcontext,
1091 fibsize, 1091 fibsize,
1092 FsaNormal, 1092 FsaNormal,
@@ -1112,7 +1112,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1112 /* 1112 /*
1113 * Now send the Fib to the adapter 1113 * Now send the Fib to the adapter
1114 */ 1114 */
1115 status = fib_send(ContainerCommand64, 1115 status = aac_fib_send(ContainerCommand64,
1116 cmd_fibcontext, 1116 cmd_fibcontext,
1117 fibsize, 1117 fibsize,
1118 FsaNormal, 1118 FsaNormal,
@@ -1136,7 +1136,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1136 /* 1136 /*
1137 * Now send the Fib to the adapter 1137 * Now send the Fib to the adapter
1138 */ 1138 */
1139 status = fib_send(ContainerCommand, 1139 status = aac_fib_send(ContainerCommand,
1140 cmd_fibcontext, 1140 cmd_fibcontext,
1141 fibsize, 1141 fibsize,
1142 FsaNormal, 1142 FsaNormal,
@@ -1153,14 +1153,14 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1153 if (status == -EINPROGRESS) 1153 if (status == -EINPROGRESS)
1154 return 0; 1154 return 0;
1155 1155
1156 printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status); 1156 printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
1157 /* 1157 /*
1158 * For some reason, the Fib didn't queue, return QUEUE_FULL 1158 * For some reason, the Fib didn't queue, return QUEUE_FULL
1159 */ 1159 */
1160 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; 1160 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
1161 scsicmd->scsi_done(scsicmd); 1161 scsicmd->scsi_done(scsicmd);
1162 fib_complete(cmd_fibcontext); 1162 aac_fib_complete(cmd_fibcontext);
1163 fib_free(cmd_fibcontext); 1163 aac_fib_free(cmd_fibcontext);
1164 return 0; 1164 return 0;
1165} 1165}
1166 1166
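Editor's note: the read and write paths above use the asynchronous variant of the same API: the FIB is handed to aac_fib_send together with a completion callback, and an -EINPROGRESS return transfers ownership to that callback. A minimal sketch of that ownership rule, illustration only, with the hypothetical names aac_sketch_done and aac_sketch_submit:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include "aacraid.h"

static void aac_sketch_done(void *context, struct fib *fibptr)
{
	struct scsi_cmnd *scsicmd = context;

	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
	aac_fib_complete(fibptr);	/* the callback now owns the FIB */
	aac_fib_free(fibptr);
	scsicmd->scsi_done(scsicmd);
}

static int aac_sketch_submit(struct fib *fibptr, struct scsi_cmnd *scsicmd,
			     unsigned long fibsize)
{
	int status = aac_fib_send(ContainerCommand, fibptr, fibsize, FsaNormal,
				  0, 1, (fib_callback)aac_sketch_done, scsicmd);
	if (status == -EINPROGRESS)
		return 0;		/* queued; aac_sketch_done will finish it */

	aac_fib_complete(fibptr);	/* send failed: the submitter cleans up */
	aac_fib_free(fibptr);
	return status;
}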
@@ -1228,12 +1228,12 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1228 /* 1228 /*
1229 * Allocate and initialize a Fib then setup a BlockWrite command 1229 * Allocate and initialize a Fib then setup a BlockWrite command
1230 */ 1230 */
1231 if (!(cmd_fibcontext = fib_alloc(dev))) { 1231 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
1232 scsicmd->result = DID_ERROR << 16; 1232 scsicmd->result = DID_ERROR << 16;
1233 scsicmd->scsi_done(scsicmd); 1233 scsicmd->scsi_done(scsicmd);
1234 return 0; 1234 return 0;
1235 } 1235 }
1236 fib_init(cmd_fibcontext); 1236 aac_fib_init(cmd_fibcontext);
1237 1237
1238 if (dev->raw_io_interface) { 1238 if (dev->raw_io_interface) {
1239 struct aac_raw_io *writecmd; 1239 struct aac_raw_io *writecmd;
@@ -1253,7 +1253,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1253 /* 1253 /*
1254 * Now send the Fib to the adapter 1254 * Now send the Fib to the adapter
1255 */ 1255 */
1256 status = fib_send(ContainerRawIo, 1256 status = aac_fib_send(ContainerRawIo,
1257 cmd_fibcontext, 1257 cmd_fibcontext,
1258 fibsize, 1258 fibsize,
1259 FsaNormal, 1259 FsaNormal,
@@ -1279,7 +1279,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1279 /* 1279 /*
1280 * Now send the Fib to the adapter 1280 * Now send the Fib to the adapter
1281 */ 1281 */
1282 status = fib_send(ContainerCommand64, 1282 status = aac_fib_send(ContainerCommand64,
1283 cmd_fibcontext, 1283 cmd_fibcontext,
1284 fibsize, 1284 fibsize,
1285 FsaNormal, 1285 FsaNormal,
@@ -1305,7 +1305,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1305 /* 1305 /*
1306 * Now send the Fib to the adapter 1306 * Now send the Fib to the adapter
1307 */ 1307 */
1308 status = fib_send(ContainerCommand, 1308 status = aac_fib_send(ContainerCommand,
1309 cmd_fibcontext, 1309 cmd_fibcontext,
1310 fibsize, 1310 fibsize,
1311 FsaNormal, 1311 FsaNormal,
@@ -1322,15 +1322,15 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1322 return 0; 1322 return 0;
1323 } 1323 }
1324 1324
1325 printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status); 1325 printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
1326 /* 1326 /*
1327 * For some reason, the Fib didn't queue, return QUEUE_FULL 1327 * For some reason, the Fib didn't queue, return QUEUE_FULL
1328 */ 1328 */
1329 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; 1329 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
1330 scsicmd->scsi_done(scsicmd); 1330 scsicmd->scsi_done(scsicmd);
1331 1331
1332 fib_complete(cmd_fibcontext); 1332 aac_fib_complete(cmd_fibcontext);
1333 fib_free(cmd_fibcontext); 1333 aac_fib_free(cmd_fibcontext);
1334 return 0; 1334 return 0;
1335} 1335}
1336 1336
@@ -1369,8 +1369,8 @@ static void synchronize_callback(void *context, struct fib *fibptr)
1369 sizeof(cmd->sense_buffer))); 1369 sizeof(cmd->sense_buffer)));
1370 } 1370 }
1371 1371
1372 fib_complete(fibptr); 1372 aac_fib_complete(fibptr);
1373 fib_free(fibptr); 1373 aac_fib_free(fibptr);
1374 cmd->scsi_done(cmd); 1374 cmd->scsi_done(cmd);
1375} 1375}
1376 1376
@@ -1407,10 +1407,10 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1407 * Allocate and initialize a Fib 1407 * Allocate and initialize a Fib
1408 */ 1408 */
1409 if (!(cmd_fibcontext = 1409 if (!(cmd_fibcontext =
1410 fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) 1410 aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata)))
1411 return SCSI_MLQUEUE_HOST_BUSY; 1411 return SCSI_MLQUEUE_HOST_BUSY;
1412 1412
1413 fib_init(cmd_fibcontext); 1413 aac_fib_init(cmd_fibcontext);
1414 1414
1415 synchronizecmd = fib_data(cmd_fibcontext); 1415 synchronizecmd = fib_data(cmd_fibcontext);
1416 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig); 1416 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
@@ -1422,7 +1422,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1422 /* 1422 /*
1423 * Now send the Fib to the adapter 1423 * Now send the Fib to the adapter
1424 */ 1424 */
1425 status = fib_send(ContainerCommand, 1425 status = aac_fib_send(ContainerCommand,
1426 cmd_fibcontext, 1426 cmd_fibcontext,
1427 sizeof(struct aac_synchronize), 1427 sizeof(struct aac_synchronize),
1428 FsaNormal, 1428 FsaNormal,
@@ -1437,9 +1437,9 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1437 return 0; 1437 return 0;
1438 1438
1439 printk(KERN_WARNING 1439 printk(KERN_WARNING
1440 "aac_synchronize: fib_send failed with status: %d.\n", status); 1440 "aac_synchronize: aac_fib_send failed with status: %d.\n", status);
1441 fib_complete(cmd_fibcontext); 1441 aac_fib_complete(cmd_fibcontext);
1442 fib_free(cmd_fibcontext); 1442 aac_fib_free(cmd_fibcontext);
1443 return SCSI_MLQUEUE_HOST_BUSY; 1443 return SCSI_MLQUEUE_HOST_BUSY;
1444} 1444}
1445 1445
@@ -1465,7 +1465,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1465 * itself. 1465 * itself.
1466 */ 1466 */
1467 if (scmd_id(scsicmd) != host->this_id) { 1467 if (scmd_id(scsicmd) != host->this_id) {
1468 if ((scsicmd->device->channel == 0) ){ 1468 if ((scsicmd->device->channel == CONTAINER_CHANNEL)) {
1469 if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){ 1469 if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){
1470 scsicmd->result = DID_NO_CONNECT << 16; 1470 scsicmd->result = DID_NO_CONNECT << 16;
1471 scsicmd->scsi_done(scsicmd); 1471 scsicmd->scsi_done(scsicmd);
@@ -1488,7 +1488,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1488 case READ_CAPACITY: 1488 case READ_CAPACITY:
1489 case TEST_UNIT_READY: 1489 case TEST_UNIT_READY:
1490 spin_unlock_irq(host->host_lock); 1490 spin_unlock_irq(host->host_lock);
1491 probe_container(dev, cid); 1491 aac_probe_container(dev, cid);
1492 if ((fsa_dev_ptr[cid].valid & 1) == 0) 1492 if ((fsa_dev_ptr[cid].valid & 1) == 0)
1493 fsa_dev_ptr[cid].valid = 0; 1493 fsa_dev_ptr[cid].valid = 0;
1494 spin_lock_irq(host->host_lock); 1494 spin_lock_irq(host->host_lock);
@@ -1935,33 +1935,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
1935 case SRB_STATUS_ERROR_RECOVERY: 1935 case SRB_STATUS_ERROR_RECOVERY:
1936 case SRB_STATUS_PENDING: 1936 case SRB_STATUS_PENDING:
1937 case SRB_STATUS_SUCCESS: 1937 case SRB_STATUS_SUCCESS:
1938 if(scsicmd->cmnd[0] == INQUIRY ){ 1938 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1939 u8 b;
1940 u8 b1;
1941 /* We can't expose disk devices because we can't tell whether they
1942 * are the raw container drives or stand alone drives. If they have
1943 * the removable bit set then we should expose them though.
1944 */
1945 b = (*(u8*)scsicmd->buffer)&0x1f;
1946 b1 = ((u8*)scsicmd->buffer)[1];
1947 if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
1948 || (b==TYPE_DISK && (b1&0x80)) ){
1949 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1950 /*
1951 * We will allow disk devices if in RAID/SCSI mode and
1952 * the channel is 2
1953 */
1954 } else if ((dev->raid_scsi_mode) &&
1955 (scmd_channel(scsicmd) == 2)) {
1956 scsicmd->result = DID_OK << 16 |
1957 COMMAND_COMPLETE << 8;
1958 } else {
1959 scsicmd->result = DID_NO_CONNECT << 16 |
1960 COMMAND_COMPLETE << 8;
1961 }
1962 } else {
1963 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1964 }
1965 break; 1939 break;
1966 case SRB_STATUS_DATA_OVERRUN: 1940 case SRB_STATUS_DATA_OVERRUN:
1967 switch(scsicmd->cmnd[0]){ 1941 switch(scsicmd->cmnd[0]){
@@ -1981,28 +1955,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
1981 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; 1955 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
1982 break; 1956 break;
1983 case INQUIRY: { 1957 case INQUIRY: {
1984 u8 b; 1958 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1985 u8 b1;
1986 /* We can't expose disk devices because we can't tell whether they
1987 * are the raw container drives or stand alone drives
1988 */
1989 b = (*(u8*)scsicmd->buffer)&0x0f;
1990 b1 = ((u8*)scsicmd->buffer)[1];
1991 if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
1992 || (b==TYPE_DISK && (b1&0x80)) ){
1993 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1994 /*
1995 * We will allow disk devices if in RAID/SCSI mode and
1996 * the channel is 2
1997 */
1998 } else if ((dev->raid_scsi_mode) &&
1999 (scmd_channel(scsicmd) == 2)) {
2000 scsicmd->result = DID_OK << 16 |
2001 COMMAND_COMPLETE << 8;
2002 } else {
2003 scsicmd->result = DID_NO_CONNECT << 16 |
2004 COMMAND_COMPLETE << 8;
2005 }
2006 break; 1959 break;
2007 } 1960 }
2008 default: 1961 default:
@@ -2089,8 +2042,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
2089 */ 2042 */
2090 scsicmd->result |= le32_to_cpu(srbreply->scsi_status); 2043 scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
2091 2044
2092 fib_complete(fibptr); 2045 aac_fib_complete(fibptr);
2093 fib_free(fibptr); 2046 aac_fib_free(fibptr);
2094 scsicmd->scsi_done(scsicmd); 2047 scsicmd->scsi_done(scsicmd);
2095} 2048}
2096 2049
@@ -2142,10 +2095,10 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
2142 /* 2095 /*
2143 * Allocate and initialize a Fib then setup a BlockWrite command 2096 * Allocate and initialize a Fib then setup a BlockWrite command
2144 */ 2097 */
2145 if (!(cmd_fibcontext = fib_alloc(dev))) { 2098 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
2146 return -1; 2099 return -1;
2147 } 2100 }
2148 fib_init(cmd_fibcontext); 2101 aac_fib_init(cmd_fibcontext);
2149 2102
2150 srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext); 2103 srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
2151 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); 2104 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
@@ -2179,7 +2132,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
2179 /* 2132 /*
2180 * Now send the Fib to the adapter 2133 * Now send the Fib to the adapter
2181 */ 2134 */
2182 status = fib_send(ScsiPortCommand64, cmd_fibcontext, 2135 status = aac_fib_send(ScsiPortCommand64, cmd_fibcontext,
2183 fibsize, FsaNormal, 0, 1, 2136 fibsize, FsaNormal, 0, 1,
2184 (fib_callback) aac_srb_callback, 2137 (fib_callback) aac_srb_callback,
2185 (void *) scsicmd); 2138 (void *) scsicmd);
@@ -2201,7 +2154,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
2201 /* 2154 /*
2202 * Now send the Fib to the adapter 2155 * Now send the Fib to the adapter
2203 */ 2156 */
2204 status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1, 2157 status = aac_fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
2205 (fib_callback) aac_srb_callback, (void *) scsicmd); 2158 (fib_callback) aac_srb_callback, (void *) scsicmd);
2206 } 2159 }
2207 /* 2160 /*
@@ -2211,9 +2164,9 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
2211 return 0; 2164 return 0;
2212 } 2165 }
2213 2166
2214 printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status); 2167 printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
2215 fib_complete(cmd_fibcontext); 2168 aac_fib_complete(cmd_fibcontext);
2216 fib_free(cmd_fibcontext); 2169 aac_fib_free(cmd_fibcontext);
2217 2170
2218 return -1; 2171 return -1;
2219} 2172}
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 66dbb6d2c506..2d430b7e8cf4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1774,16 +1774,16 @@ static inline u32 cap_to_cyls(sector_t capacity, u32 divisor)
1774struct scsi_cmnd; 1774struct scsi_cmnd;
1775 1775
1776const char *aac_driverinfo(struct Scsi_Host *); 1776const char *aac_driverinfo(struct Scsi_Host *);
1777struct fib *fib_alloc(struct aac_dev *dev); 1777struct fib *aac_fib_alloc(struct aac_dev *dev);
1778int fib_setup(struct aac_dev *dev); 1778int aac_fib_setup(struct aac_dev *dev);
1779void fib_map_free(struct aac_dev *dev); 1779void aac_fib_map_free(struct aac_dev *dev);
1780void fib_free(struct fib * context); 1780void aac_fib_free(struct fib * context);
1781void fib_init(struct fib * context); 1781void aac_fib_init(struct fib * context);
1782void aac_printf(struct aac_dev *dev, u32 val); 1782void aac_printf(struct aac_dev *dev, u32 val);
1783int fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt); 1783int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
1784int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry); 1784int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
1785void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); 1785void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
1786int fib_complete(struct fib * context); 1786int aac_fib_complete(struct fib * context);
1787#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data) 1787#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data)
1788struct aac_dev *aac_init_adapter(struct aac_dev *dev); 1788struct aac_dev *aac_init_adapter(struct aac_dev *dev);
1789int aac_get_config_status(struct aac_dev *dev); 1789int aac_get_config_status(struct aac_dev *dev);
@@ -1799,11 +1799,11 @@ unsigned int aac_command_normal(struct aac_queue * q);
1799unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); 1799unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
1800int aac_command_thread(struct aac_dev * dev); 1800int aac_command_thread(struct aac_dev * dev);
1801int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); 1801int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
1802int fib_adapter_complete(struct fib * fibptr, unsigned short size); 1802int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size);
1803struct aac_driver_ident* aac_get_driver_ident(int devtype); 1803struct aac_driver_ident* aac_get_driver_ident(int devtype);
1804int aac_get_adapter_info(struct aac_dev* dev); 1804int aac_get_adapter_info(struct aac_dev* dev);
1805int aac_send_shutdown(struct aac_dev *dev); 1805int aac_send_shutdown(struct aac_dev *dev);
1806int probe_container(struct aac_dev *dev, int cid); 1806int aac_probe_container(struct aac_dev *dev, int cid);
1807extern int numacb; 1807extern int numacb;
1808extern int acbsize; 1808extern int acbsize;
1809extern char aac_driver_version[]; 1809extern char aac_driver_version[];
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 4fe79cd7c957..47fefca72695 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
63 unsigned size; 63 unsigned size;
64 int retval; 64 int retval;
65 65
66 fibptr = fib_alloc(dev); 66 fibptr = aac_fib_alloc(dev);
67 if(fibptr == NULL) { 67 if(fibptr == NULL) {
68 return -ENOMEM; 68 return -ENOMEM;
69 } 69 }
@@ -73,7 +73,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
73 * First copy in the header so that we can check the size field. 73 * First copy in the header so that we can check the size field.
74 */ 74 */
75 if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) { 75 if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
76 fib_free(fibptr); 76 aac_fib_free(fibptr);
77 return -EFAULT; 77 return -EFAULT;
78 } 78 }
79 /* 79 /*
@@ -110,13 +110,13 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
110 */ 110 */
111 kfib->header.XferState = 0; 111 kfib->header.XferState = 0;
112 } else { 112 } else {
113 retval = fib_send(le16_to_cpu(kfib->header.Command), fibptr, 113 retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
114 le16_to_cpu(kfib->header.Size) , FsaNormal, 114 le16_to_cpu(kfib->header.Size) , FsaNormal,
115 1, 1, NULL, NULL); 115 1, 1, NULL, NULL);
116 if (retval) { 116 if (retval) {
117 goto cleanup; 117 goto cleanup;
118 } 118 }
119 if (fib_complete(fibptr) != 0) { 119 if (aac_fib_complete(fibptr) != 0) {
120 retval = -EINVAL; 120 retval = -EINVAL;
121 goto cleanup; 121 goto cleanup;
122 } 122 }
@@ -138,7 +138,7 @@ cleanup:
138 fibptr->hw_fib_pa = hw_fib_pa; 138 fibptr->hw_fib_pa = hw_fib_pa;
139 fibptr->hw_fib = hw_fib; 139 fibptr->hw_fib = hw_fib;
140 } 140 }
141 fib_free(fibptr); 141 aac_fib_free(fibptr);
142 return retval; 142 return retval;
143} 143}
144 144
@@ -464,10 +464,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
464 /* 464 /*
465 * Allocate and initialize a Fib then setup a BlockWrite command 465 * Allocate and initialize a Fib then setup a BlockWrite command
466 */ 466 */
467 if (!(srbfib = fib_alloc(dev))) { 467 if (!(srbfib = aac_fib_alloc(dev))) {
468 return -ENOMEM; 468 return -ENOMEM;
469 } 469 }
470 fib_init(srbfib); 470 aac_fib_init(srbfib);
471 471
472 srbcmd = (struct aac_srb*) fib_data(srbfib); 472 srbcmd = (struct aac_srb*) fib_data(srbfib);
473 473
@@ -601,7 +601,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
601 601
602 srbcmd->count = cpu_to_le32(byte_count); 602 srbcmd->count = cpu_to_le32(byte_count);
603 psg->count = cpu_to_le32(sg_indx+1); 603 psg->count = cpu_to_le32(sg_indx+1);
604 status = fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); 604 status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
605 } else { 605 } else {
606 struct user_sgmap* upsg = &user_srbcmd->sg; 606 struct user_sgmap* upsg = &user_srbcmd->sg;
607 struct sgmap* psg = &srbcmd->sg; 607 struct sgmap* psg = &srbcmd->sg;
@@ -649,7 +649,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
649 } 649 }
650 srbcmd->count = cpu_to_le32(byte_count); 650 srbcmd->count = cpu_to_le32(byte_count);
651 psg->count = cpu_to_le32(sg_indx+1); 651 psg->count = cpu_to_le32(sg_indx+1);
652 status = fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); 652 status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
653 } 653 }
654 654
655 if (status != 0){ 655 if (status != 0){
@@ -684,8 +684,8 @@ cleanup:
684 for(i=0; i <= sg_indx; i++){ 684 for(i=0; i <= sg_indx; i++){
685 kfree(sg_list[i]); 685 kfree(sg_list[i]);
686 } 686 }
687 fib_complete(srbfib); 687 aac_fib_complete(srbfib);
688 fib_free(srbfib); 688 aac_fib_free(srbfib);
689 689
690 return rcode; 690 return rcode;
691} 691}
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 82821d331c07..1628d094943d 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -185,17 +185,17 @@ int aac_send_shutdown(struct aac_dev * dev)
185 struct aac_close *cmd; 185 struct aac_close *cmd;
186 int status; 186 int status;
187 187
188 fibctx = fib_alloc(dev); 188 fibctx = aac_fib_alloc(dev);
189 if (!fibctx) 189 if (!fibctx)
190 return -ENOMEM; 190 return -ENOMEM;
191 fib_init(fibctx); 191 aac_fib_init(fibctx);
192 192
193 cmd = (struct aac_close *) fib_data(fibctx); 193 cmd = (struct aac_close *) fib_data(fibctx);
194 194
195 cmd->command = cpu_to_le32(VM_CloseAll); 195 cmd->command = cpu_to_le32(VM_CloseAll);
196 cmd->cid = cpu_to_le32(0xffffffff); 196 cmd->cid = cpu_to_le32(0xffffffff);
197 197
198 status = fib_send(ContainerCommand, 198 status = aac_fib_send(ContainerCommand,
199 fibctx, 199 fibctx,
200 sizeof(struct aac_close), 200 sizeof(struct aac_close),
201 FsaNormal, 201 FsaNormal,
@@ -203,8 +203,8 @@ int aac_send_shutdown(struct aac_dev * dev)
203 NULL, NULL); 203 NULL, NULL);
204 204
205 if (status == 0) 205 if (status == 0)
206 fib_complete(fibctx); 206 aac_fib_complete(fibctx);
207 fib_free(fibctx); 207 aac_fib_free(fibctx);
208 return status; 208 return status;
209} 209}
210 210
@@ -427,7 +427,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
427 /* 427 /*
428 * Initialize the list of fibs 428 * Initialize the list of fibs
429 */ 429 */
430 if(fib_setup(dev)<0){ 430 if (aac_fib_setup(dev) < 0) {
431 kfree(dev->queues); 431 kfree(dev->queues);
432 return NULL; 432 return NULL;
433 } 433 }
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 014cc8d54a9f..609fd19b1844 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -67,27 +67,27 @@ static int fib_map_alloc(struct aac_dev *dev)
67} 67}
68 68
69/** 69/**
70 * fib_map_free - free the fib objects 70 * aac_fib_map_free - free the fib objects
71 * @dev: Adapter to free 71 * @dev: Adapter to free
72 * 72 *
73 * Free the PCI mappings and the memory allocated for FIB blocks 73 * Free the PCI mappings and the memory allocated for FIB blocks
74 * on this adapter. 74 * on this adapter.
75 */ 75 */
76 76
77void fib_map_free(struct aac_dev *dev) 77void aac_fib_map_free(struct aac_dev *dev)
78{ 78{
79 pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa); 79 pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa);
80} 80}
81 81
82/** 82/**
83 * fib_setup - setup the fibs 83 * aac_fib_setup - setup the fibs
84 * @dev: Adapter to set up 84 * @dev: Adapter to set up
85 * 85 *
86 * Allocate the PCI space for the fibs, map it and then intialise the 86 * Allocate the PCI space for the fibs, map it and then intialise the
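(typo in the kernel-doc line above: "intialise" should read "initialise"; the patch leaves the context line untouched)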
87 * fib area, the unmapped fib data and also the free list 87 * fib area, the unmapped fib data and also the free list
88 */ 88 */
89 89
90int fib_setup(struct aac_dev * dev) 90int aac_fib_setup(struct aac_dev * dev)
91{ 91{
92 struct fib *fibptr; 92 struct fib *fibptr;
93 struct hw_fib *hw_fib_va; 93 struct hw_fib *hw_fib_va;
@@ -134,14 +134,14 @@ int fib_setup(struct aac_dev * dev)
134} 134}
135 135
136/** 136/**
137 * fib_alloc - allocate a fib 137 * aac_fib_alloc - allocate a fib
138 * @dev: Adapter to allocate the fib for 138 * @dev: Adapter to allocate the fib for
139 * 139 *
140 * Allocate a fib from the adapter fib pool. If the pool is empty we 140 * Allocate a fib from the adapter fib pool. If the pool is empty we
141 * return NULL. 141 * return NULL.
142 */ 142 */
143 143
144struct fib * fib_alloc(struct aac_dev *dev) 144struct fib *aac_fib_alloc(struct aac_dev *dev)
145{ 145{
146 struct fib * fibptr; 146 struct fib * fibptr;
147 unsigned long flags; 147 unsigned long flags;
@@ -170,14 +170,14 @@ struct fib * fib_alloc(struct aac_dev *dev)
170} 170}
171 171
172/** 172/**
173 * fib_free - free a fib 173 * aac_fib_free - free a fib
174 * @fibptr: fib to free up 174 * @fibptr: fib to free up
175 * 175 *
176 * Frees up a fib and places it on the appropriate queue 176 * Frees up a fib and places it on the appropriate queue
177 * (either free or timed out) 177 * (either free or timed out)
178 */ 178 */
179 179
180void fib_free(struct fib * fibptr) 180void aac_fib_free(struct fib *fibptr)
181{ 181{
182 unsigned long flags; 182 unsigned long flags;
183 183
@@ -188,7 +188,7 @@ void fib_free(struct fib * fibptr)
188 fibptr->dev->timeout_fib = fibptr; 188 fibptr->dev->timeout_fib = fibptr;
189 } else { 189 } else {
190 if (fibptr->hw_fib->header.XferState != 0) { 190 if (fibptr->hw_fib->header.XferState != 0) {
191 printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 191 printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
192 (void*)fibptr, 192 (void*)fibptr,
193 le32_to_cpu(fibptr->hw_fib->header.XferState)); 193 le32_to_cpu(fibptr->hw_fib->header.XferState));
194 } 194 }
@@ -199,13 +199,13 @@ void fib_free(struct fib * fibptr)
199} 199}
200 200
201/** 201/**
202 * fib_init - initialise a fib 202 * aac_fib_init - initialise a fib
203 * @fibptr: The fib to initialize 203 * @fibptr: The fib to initialize
204 * 204 *
205 * Set up the generic fib fields ready for use 205 * Set up the generic fib fields ready for use
206 */ 206 */
207 207
208void fib_init(struct fib *fibptr) 208void aac_fib_init(struct fib *fibptr)
209{ 209{
210 struct hw_fib *hw_fib = fibptr->hw_fib; 210 struct hw_fib *hw_fib = fibptr->hw_fib;
211 211
@@ -362,7 +362,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
362 */ 362 */
363 363
364/** 364/**
365 * fib_send - send a fib to the adapter 365 * aac_fib_send - send a fib to the adapter
366 * @command: Command to send 366 * @command: Command to send
367 * @fibptr: The fib 367 * @fibptr: The fib
368 * @size: Size of fib data area 368 * @size: Size of fib data area
@@ -378,7 +378,9 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
378 * response FIB is received from the adapter. 378 * response FIB is received from the adapter.
379 */ 379 */
380 380
381int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data) 381int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
382 int priority, int wait, int reply, fib_callback callback,
383 void *callback_data)
382{ 384{
383 struct aac_dev * dev = fibptr->dev; 385 struct aac_dev * dev = fibptr->dev;
384 struct hw_fib * hw_fib = fibptr->hw_fib; 386 struct hw_fib * hw_fib = fibptr->hw_fib;
@@ -493,7 +495,7 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
493 q->numpending++; 495 q->numpending++;
494 *(q->headers.producer) = cpu_to_le32(index + 1); 496 *(q->headers.producer) = cpu_to_le32(index + 1);
495 spin_unlock_irqrestore(q->lock, qflags); 497 spin_unlock_irqrestore(q->lock, qflags);
496 dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index)); 498 dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index));
497 if (!(nointr & aac_config.irq_mod)) 499 if (!(nointr & aac_config.irq_mod))
498 aac_adapter_notify(dev, AdapNormCmdQueue); 500 aac_adapter_notify(dev, AdapNormCmdQueue);
499 } 501 }
@@ -520,7 +522,7 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
520 list_del(&fibptr->queue); 522 list_del(&fibptr->queue);
521 spin_unlock_irqrestore(q->lock, qflags); 523 spin_unlock_irqrestore(q->lock, qflags);
522 if (wait == -1) { 524 if (wait == -1) {
523 printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n" 525 printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
524 "Usually a result of a PCI interrupt routing problem;\n" 526 "Usually a result of a PCI interrupt routing problem;\n"
525 "update mother board BIOS or consider utilizing one of\n" 527 "update mother board BIOS or consider utilizing one of\n"
526 "the SAFE mode kernel options (acpi, apic etc)\n"); 528 "the SAFE mode kernel options (acpi, apic etc)\n");
@@ -624,7 +626,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
624} 626}
625 627
626/** 628/**
627 * fib_adapter_complete - complete adapter issued fib 629 * aac_fib_adapter_complete - complete adapter issued fib
628 * @fibptr: fib to complete 630 * @fibptr: fib to complete
629 * @size: size of fib 631 * @size: size of fib
630 * 632 *
@@ -632,7 +634,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
632 * the adapter. 634 * the adapter.
633 */ 635 */
634 636
635int fib_adapter_complete(struct fib * fibptr, unsigned short size) 637int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
636{ 638{
637 struct hw_fib * hw_fib = fibptr->hw_fib; 639 struct hw_fib * hw_fib = fibptr->hw_fib;
638 struct aac_dev * dev = fibptr->dev; 640 struct aac_dev * dev = fibptr->dev;
@@ -683,20 +685,20 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
683 } 685 }
684 else 686 else
685 { 687 {
686 printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n"); 688 printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
687 BUG(); 689 BUG();
688 } 690 }
689 return 0; 691 return 0;
690} 692}
691 693
692/** 694/**
693 * fib_complete - fib completion handler 695 * aac_fib_complete - fib completion handler
694 * @fib: FIB to complete 696 * @fib: FIB to complete
695 * 697 *
696 * Will do all necessary work to complete a FIB. 698 * Will do all necessary work to complete a FIB.
697 */ 699 */
698 700
699int fib_complete(struct fib * fibptr) 701int aac_fib_complete(struct fib *fibptr)
700{ 702{
701 struct hw_fib * hw_fib = fibptr->hw_fib; 703 struct hw_fib * hw_fib = fibptr->hw_fib;
702 704
@@ -995,14 +997,14 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
995 if (!dev || !dev->scsi_host_ptr) 997 if (!dev || !dev->scsi_host_ptr)
996 return; 998 return;
997 /* 999 /*
998 * force reload of disk info via probe_container 1000 * force reload of disk info via aac_probe_container
999 */ 1001 */
1000 if ((device_config_needed == CHANGE) 1002 if ((device_config_needed == CHANGE)
1001 && (dev->fsa_dev[container].valid == 1)) 1003 && (dev->fsa_dev[container].valid == 1))
1002 dev->fsa_dev[container].valid = 2; 1004 dev->fsa_dev[container].valid = 2;
1003 if ((device_config_needed == CHANGE) || 1005 if ((device_config_needed == CHANGE) ||
1004 (device_config_needed == ADD)) 1006 (device_config_needed == ADD))
1005 probe_container(dev, container); 1007 aac_probe_container(dev, container);
1006 device = scsi_device_lookup(dev->scsi_host_ptr, 1008 device = scsi_device_lookup(dev->scsi_host_ptr,
1007 CONTAINER_TO_CHANNEL(container), 1009 CONTAINER_TO_CHANNEL(container),
1008 CONTAINER_TO_ID(container), 1010 CONTAINER_TO_ID(container),
@@ -1104,7 +1106,7 @@ int aac_command_thread(struct aac_dev * dev)
1104 /* Handle Driver Notify Events */ 1106 /* Handle Driver Notify Events */
1105 aac_handle_aif(dev, fib); 1107 aac_handle_aif(dev, fib);
1106 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1108 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1107 fib_adapter_complete(fib, (u16)sizeof(u32)); 1109 aac_fib_adapter_complete(fib, (u16)sizeof(u32));
1108 } else { 1110 } else {
1109 struct list_head *entry; 1111 struct list_head *entry;
1110 /* The u32 here is important and intended. We are using 1112 /* The u32 here is important and intended. We are using
@@ -1241,7 +1243,7 @@ int aac_command_thread(struct aac_dev * dev)
1241 * Set the status of this FIB 1243 * Set the status of this FIB
1242 */ 1244 */
1243 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1245 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1244 fib_adapter_complete(fib, sizeof(u32)); 1246 aac_fib_adapter_complete(fib, sizeof(u32));
1245 spin_unlock_irqrestore(&dev->fib_lock, flagv); 1247 spin_unlock_irqrestore(&dev->fib_lock, flagv);
1246 /* Free up the remaining resources */ 1248 /* Free up the remaining resources */
1247 hw_fib_p = hw_fib_pool; 1249 hw_fib_p = hw_fib_pool;
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 439948ef8251..f6bcb9486f85 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -206,7 +206,7 @@ unsigned int aac_command_normal(struct aac_queue *q)
206 * Set the status of this FIB 206 * Set the status of this FIB
207 */ 207 */
208 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 208 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
209 fib_adapter_complete(fib, sizeof(u32)); 209 aac_fib_adapter_complete(fib, sizeof(u32));
210 spin_lock_irqsave(q->lock, flags); 210 spin_lock_irqsave(q->lock, flags);
211 } 211 }
212 } 212 }
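Editor's note: adapter-initiated FIBs (the AIF events handled in aac_command_thread and aac_command_normal above) are answered differently from host-initiated ones: the status word is written into the FIB data area and the buffer is returned with aac_fib_adapter_complete rather than aac_fib_complete. A short sketch of that pattern, illustration only; aac_sketch_ack_aif is a hypothetical name:

#include "aacraid.h"

static void aac_sketch_ack_aif(struct fib *fib)
{
	struct hw_fib *hw_fib = fib->hw_fib;

	/* event processing would happen here (see aac_handle_aif in commsup.c) */
	*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);	/* report success */
	aac_fib_adapter_complete(fib, (u16)sizeof(u32));/* give the FIB back to the adapter */
}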
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 0bf5f9a943e8..271617890562 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -385,17 +385,45 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
385 385
386static int aac_slave_configure(struct scsi_device *sdev) 386static int aac_slave_configure(struct scsi_device *sdev)
387{ 387{
388 struct Scsi_Host *host = sdev->host; 388 if (sdev_channel(sdev) == CONTAINER_CHANNEL) {
389 sdev->skip_ms_page_8 = 1;
390 sdev->skip_ms_page_3f = 1;
391 }
392 if ((sdev->type == TYPE_DISK) &&
393 (sdev_channel(sdev) != CONTAINER_CHANNEL)) {
394 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
395 if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
396 sdev->no_uld_attach = 1;
397 }
398 if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
399 (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
400 struct scsi_device * dev;
401 struct Scsi_Host *host = sdev->host;
402 unsigned num_lsu = 0;
403 unsigned num_one = 0;
404 unsigned depth;
389 405
390 if (sdev->tagged_supported) 406 __shost_for_each_device(dev, host) {
391 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 128); 407 if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
392 else 408 (sdev_channel(dev) == CONTAINER_CHANNEL))
409 ++num_lsu;
410 else
411 ++num_one;
412 }
413 if (num_lsu == 0)
414 ++num_lsu;
415 depth = (host->can_queue - num_one) / num_lsu;
416 if (depth > 256)
417 depth = 256;
418 else if (depth < 2)
419 depth = 2;
420 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
421 if (!(((struct aac_dev *)host->hostdata)->adapter_info.options &
422 AAC_OPT_NEW_COMM))
423 blk_queue_max_segment_size(sdev->request_queue, 65536);
424 } else
393 scsi_adjust_queue_depth(sdev, 0, 1); 425 scsi_adjust_queue_depth(sdev, 0, 1);
394 426
395 if (!(((struct aac_dev *)host->hostdata)->adapter_info.options
396 & AAC_OPT_NEW_COMM))
397 blk_queue_max_segment_size(sdev->request_queue, 65536);
398
399 return 0; 427 return 0;
400} 428}
401 429
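Editor's note: the reworked aac_slave_configure above shares the host's command slots among the tagged container devices: depth = (can_queue - num_one) / num_lsu, clamped to the range 2..256. A worked sketch of that arithmetic with illustrative values (aac_sketch_depth is a hypothetical name, not part of the patch):

static unsigned aac_sketch_depth(unsigned can_queue, unsigned num_lsu,
				 unsigned num_one)
{
	unsigned depth;

	if (num_lsu == 0)
		num_lsu = 1;		/* never divide by zero */
	depth = (can_queue - num_one) / num_lsu;
	if (depth > 256)		/* same clamp as the patch */
		depth = 256;
	else if (depth < 2)
		depth = 2;
	return depth;			/* e.g. 512 slots, 3 containers, 5 other devices -> 169 each */
}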
@@ -870,7 +898,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
870 898
871 /* 899 /*
872 * max channel will be the physical channels plus 1 virtual channel 900 * max channel will be the physical channels plus 1 virtual channel
873 * all containers are on the virtual channel 0 901 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
874 * physical channels are address by their actual physical number+1 902 * physical channels are address by their actual physical number+1
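(typo in the comment line above: "are address by" should read "are addressed by"; the patch leaves the context line untouched)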
875 */ 903 */
876 if (aac->nondasd_support == 1) 904 if (aac->nondasd_support == 1)
@@ -913,7 +941,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
913 aac_adapter_disable_int(aac); 941 aac_adapter_disable_int(aac);
914 free_irq(pdev->irq, aac); 942 free_irq(pdev->irq, aac);
915 out_unmap: 943 out_unmap:
916 fib_map_free(aac); 944 aac_fib_map_free(aac);
917 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); 945 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
918 kfree(aac->queues); 946 kfree(aac->queues);
919 iounmap(aac->regs.sa); 947 iounmap(aac->regs.sa);
@@ -947,7 +975,7 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
947 975
948 aac_send_shutdown(aac); 976 aac_send_shutdown(aac);
949 aac_adapter_disable_int(aac); 977 aac_adapter_disable_int(aac);
950 fib_map_free(aac); 978 aac_fib_map_free(aac);
951 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, 979 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
952 aac->comm_phys); 980 aac->comm_phys);
953 kfree(aac->queues); 981 kfree(aac->queues);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index bd3ffdf6c800..62e3cda859af 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -2816,7 +2816,7 @@ static int gdth_fill_cache_cmd(int hanum,Scsi_Cmnd *scp,ushort hdrive)
2816 } 2816 }
2817#endif 2817#endif
2818 2818
2819 } else { 2819 } else if (scp->request_bufflen) {
2820 scp->SCp.Status = GDTH_MAP_SINGLE; 2820 scp->SCp.Status = GDTH_MAP_SINGLE;
2821 scp->SCp.Message = (read_write == 1 ? 2821 scp->SCp.Message = (read_write == 1 ?
2822 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); 2822 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 27acf78cf8d8..2bba5e55d7bc 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4236,35 +4236,6 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4236} 4236}
4237 4237
4238/** 4238/**
4239 * ipr_save_ioafp_mode_select - Save adapters mode select data
4240 * @ioa_cfg: ioa config struct
4241 * @scsi_cmd: scsi command struct
4242 *
4243 * This function saves mode select data for the adapter to
4244 * use following an adapter reset.
4245 *
4246 * Return value:
4247 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
4248 **/
4249static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
4250 struct scsi_cmnd *scsi_cmd)
4251{
4252 if (!ioa_cfg->saved_mode_pages) {
4253 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
4254 GFP_ATOMIC);
4255 if (!ioa_cfg->saved_mode_pages) {
4256 dev_err(&ioa_cfg->pdev->dev,
4257 "IOA mode select buffer allocation failed\n");
4258 return SCSI_MLQUEUE_HOST_BUSY;
4259 }
4260 }
4261
4262 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
4263 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
4264 return 0;
4265}
4266
4267/**
4268 * ipr_queuecommand - Queue a mid-layer request 4239 * ipr_queuecommand - Queue a mid-layer request
4269 * @scsi_cmd: scsi command struct 4240 * @scsi_cmd: scsi command struct
4270 * @done: done function 4241 * @done: done function
@@ -4338,9 +4309,6 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4338 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) 4309 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4339 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 4310 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4340 4311
4341 if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
4342 rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
4343
4344 if (likely(rc == 0)) 4312 if (likely(rc == 0))
4345 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); 4313 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4346 4314
@@ -4829,17 +4797,11 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4829 int length; 4797 int length;
4830 4798
4831 ENTER; 4799 ENTER;
4832 if (ioa_cfg->saved_mode_pages) { 4800 ipr_scsi_bus_speed_limit(ioa_cfg);
4833 memcpy(mode_pages, ioa_cfg->saved_mode_pages, 4801 ipr_check_term_power(ioa_cfg, mode_pages);
4834 ioa_cfg->saved_mode_page_len); 4802 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4835 length = ioa_cfg->saved_mode_page_len; 4803 length = mode_pages->hdr.length + 1;
4836 } else { 4804 mode_pages->hdr.length = 0;
4837 ipr_scsi_bus_speed_limit(ioa_cfg);
4838 ipr_check_term_power(ioa_cfg, mode_pages);
4839 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4840 length = mode_pages->hdr.length + 1;
4841 mode_pages->hdr.length = 0;
4842 }
4843 4805
4844 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, 4806 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4845 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), 4807 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
@@ -5969,7 +5931,6 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5969 } 5931 }
5970 5932
5971 ipr_free_dump(ioa_cfg); 5933 ipr_free_dump(ioa_cfg);
5972 kfree(ioa_cfg->saved_mode_pages);
5973 kfree(ioa_cfg->trace); 5934 kfree(ioa_cfg->trace);
5974} 5935}
5975 5936
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b639332131f1..fd360bfe56dd 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -36,8 +36,8 @@
36/* 36/*
37 * Literals 37 * Literals
38 */ 38 */
39#define IPR_DRIVER_VERSION "2.1.1" 39#define IPR_DRIVER_VERSION "2.1.2"
40#define IPR_DRIVER_DATE "(November 15, 2005)" 40#define IPR_DRIVER_DATE "(February 8, 2006)"
41 41
42/* 42/*
43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -1000,7 +1000,6 @@ struct ipr_ioa_cfg {
1000 struct Scsi_Host *host; 1000 struct Scsi_Host *host;
1001 struct pci_dev *pdev; 1001 struct pci_dev *pdev;
1002 struct ipr_sglist *ucode_sglist; 1002 struct ipr_sglist *ucode_sglist;
1003 struct ipr_mode_pages *saved_mode_pages;
1004 u8 saved_mode_page_len; 1003 u8 saved_mode_page_len;
1005 1004
1006 struct work_struct work_q; 1005 struct work_struct work_q;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 780bfcc67096..ff79e68b347c 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -146,7 +146,7 @@ iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
146 spin_unlock_irqrestore(&session->lock, flags); 146 spin_unlock_irqrestore(&session->lock, flags);
147 set_bit(SUSPEND_BIT, &conn->suspend_tx); 147 set_bit(SUSPEND_BIT, &conn->suspend_tx);
148 set_bit(SUSPEND_BIT, &conn->suspend_rx); 148 set_bit(SUSPEND_BIT, &conn->suspend_rx);
149 iscsi_conn_error(iscsi_handle(conn), err); 149 iscsi_conn_error(conn->cls_conn, err);
150} 150}
151 151
152static inline int 152static inline int
@@ -244,12 +244,10 @@ iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
244 if (sc->sc_data_direction == DMA_TO_DEVICE) { 244 if (sc->sc_data_direction == DMA_TO_DEVICE) {
245 struct iscsi_data_task *dtask, *n; 245 struct iscsi_data_task *dtask, *n;
246 /* WRITE: cleanup Data-Out's if any */ 246 /* WRITE: cleanup Data-Out's if any */
247 spin_lock(&conn->lock);
248 list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) { 247 list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
249 list_del(&dtask->item); 248 list_del(&dtask->item);
250 mempool_free(dtask, ctask->datapool); 249 mempool_free(dtask, ctask->datapool);
251 } 250 }
252 spin_unlock(&conn->lock);
253 } 251 }
254 ctask->xmstate = XMSTATE_IDLE; 252 ctask->xmstate = XMSTATE_IDLE;
255 ctask->r2t = NULL; 253 ctask->r2t = NULL;
@@ -689,7 +687,7 @@ iscsi_hdr_recv(struct iscsi_conn *conn)
689 break; 687 break;
690 688
691 if (!conn->in.datalen) { 689 if (!conn->in.datalen) {
692 rc = iscsi_recv_pdu(iscsi_handle(conn), hdr, 690 rc = iscsi_recv_pdu(conn->cls_conn, hdr,
693 NULL, 0); 691 NULL, 0);
694 if (conn->login_mtask != mtask) { 692 if (conn->login_mtask != mtask) {
695 spin_lock(&session->lock); 693 spin_lock(&session->lock);
@@ -737,7 +735,7 @@ iscsi_hdr_recv(struct iscsi_conn *conn)
737 if (!conn->in.datalen) { 735 if (!conn->in.datalen) {
738 struct iscsi_mgmt_task *mtask; 736 struct iscsi_mgmt_task *mtask;
739 737
740 rc = iscsi_recv_pdu(iscsi_handle(conn), hdr, 738 rc = iscsi_recv_pdu(conn->cls_conn, hdr,
741 NULL, 0); 739 NULL, 0);
742 mtask = (struct iscsi_mgmt_task *) 740 mtask = (struct iscsi_mgmt_task *)
743 session->mgmt_cmds[conn->in.itt - 741 session->mgmt_cmds[conn->in.itt -
@@ -761,7 +759,7 @@ iscsi_hdr_recv(struct iscsi_conn *conn)
761 rc = iscsi_check_assign_cmdsn(session, 759 rc = iscsi_check_assign_cmdsn(session,
762 (struct iscsi_nopin*)hdr); 760 (struct iscsi_nopin*)hdr);
763 if (!rc && hdr->ttt != ISCSI_RESERVED_TAG) 761 if (!rc && hdr->ttt != ISCSI_RESERVED_TAG)
764 rc = iscsi_recv_pdu(iscsi_handle(conn), 762 rc = iscsi_recv_pdu(conn->cls_conn,
765 hdr, NULL, 0); 763 hdr, NULL, 0);
766 } else 764 } else
767 rc = ISCSI_ERR_PROTO; 765 rc = ISCSI_ERR_PROTO;
@@ -1044,7 +1042,7 @@ iscsi_data_recv(struct iscsi_conn *conn)
1044 goto exit; 1042 goto exit;
1045 } 1043 }
1046 1044
1047 rc = iscsi_recv_pdu(iscsi_handle(conn), conn->in.hdr, 1045 rc = iscsi_recv_pdu(conn->cls_conn, conn->in.hdr,
1048 conn->data, conn->in.datalen); 1046 conn->data, conn->in.datalen);
1049 1047
1050 if (!rc && conn->datadgst_en && 1048 if (!rc && conn->datadgst_en &&
@@ -2428,19 +2426,20 @@ iscsi_pool_free(struct iscsi_queue *q, void **items)
2428} 2426}
2429 2427
2430static struct iscsi_cls_conn * 2428static struct iscsi_cls_conn *
2431iscsi_conn_create(struct Scsi_Host *shost, uint32_t conn_idx) 2429iscsi_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
2432{ 2430{
2431 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
2433 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 2432 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2434 struct iscsi_conn *conn; 2433 struct iscsi_conn *conn;
2435 struct iscsi_cls_conn *cls_conn; 2434 struct iscsi_cls_conn *cls_conn;
2436 2435
2437 cls_conn = iscsi_create_conn(hostdata_session(shost->hostdata), 2436 cls_conn = iscsi_create_conn(cls_session, conn_idx);
2438 conn_idx);
2439 if (!cls_conn) 2437 if (!cls_conn)
2440 return NULL; 2438 return NULL;
2441 conn = cls_conn->dd_data; 2439 conn = cls_conn->dd_data;
2440 memset(conn, 0, sizeof(*conn));
2442 2441
2443 memset(conn, 0, sizeof(struct iscsi_conn)); 2442 conn->cls_conn = cls_conn;
2444 conn->c_stage = ISCSI_CONN_INITIAL_STAGE; 2443 conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
2445 conn->in_progress = IN_PROGRESS_WAIT_HEADER; 2444 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
2446 conn->id = conn_idx; 2445 conn->id = conn_idx;
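Editor's note: the iscsi_tcp hunks in this file replace the opaque iscsi_sessionh_t/iscsi_connh_t handles with the transport-class objects themselves, so each hook now reaches its driver state through dd_data instead of iscsi_handle()/iscsi_ptr() round trips. A minimal sketch of the new calling convention, not part of the patch; iscsi_sketch_start is a hypothetical name and assumes the driver's iscsi_tcp.h header:

#include "iscsi_tcp.h"

static int iscsi_sketch_start(struct iscsi_cls_conn *cls_conn)
{
	/* driver-private state now hangs off the class object */
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;

	session->state = ISCSI_STATE_LOGGED_IN;	/* as iscsi_conn_start does */
	return 0;
}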
@@ -2452,8 +2451,6 @@ iscsi_conn_create(struct Scsi_Host *shost, uint32_t conn_idx)
2452 conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; 2451 conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
2453 conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH; 2452 conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
2454 2453
2455 spin_lock_init(&conn->lock);
2456
2457 /* initialize general xmit PDU commands queue */ 2454 /* initialize general xmit PDU commands queue */
2458 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*), 2455 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*),
2459 GFP_KERNEL, NULL); 2456 GFP_KERNEL, NULL);
@@ -2625,11 +2622,13 @@ iscsi_conn_destroy(struct iscsi_cls_conn *cls_conn)
2625} 2622}
2626 2623
2627static int 2624static int
2628iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh, 2625iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2629 uint32_t transport_fd, int is_leading) 2626 struct iscsi_cls_conn *cls_conn, uint32_t transport_fd,
2627 int is_leading)
2630{ 2628{
2631 struct iscsi_session *session = iscsi_ptr(sessionh); 2629 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
2632 struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = iscsi_ptr(connh); 2630 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2631 struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = cls_conn->dd_data;
2633 struct sock *sk; 2632 struct sock *sk;
2634 struct socket *sock; 2633 struct socket *sock;
2635 int err; 2634 int err;
@@ -2703,9 +2702,9 @@ iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh,
2703} 2702}
2704 2703
2705static int 2704static int
2706iscsi_conn_start(iscsi_connh_t connh) 2705iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2707{ 2706{
2708 struct iscsi_conn *conn = iscsi_ptr(connh); 2707 struct iscsi_conn *conn = cls_conn->dd_data;
2709 struct iscsi_session *session = conn->session; 2708 struct iscsi_session *session = conn->session;
2710 struct sock *sk; 2709 struct sock *sk;
2711 2710
@@ -2754,9 +2753,9 @@ iscsi_conn_start(iscsi_connh_t connh)
2754} 2753}
2755 2754
2756static void 2755static void
2757iscsi_conn_stop(iscsi_connh_t connh, int flag) 2756iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
2758{ 2757{
2759 struct iscsi_conn *conn = iscsi_ptr(connh); 2758 struct iscsi_conn *conn = cls_conn->dd_data;
2760 struct iscsi_session *session = conn->session; 2759 struct iscsi_session *session = conn->session;
2761 struct sock *sk; 2760 struct sock *sk;
2762 unsigned long flags; 2761 unsigned long flags;
@@ -3253,9 +3252,9 @@ static struct scsi_host_template iscsi_sht = {
3253 3252
3254static struct iscsi_transport iscsi_tcp_transport; 3253static struct iscsi_transport iscsi_tcp_transport;
3255 3254
3256static struct Scsi_Host * 3255static struct iscsi_cls_session *
3257iscsi_session_create(struct scsi_transport_template *scsit, 3256iscsi_session_create(struct scsi_transport_template *scsit,
3258 uint32_t initial_cmdsn) 3257 uint32_t initial_cmdsn, uint32_t *sid)
3259{ 3258{
3260 struct Scsi_Host *shost; 3259 struct Scsi_Host *shost;
3261 struct iscsi_session *session; 3260 struct iscsi_session *session;
@@ -3268,13 +3267,14 @@ iscsi_session_create(struct scsi_transport_template *scsit,
3268 session = iscsi_hostdata(shost->hostdata); 3267 session = iscsi_hostdata(shost->hostdata);
3269 memset(session, 0, sizeof(struct iscsi_session)); 3268 memset(session, 0, sizeof(struct iscsi_session));
3270 session->host = shost; 3269 session->host = shost;
3271 session->state = ISCSI_STATE_LOGGED_IN; 3270 session->state = ISCSI_STATE_FREE;
3272 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX; 3271 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
3273 session->cmds_max = ISCSI_XMIT_CMDS_MAX; 3272 session->cmds_max = ISCSI_XMIT_CMDS_MAX;
3274 session->cmdsn = initial_cmdsn; 3273 session->cmdsn = initial_cmdsn;
3275 session->exp_cmdsn = initial_cmdsn + 1; 3274 session->exp_cmdsn = initial_cmdsn + 1;
3276 session->max_cmdsn = initial_cmdsn + 1; 3275 session->max_cmdsn = initial_cmdsn + 1;
3277 session->max_r2t = 1; 3276 session->max_r2t = 1;
3277 *sid = shost->host_no;
3278 3278
3279 /* initialize SCSI PDU commands pool */ 3279 /* initialize SCSI PDU commands pool */
3280 if (iscsi_pool_init(&session->cmdpool, session->cmds_max, 3280 if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
@@ -3311,22 +3311,24 @@ iscsi_session_create(struct scsi_transport_template *scsit,
3311 if (iscsi_r2tpool_alloc(session)) 3311 if (iscsi_r2tpool_alloc(session))
3312 goto r2tpool_alloc_fail; 3312 goto r2tpool_alloc_fail;
3313 3313
3314 return shost; 3314 return hostdata_session(shost->hostdata);
3315 3315
3316r2tpool_alloc_fail: 3316r2tpool_alloc_fail:
3317 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) 3317 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++)
3318 kfree(session->mgmt_cmds[cmd_i]->data); 3318 kfree(session->mgmt_cmds[cmd_i]->data);
3319 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
3320immdata_alloc_fail: 3319immdata_alloc_fail:
3320 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
3321mgmtpool_alloc_fail: 3321mgmtpool_alloc_fail:
3322 iscsi_pool_free(&session->cmdpool, (void**)session->cmds); 3322 iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
3323cmdpool_alloc_fail: 3323cmdpool_alloc_fail:
3324 iscsi_transport_destroy_session(shost);
3324 return NULL; 3325 return NULL;
3325} 3326}
3326 3327
3327static void 3328static void
3328iscsi_session_destroy(struct Scsi_Host *shost) 3329iscsi_session_destroy(struct iscsi_cls_session *cls_session)
3329{ 3330{
3331 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
3330 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 3332 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
3331 int cmd_i; 3333 int cmd_i;
3332 struct iscsi_data_task *dtask, *n; 3334 struct iscsi_data_task *dtask, *n;
@@ -3350,10 +3352,10 @@ iscsi_session_destroy(struct Scsi_Host *shost)
3350} 3352}
3351 3353
3352static int 3354static int
3353iscsi_conn_set_param(iscsi_connh_t connh, enum iscsi_param param, 3355iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
3354 uint32_t value) 3356 uint32_t value)
3355{ 3357{
3356 struct iscsi_conn *conn = iscsi_ptr(connh); 3358 struct iscsi_conn *conn = cls_conn->dd_data;
3357 struct iscsi_session *session = conn->session; 3359 struct iscsi_session *session = conn->session;
3358 3360
3359 spin_lock_bh(&session->lock); 3361 spin_lock_bh(&session->lock);
@@ -3495,9 +3497,10 @@ iscsi_conn_set_param(iscsi_connh_t connh, enum iscsi_param param,
3495} 3497}
3496 3498
3497static int 3499static int
3498iscsi_session_get_param(struct Scsi_Host *shost, 3500iscsi_session_get_param(struct iscsi_cls_session *cls_session,
3499 enum iscsi_param param, uint32_t *value) 3501 enum iscsi_param param, uint32_t *value)
3500{ 3502{
3503 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
3501 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 3504 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
3502 3505
3503 switch(param) { 3506 switch(param) {
@@ -3539,9 +3542,10 @@ iscsi_session_get_param(struct Scsi_Host *shost,
3539} 3542}
3540 3543
3541static int 3544static int
3542iscsi_conn_get_param(void *data, enum iscsi_param param, uint32_t *value) 3545iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
3546 enum iscsi_param param, uint32_t *value)
3543{ 3547{
3544 struct iscsi_conn *conn = data; 3548 struct iscsi_conn *conn = cls_conn->dd_data;
3545 3549
3546 switch(param) { 3550 switch(param) {
3547 case ISCSI_PARAM_MAX_RECV_DLENGTH: 3551 case ISCSI_PARAM_MAX_RECV_DLENGTH:
@@ -3564,9 +3568,9 @@ iscsi_conn_get_param(void *data, enum iscsi_param param, uint32_t *value)
3564} 3568}
3565 3569
3566static void 3570static void
3567iscsi_conn_get_stats(iscsi_connh_t connh, struct iscsi_stats *stats) 3571iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
3568{ 3572{
3569 struct iscsi_conn *conn = iscsi_ptr(connh); 3573 struct iscsi_conn *conn = cls_conn->dd_data;
3570 3574
3571 stats->txdata_octets = conn->txdata_octets; 3575 stats->txdata_octets = conn->txdata_octets;
3572 stats->rxdata_octets = conn->rxdata_octets; 3576 stats->rxdata_octets = conn->rxdata_octets;
@@ -3587,10 +3591,10 @@ iscsi_conn_get_stats(iscsi_connh_t connh, struct iscsi_stats *stats)
3587} 3591}
3588 3592
3589static int 3593static int
3590iscsi_conn_send_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, char *data, 3594iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
3591 uint32_t data_size) 3595 char *data, uint32_t data_size)
3592{ 3596{
3593 struct iscsi_conn *conn = iscsi_ptr(connh); 3597 struct iscsi_conn *conn = cls_conn->dd_data;
3594 int rc; 3598 int rc;
3595 3599
3596 mutex_lock(&conn->xmitmutex); 3600 mutex_lock(&conn->xmitmutex);
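The iscsi_tcp.c hunks above convert the transport callbacks from opaque handles (iscsi_sessionh_t/iscsi_connh_t unpacked with iscsi_ptr()/iscsi_handle()) to the class objects themselves: the driver-private structures live in the class objects' dd_data area, iscsi_session_create() now returns the iscsi_cls_session and reports the host number through the new *sid argument, a fresh session starts in ISCSI_STATE_FREE instead of ISCSI_STATE_LOGGED_IN, and the error path frees the mgmt pool under the correct label. A minimal sketch of the lookup pattern this relies on, assuming only what the hunks show (helper names are illustrative, not the driver's):

	static struct iscsi_conn *conn_of(struct iscsi_cls_conn *cls_conn)
	{
		return cls_conn->dd_data;		/* filled in by iscsi_conn_create() */
	}

	static struct iscsi_session *session_of(struct iscsi_cls_session *cls_session)
	{
		struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);

		return iscsi_hostdata(shost->hostdata);	/* session is embedded in host data */
	}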
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index f95e61b76f70..ba26741ac154 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -113,7 +113,10 @@ struct iscsi_tcp_recv {
113 int datadgst; 113 int datadgst;
114}; 114};
115 115
116struct iscsi_cls_conn;
117
116struct iscsi_conn { 118struct iscsi_conn {
119 struct iscsi_cls_conn *cls_conn; /* ptr to class connection */
117 struct iscsi_hdr hdr; /* header placeholder */ 120 struct iscsi_hdr hdr; /* header placeholder */
118 char hdrext[4*sizeof(__u16) + 121 char hdrext[4*sizeof(__u16) +
119 sizeof(__u32)]; 122 sizeof(__u32)];
@@ -143,7 +146,6 @@ struct iscsi_conn {
143 struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */ 146 struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */
144 struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */ 147 struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */
145 struct iscsi_cmd_task *ctask; /* xmit ctask in progress */ 148 struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
146 spinlock_t lock; /* FIXME: to be removed */
147 149
148 /* old values for socket callbacks */ 150 /* old values for socket callbacks */
149 void (*old_data_ready)(struct sock *, int); 151 void (*old_data_ready)(struct sock *, int);
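The header change mirrors this: iscsi_conn gains a back-pointer to its class connection (with a forward declaration of struct iscsi_cls_conn) and finally drops the spinlock that was already marked for removal. A minimal sketch of the two-way mapping the new field enables, assuming nothing beyond the fields shown above:

	static void example_conn_mapping(struct iscsi_conn *conn)
	{
		struct iscsi_cls_conn *cls_conn = conn->cls_conn;	/* driver -> class */

		WARN_ON(cls_conn->dd_data != conn);			/* class -> driver */
		/* This back-pointer is what lets the receive path hand
		 * conn->cls_conn straight to iscsi_recv_pdu() instead of
		 * packing the driver pointer into an opaque handle first. */
	}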
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 46c4cdbaee86..7ddd5a69352a 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -614,7 +614,7 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
614 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) { 614 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
615 /* Unable to use DMA due to host limitation */ 615 /* Unable to use DMA due to host limitation */
616 tf->protocol = ATA_PROT_PIO; 616 tf->protocol = ATA_PROT_PIO;
617 index = dev->multi_count ? 0 : 4; 617 index = dev->multi_count ? 0 : 8;
618 } else { 618 } else {
619 tf->protocol = ATA_PROT_DMA; 619 tf->protocol = ATA_PROT_DMA;
620 index = 16; 620 index = 16;
@@ -3357,11 +3357,12 @@ static void ata_pio_error(struct ata_port *ap)
3357{ 3357{
3358 struct ata_queued_cmd *qc; 3358 struct ata_queued_cmd *qc;
3359 3359
3360 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3361
3362 qc = ata_qc_from_tag(ap, ap->active_tag); 3360 qc = ata_qc_from_tag(ap, ap->active_tag);
3363 assert(qc != NULL); 3361 assert(qc != NULL);
3364 3362
3363 if (qc->tf.command != ATA_CMD_PACKET)
3364 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3365
3365 /* make sure qc->err_mask is available to 3366 /* make sure qc->err_mask is available to
3366 * know what's wrong and recover 3367 * know what's wrong and recover
3367 */ 3368 */
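Two independent libata fixes above: ata_rwcmd_protocol() selects opcodes from a flat command table, and for the "PIO because of a host limitation" fallback the non-multi-sector row apparently begins at offset 8 rather than 4 (the rows seem to have grown to eight entries each), so the old index picked from the wrong group; and ata_pio_error() now warns only for non-ATAPI commands (qc->tf.command != ATA_CMD_PACKET), presumably because ATAPI check conditions routinely travel this path. A hedged sketch of the indexing arithmetic implied by the fix; the table layout and helper name are assumptions for illustration, not libata's actual code:

	static u8 pick_rw_opcode(const u8 *cmd_table, int multi, int lba48, int write)
	{
		/* rows of eight: [0] PIO multi-sector, [8] plain PIO, [16] DMA */
		int index = multi ? 0 : 8;

		return cmd_table[index + (lba48 ? 2 : 0) + (write ? 1 : 0)];
	}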
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index d101a8a6f4e8..7144674bc8e6 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -5049,7 +5049,7 @@ static struct pci_device_id megaraid_pci_tbl[] = {
5049MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl); 5049MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);
5050 5050
5051static struct pci_driver megaraid_pci_driver = { 5051static struct pci_driver megaraid_pci_driver = {
5052 .name = "megaraid", 5052 .name = "megaraid_legacy",
5053 .id_table = megaraid_pci_tbl, 5053 .id_table = megaraid_pci_tbl,
5054 .probe = megaraid_probe_one, 5054 .probe = megaraid_probe_one,
5055 .remove = __devexit_p(megaraid_remove_one), 5055 .remove = __devexit_p(megaraid_remove_one),
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 4b3e0d6e5afa..4b75fe619d9c 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -5,7 +5,7 @@
5#include <linux/mutex.h> 5#include <linux/mutex.h>
6 6
7#define MEGARAID_VERSION \ 7#define MEGARAID_VERSION \
8 "v2.00.3 (Release Date: Wed Feb 19 08:51:30 EST 2003)\n" 8 "v2.00.4 (Release Date: Thu Feb 9 08:51:30 EST 2006)\n"
9 9
10/* 10/*
11 * Driver features - change the values to enable or disable features in the 11 * Driver features - change the values to enable or disable features in the
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index a487f414960e..7de267e14458 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_sas.c 12 * FILE : megaraid_sas.c
13 * Version : v00.00.02.02 13 * Version : v00.00.02.04
14 * 14 *
15 * Authors: 15 * Authors:
16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com> 16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com>
@@ -60,6 +60,12 @@ static struct pci_device_id megasas_pci_table[] = {
60 PCI_ANY_ID, 60 PCI_ANY_ID,
61 }, 61 },
62 { 62 {
63 PCI_VENDOR_ID_LSI_LOGIC,
64 PCI_DEVICE_ID_LSI_SAS1078R, // ppc IOP
65 PCI_ANY_ID,
66 PCI_ANY_ID,
67 },
68 {
63 PCI_VENDOR_ID_DELL, 69 PCI_VENDOR_ID_DELL,
64 PCI_DEVICE_ID_DELL_PERC5, // xscale IOP 70 PCI_DEVICE_ID_DELL_PERC5, // xscale IOP
65 PCI_ANY_ID, 71 PCI_ANY_ID,
@@ -199,6 +205,86 @@ static struct megasas_instance_template megasas_instance_template_xscale = {
199*/ 205*/
200 206
201/** 207/**
208* The following functions are defined for ppc (deviceid : 0x60)
209* controllers
210*/
211
212/**
213 * megasas_enable_intr_ppc - Enables interrupts
214 * @regs: MFI register set
215 */
216static inline void
217megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs)
218{
219 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
220
221 writel(~0x80000004, &(regs)->outbound_intr_mask);
222
223 /* Dummy readl to force pci flush */
224 readl(&regs->outbound_intr_mask);
225}
226
227/**
228 * megasas_read_fw_status_reg_ppc - returns the current FW status value
229 * @regs: MFI register set
230 */
231static u32
232megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
233{
234 return readl(&(regs)->outbound_scratch_pad);
235}
236
237/**
238 * megasas_clear_interrupt_ppc - Check & clear interrupt
239 * @regs: MFI register set
240 */
241static int
242megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
243{
244 u32 status;
245 /*
246 * Check if it is our interrupt
247 */
248 status = readl(&regs->outbound_intr_status);
249
250 if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) {
251 return 1;
252 }
253
254 /*
255 * Clear the interrupt by writing back the same value
256 */
257 writel(status, &regs->outbound_doorbell_clear);
258
259 return 0;
260}
261/**
262 * megasas_fire_cmd_ppc - Sends command to the FW
263 * @frame_phys_addr : Physical address of cmd
264 * @frame_count : Number of frames for the command
265 * @regs : MFI register set
266 */
267static inline void
268megasas_fire_cmd_ppc(dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs)
269{
270 writel((frame_phys_addr | (frame_count<<1))|1,
271 &(regs)->inbound_queue_port);
272}
273
274static struct megasas_instance_template megasas_instance_template_ppc = {
275
276 .fire_cmd = megasas_fire_cmd_ppc,
277 .enable_intr = megasas_enable_intr_ppc,
278 .clear_intr = megasas_clear_intr_ppc,
279 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
280};
281
282/**
283* This is the end of set of functions & definitions
284* specific to ppc (deviceid : 0x60) controllers
285*/
286
287/**
202 * megasas_disable_intr - Disables interrupts 288 * megasas_disable_intr - Disables interrupts
203 * @regs: MFI register set 289 * @regs: MFI register set
204 */ 290 */
@@ -1607,7 +1693,17 @@ static int megasas_init_mfi(struct megasas_instance *instance)
1607 1693
1608 reg_set = instance->reg_set; 1694 reg_set = instance->reg_set;
1609 1695
1610 instance->instancet = &megasas_instance_template_xscale; 1696 switch(instance->pdev->device)
1697 {
1698 case PCI_DEVICE_ID_LSI_SAS1078R:
1699 instance->instancet = &megasas_instance_template_ppc;
1700 break;
1701 case PCI_DEVICE_ID_LSI_SAS1064R:
1702 case PCI_DEVICE_ID_DELL_PERC5:
1703 default:
1704 instance->instancet = &megasas_instance_template_xscale;
1705 break;
1706 }
1611 1707
1612 /* 1708 /*
1613 * We expect the FW state to be READY 1709 * We expect the FW state to be READY
@@ -1983,6 +2079,7 @@ static int megasas_io_attach(struct megasas_instance *instance)
1983 host->max_channel = MEGASAS_MAX_CHANNELS - 1; 2079 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
1984 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; 2080 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
1985 host->max_lun = MEGASAS_MAX_LUN; 2081 host->max_lun = MEGASAS_MAX_LUN;
2082 host->max_cmd_len = 16;
1986 2083
1987 /* 2084 /*
1988 * Notify the mid-layer about the new controller 2085 * Notify the mid-layer about the new controller
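The megaraid_sas changes add support for the LSI SAS1078R ("ppc IOP") controller: a second megasas_instance_template carrying the 1078-specific interrupt enable/clear, firmware-status read and command-fire routines, a probe-time switch on the PCI device ID to pick the template, a new PCI ID table entry, and max_cmd_len raised to 16 on the SCSI host. A hedged sketch of how the template indirection is consumed on the I/O path (the call site is outside this hunk; the helper and field names are assumptions for illustration):

	static void example_issue_cmd(struct megasas_instance *instance,
				      struct megasas_cmd *cmd)
	{
		/* All register pokes go through the per-controller ops table,
		 * so the same I/O path drives both xscale and ppc IOP parts. */
		instance->instancet->fire_cmd(cmd->frame_phys_addr,
					      cmd->frame_count,
					      instance->reg_set);
	}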
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index d6d166c0663f..89639f0c38ef 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
18/** 18/**
19 * MegaRAID SAS Driver meta data 19 * MegaRAID SAS Driver meta data
20 */ 20 */
21#define MEGASAS_VERSION "00.00.02.02" 21#define MEGASAS_VERSION "00.00.02.04"
22#define MEGASAS_RELDATE "Jan 23, 2006" 22#define MEGASAS_RELDATE "Feb 03, 2006"
23#define MEGASAS_EXT_VERSION "Mon Jan 23 14:09:01 PST 2006" 23#define MEGASAS_EXT_VERSION "Fri Feb 03 14:31:44 PST 2006"
24/* 24/*
25 * ===================================== 25 * =====================================
26 * MegaRAID SAS MFI firmware definitions 26 * MegaRAID SAS MFI firmware definitions
@@ -553,31 +553,46 @@ struct megasas_ctrl_info {
553#define MFI_OB_INTR_STATUS_MASK 0x00000002 553#define MFI_OB_INTR_STATUS_MASK 0x00000002
554#define MFI_POLL_TIMEOUT_SECS 10 554#define MFI_POLL_TIMEOUT_SECS 10
555 555
556#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
557#define PCI_DEVICE_ID_LSI_SAS1078R 0x00000060
558
556struct megasas_register_set { 559struct megasas_register_set {
560 u32 reserved_0[4]; /*0000h*/
557 561
558 u32 reserved_0[4]; /*0000h */ 562 u32 inbound_msg_0; /*0010h*/
563 u32 inbound_msg_1; /*0014h*/
564 u32 outbound_msg_0; /*0018h*/
565 u32 outbound_msg_1; /*001Ch*/
559 566
560 u32 inbound_msg_0; /*0010h */ 567 u32 inbound_doorbell; /*0020h*/
561 u32 inbound_msg_1; /*0014h */ 568 u32 inbound_intr_status; /*0024h*/
562 u32 outbound_msg_0; /*0018h */ 569 u32 inbound_intr_mask; /*0028h*/
563 u32 outbound_msg_1; /*001Ch */
564 570
565 u32 inbound_doorbell; /*0020h */ 571 u32 outbound_doorbell; /*002Ch*/
566 u32 inbound_intr_status; /*0024h */ 572 u32 outbound_intr_status; /*0030h*/
567 u32 inbound_intr_mask; /*0028h */ 573 u32 outbound_intr_mask; /*0034h*/
568 574
569 u32 outbound_doorbell; /*002Ch */ 575 u32 reserved_1[2]; /*0038h*/
570 u32 outbound_intr_status; /*0030h */
571 u32 outbound_intr_mask; /*0034h */
572 576
573 u32 reserved_1[2]; /*0038h */ 577 u32 inbound_queue_port; /*0040h*/
578 u32 outbound_queue_port; /*0044h*/
574 579
575 u32 inbound_queue_port; /*0040h */ 580 u32 reserved_2[22]; /*0048h*/
576 u32 outbound_queue_port; /*0044h */
577 581
578 u32 reserved_2; /*004Ch */ 582 u32 outbound_doorbell_clear; /*00A0h*/
579 583
580 u32 index_registers[1004]; /*0050h */ 584 u32 reserved_3[3]; /*00A4h*/
585
586 u32 outbound_scratch_pad ; /*00B0h*/
587
588 u32 reserved_4[3]; /*00B4h*/
589
590 u32 inbound_low_queue_port ; /*00C0h*/
591
592 u32 inbound_high_queue_port ; /*00C4h*/
593
594 u32 reserved_5; /*00C8h*/
595 u32 index_registers[820]; /*00CCh*/
581 596
582} __attribute__ ((packed)); 597} __attribute__ ((packed));
583 598
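The reshuffled megasas_register_set describes the 1078 layout: the doorbell-clear, scratch-pad and low/high inbound queue port registers that the new ppc routines touch now sit at the offsets given in the comments. A hedged compile-time sanity sketch (not part of the patch) for the offsets those comments imply:

	#include <stddef.h>

	#define MEGASAS_CHECK_OFF(field, off) \
		typedef char megasas_off_##field[ \
			(offsetof(struct megasas_register_set, field) == (off)) ? 1 : -1]

	MEGASAS_CHECK_OFF(inbound_queue_port,      0x40);
	MEGASAS_CHECK_OFF(outbound_doorbell_clear, 0xA0);
	MEGASAS_CHECK_OFF(outbound_scratch_pad,    0xB0);
	MEGASAS_CHECK_OFF(inbound_low_queue_port,  0xC0);
	MEGASAS_CHECK_OFF(index_registers,         0xCC);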
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b17ee62dd1a9..92b3e13e9061 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -7,7 +7,6 @@
7#include "qla_def.h" 7#include "qla_def.h"
8 8
9#include <linux/vmalloc.h> 9#include <linux/vmalloc.h>
10#include <scsi/scsi_transport_fc.h>
11 10
12/* SYSFS attributes --------------------------------------------------------- */ 11/* SYSFS attributes --------------------------------------------------------- */
13 12
@@ -114,7 +113,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf, loff_t off,
114 struct device, kobj))); 113 struct device, kobj)));
115 unsigned long flags; 114 unsigned long flags;
116 115
117 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size) 116 if (!capable(CAP_SYS_ADMIN) || off != 0)
118 return 0; 117 return 0;
119 118
120 /* Read NVRAM. */ 119 /* Read NVRAM. */
@@ -123,7 +122,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf, loff_t off,
123 ha->nvram_size); 122 ha->nvram_size);
124 spin_unlock_irqrestore(&ha->hardware_lock, flags); 123 spin_unlock_irqrestore(&ha->hardware_lock, flags);
125 124
126 return (count); 125 return ha->nvram_size;
127} 126}
128 127
129static ssize_t 128static ssize_t
@@ -175,19 +174,150 @@ static struct bin_attribute sysfs_nvram_attr = {
175 .mode = S_IRUSR | S_IWUSR, 174 .mode = S_IRUSR | S_IWUSR,
176 .owner = THIS_MODULE, 175 .owner = THIS_MODULE,
177 }, 176 },
178 .size = 0, 177 .size = 512,
179 .read = qla2x00_sysfs_read_nvram, 178 .read = qla2x00_sysfs_read_nvram,
180 .write = qla2x00_sysfs_write_nvram, 179 .write = qla2x00_sysfs_write_nvram,
181}; 180};
182 181
182static ssize_t
183qla2x00_sysfs_read_optrom(struct kobject *kobj, char *buf, loff_t off,
184 size_t count)
185{
186 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
187 struct device, kobj)));
188
189 if (ha->optrom_state != QLA_SREADING)
190 return 0;
191 if (off > ha->optrom_size)
192 return 0;
193 if (off + count > ha->optrom_size)
194 count = ha->optrom_size - off;
195
196 memcpy(buf, &ha->optrom_buffer[off], count);
197
198 return count;
199}
200
201static ssize_t
202qla2x00_sysfs_write_optrom(struct kobject *kobj, char *buf, loff_t off,
203 size_t count)
204{
205 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
206 struct device, kobj)));
207
208 if (ha->optrom_state != QLA_SWRITING)
209 return -EINVAL;
210 if (off > ha->optrom_size)
211 return -ERANGE;
212 if (off + count > ha->optrom_size)
213 count = ha->optrom_size - off;
214
215 memcpy(&ha->optrom_buffer[off], buf, count);
216
217 return count;
218}
219
220static struct bin_attribute sysfs_optrom_attr = {
221 .attr = {
222 .name = "optrom",
223 .mode = S_IRUSR | S_IWUSR,
224 .owner = THIS_MODULE,
225 },
226 .size = OPTROM_SIZE_24XX,
227 .read = qla2x00_sysfs_read_optrom,
228 .write = qla2x00_sysfs_write_optrom,
229};
230
231static ssize_t
232qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, char *buf, loff_t off,
233 size_t count)
234{
235 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
236 struct device, kobj)));
237 int val;
238
239 if (off)
240 return 0;
241
242 if (sscanf(buf, "%d", &val) != 1)
243 return -EINVAL;
244
245 switch (val) {
246 case 0:
247 if (ha->optrom_state != QLA_SREADING &&
248 ha->optrom_state != QLA_SWRITING)
249 break;
250
251 ha->optrom_state = QLA_SWAITING;
252 vfree(ha->optrom_buffer);
253 ha->optrom_buffer = NULL;
254 break;
255 case 1:
256 if (ha->optrom_state != QLA_SWAITING)
257 break;
258
259 ha->optrom_state = QLA_SREADING;
260 ha->optrom_buffer = (uint8_t *)vmalloc(ha->optrom_size);
261 if (ha->optrom_buffer == NULL) {
262 qla_printk(KERN_WARNING, ha,
263 "Unable to allocate memory for optrom retrieval "
264 "(%x).\n", ha->optrom_size);
265
266 ha->optrom_state = QLA_SWAITING;
267 return count;
268 }
269
270 memset(ha->optrom_buffer, 0, ha->optrom_size);
271 ha->isp_ops.read_optrom(ha, ha->optrom_buffer, 0,
272 ha->optrom_size);
273 break;
274 case 2:
275 if (ha->optrom_state != QLA_SWAITING)
276 break;
277
278 ha->optrom_state = QLA_SWRITING;
279 ha->optrom_buffer = (uint8_t *)vmalloc(ha->optrom_size);
280 if (ha->optrom_buffer == NULL) {
281 qla_printk(KERN_WARNING, ha,
282 "Unable to allocate memory for optrom update "
283 "(%x).\n", ha->optrom_size);
284
285 ha->optrom_state = QLA_SWAITING;
286 return count;
287 }
288 memset(ha->optrom_buffer, 0, ha->optrom_size);
289 break;
290 case 3:
291 if (ha->optrom_state != QLA_SWRITING)
292 break;
293
294 ha->isp_ops.write_optrom(ha, ha->optrom_buffer, 0,
295 ha->optrom_size);
296 break;
297 }
298 return count;
299}
300
301static struct bin_attribute sysfs_optrom_ctl_attr = {
302 .attr = {
303 .name = "optrom_ctl",
304 .mode = S_IWUSR,
305 .owner = THIS_MODULE,
306 },
307 .size = 0,
308 .write = qla2x00_sysfs_write_optrom_ctl,
309};
310
183void 311void
184qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha) 312qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
185{ 313{
186 struct Scsi_Host *host = ha->host; 314 struct Scsi_Host *host = ha->host;
187 315
188 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr); 316 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr);
189 sysfs_nvram_attr.size = ha->nvram_size;
190 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); 317 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
318 sysfs_create_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr);
319 sysfs_create_bin_file(&host->shost_gendev.kobj,
320 &sysfs_optrom_ctl_attr);
191} 321}
192 322
193void 323void
@@ -197,6 +327,12 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
197 327
198 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr); 328 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_fw_dump_attr);
199 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); 329 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
330 sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_optrom_attr);
331 sysfs_remove_bin_file(&host->shost_gendev.kobj,
332 &sysfs_optrom_ctl_attr);
333
334 if (ha->beacon_blink_led == 1)
335 ha->isp_ops.beacon_off(ha);
200} 336}
201 337
202/* Scsi_Host attributes. */ 338/* Scsi_Host attributes. */
@@ -384,6 +520,50 @@ qla2x00_zio_timer_store(struct class_device *cdev, const char *buf,
384 return strlen(buf); 520 return strlen(buf);
385} 521}
386 522
523static ssize_t
524qla2x00_beacon_show(struct class_device *cdev, char *buf)
525{
526 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
527 int len = 0;
528
529 if (ha->beacon_blink_led)
530 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
531 else
532 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
533 return len;
534}
535
536static ssize_t
537qla2x00_beacon_store(struct class_device *cdev, const char *buf,
538 size_t count)
539{
540 scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
541 int val = 0;
542 int rval;
543
544 if (IS_QLA2100(ha) || IS_QLA2200(ha))
545 return -EPERM;
546
547 if (test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) {
548 qla_printk(KERN_WARNING, ha,
549 "Abort ISP active -- ignoring beacon request.\n");
550 return -EBUSY;
551 }
552
553 if (sscanf(buf, "%d", &val) != 1)
554 return -EINVAL;
555
556 if (val)
557 rval = ha->isp_ops.beacon_on(ha);
558 else
559 rval = ha->isp_ops.beacon_off(ha);
560
561 if (rval != QLA_SUCCESS)
562 count = 0;
563
564 return count;
565}
566
387static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, 567static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show,
388 NULL); 568 NULL);
389static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 569static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
@@ -398,6 +578,8 @@ static CLASS_DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show,
398 qla2x00_zio_store); 578 qla2x00_zio_store);
399static CLASS_DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show, 579static CLASS_DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
400 qla2x00_zio_timer_store); 580 qla2x00_zio_timer_store);
581static CLASS_DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
582 qla2x00_beacon_store);
401 583
402struct class_device_attribute *qla2x00_host_attrs[] = { 584struct class_device_attribute *qla2x00_host_attrs[] = {
403 &class_device_attr_driver_version, 585 &class_device_attr_driver_version,
@@ -411,6 +593,7 @@ struct class_device_attribute *qla2x00_host_attrs[] = {
411 &class_device_attr_state, 593 &class_device_attr_state,
412 &class_device_attr_zio, 594 &class_device_attr_zio,
413 &class_device_attr_zio_timer, 595 &class_device_attr_zio_timer,
596 &class_device_attr_beacon,
414 NULL, 597 NULL,
415}; 598};
416 599
@@ -426,6 +609,49 @@ qla2x00_get_host_port_id(struct Scsi_Host *shost)
426} 609}
427 610
428static void 611static void
612qla2x00_get_host_speed(struct Scsi_Host *shost)
613{
614 scsi_qla_host_t *ha = to_qla_host(shost);
615 uint32_t speed = 0;
616
617 switch (ha->link_data_rate) {
618 case LDR_1GB:
619 speed = 1;
620 break;
621 case LDR_2GB:
622 speed = 2;
623 break;
624 case LDR_4GB:
625 speed = 4;
626 break;
627 }
628 fc_host_speed(shost) = speed;
629}
630
631static void
632qla2x00_get_host_port_type(struct Scsi_Host *shost)
633{
634 scsi_qla_host_t *ha = to_qla_host(shost);
635 uint32_t port_type = FC_PORTTYPE_UNKNOWN;
636
637 switch (ha->current_topology) {
638 case ISP_CFG_NL:
639 port_type = FC_PORTTYPE_LPORT;
640 break;
641 case ISP_CFG_FL:
642 port_type = FC_PORTTYPE_NLPORT;
643 break;
644 case ISP_CFG_N:
645 port_type = FC_PORTTYPE_PTP;
646 break;
647 case ISP_CFG_F:
648 port_type = FC_PORTTYPE_NPORT;
649 break;
650 }
651 fc_host_port_type(shost) = port_type;
652}
653
654static void
429qla2x00_get_starget_node_name(struct scsi_target *starget) 655qla2x00_get_starget_node_name(struct scsi_target *starget)
430{ 656{
431 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 657 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
@@ -512,6 +738,41 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
512 return 0; 738 return 0;
513} 739}
514 740
741static struct fc_host_statistics *
742qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
743{
744 scsi_qla_host_t *ha = to_qla_host(shost);
745 int rval;
746 uint16_t mb_stat[1];
747 link_stat_t stat_buf;
748 struct fc_host_statistics *pfc_host_stat;
749
750 pfc_host_stat = &ha->fc_host_stat;
751 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
752
753 if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
754 rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf,
755 sizeof(stat_buf) / 4, mb_stat);
756 } else {
757 rval = qla2x00_get_link_status(ha, ha->loop_id, &stat_buf,
758 mb_stat);
759 }
760 if (rval != 0) {
761 qla_printk(KERN_WARNING, ha,
762 "Unable to retrieve host statistics (%d).\n", mb_stat[0]);
763 return pfc_host_stat;
764 }
765
766 pfc_host_stat->link_failure_count = stat_buf.link_fail_cnt;
767 pfc_host_stat->loss_of_sync_count = stat_buf.loss_sync_cnt;
768 pfc_host_stat->loss_of_signal_count = stat_buf.loss_sig_cnt;
769 pfc_host_stat->prim_seq_protocol_err_count = stat_buf.prim_seq_err_cnt;
770 pfc_host_stat->invalid_tx_word_count = stat_buf.inval_xmit_word_cnt;
771 pfc_host_stat->invalid_crc_count = stat_buf.inval_crc_cnt;
772
773 return pfc_host_stat;
774}
775
515struct fc_function_template qla2xxx_transport_functions = { 776struct fc_function_template qla2xxx_transport_functions = {
516 777
517 .show_host_node_name = 1, 778 .show_host_node_name = 1,
@@ -520,6 +781,10 @@ struct fc_function_template qla2xxx_transport_functions = {
520 781
521 .get_host_port_id = qla2x00_get_host_port_id, 782 .get_host_port_id = qla2x00_get_host_port_id,
522 .show_host_port_id = 1, 783 .show_host_port_id = 1,
784 .get_host_speed = qla2x00_get_host_speed,
785 .show_host_speed = 1,
786 .get_host_port_type = qla2x00_get_host_port_type,
787 .show_host_port_type = 1,
523 788
524 .dd_fcrport_size = sizeof(struct fc_port *), 789 .dd_fcrport_size = sizeof(struct fc_port *),
525 .show_rport_supported_classes = 1, 790 .show_rport_supported_classes = 1,
@@ -536,6 +801,7 @@ struct fc_function_template qla2xxx_transport_functions = {
536 .show_rport_dev_loss_tmo = 1, 801 .show_rport_dev_loss_tmo = 1,
537 802
538 .issue_fc_host_lip = qla2x00_issue_lip, 803 .issue_fc_host_lip = qla2x00_issue_lip,
804 .get_fc_host_stats = qla2x00_get_fc_host_stats,
539}; 805};
540 806
541void 807void
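The qla_attr.c changes cover several features: the NVRAM binary attribute now advertises a fixed size and its read handler returns ha->nvram_size instead of insisting on an exact-sized read; two new binary attributes, "optrom" and "optrom_ctl", expose the option ROM through a small state machine (write 1 to snapshot flash into a vmalloc'd buffer, read the "optrom" file, write 0 to free it; values 2 and 3 drive the update path through isp_ops.write_optrom); a "beacon" class-device attribute toggles LED blinking; and the FC transport template gains host speed, port type and statistics callbacks backed by the new LDR_* rates and the link-status/ISP-stats mailbox helpers. A hedged user-space sketch of the read-side protocol; the sysfs path is an assumption for illustration (the attributes actually hang off the host's shost_gendev kobject):

	#include <stdio.h>

	int main(void)
	{
		const char *ctl = "/sys/class/scsi_host/host0/device/optrom_ctl";
		const char *img = "/sys/class/scsi_host/host0/device/optrom";
		char buf[4096];
		size_t n;
		FILE *f;

		if (!(f = fopen(ctl, "w")))
			return 1;
		fputs("1", f);			/* QLA_SREADING: snapshot flash */
		fclose(f);

		if (!(f = fopen(img, "rb")))	/* dump the snapshot to stdout */
			return 1;
		while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
			fwrite(buf, 1, n, stdout);
		fclose(f);

		if (!(f = fopen(ctl, "w")))
			return 1;
		fputs("0", f);			/* QLA_SWAITING: free the buffer */
		fclose(f);
		return 0;
	}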
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index bad066e5772a..b31a03bbd14f 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -29,6 +29,7 @@
29#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
30#include <scsi/scsi_device.h> 30#include <scsi/scsi_device.h>
31#include <scsi/scsi_cmnd.h> 31#include <scsi/scsi_cmnd.h>
32#include <scsi/scsi_transport_fc.h>
32 33
33#if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE) 34#if defined(CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE)
34#if defined(CONFIG_SCSI_QLA21XX) || defined(CONFIG_SCSI_QLA21XX_MODULE) 35#if defined(CONFIG_SCSI_QLA21XX) || defined(CONFIG_SCSI_QLA21XX_MODULE)
@@ -181,6 +182,13 @@
181#define WRT_REG_DWORD(addr, data) writel(data,addr) 182#define WRT_REG_DWORD(addr, data) writel(data,addr)
182 183
183/* 184/*
185 * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an
186 * 133Mhz slot.
187 */
188#define RD_REG_WORD_PIO(addr) (inw((unsigned long)addr))
189#define WRT_REG_WORD_PIO(addr, data) (outw(data,(unsigned long)addr))
190
191/*
184 * Fibre Channel device definitions. 192 * Fibre Channel device definitions.
185 */ 193 */
186#define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */ 194#define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */
@@ -432,6 +440,9 @@ struct device_reg_2xxx {
432#define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040 440#define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040
433#define GPIO_LED_GREEN_OFF_AMBER_ON 0x0080 441#define GPIO_LED_GREEN_OFF_AMBER_ON 0x0080
434#define GPIO_LED_GREEN_ON_AMBER_ON 0x00C0 442#define GPIO_LED_GREEN_ON_AMBER_ON 0x00C0
443#define GPIO_LED_ALL_OFF 0x0000
444#define GPIO_LED_RED_ON_OTHER_OFF 0x0001 /* isp2322 */
445#define GPIO_LED_RGA_ON 0x00C1 /* isp2322: red green amber */
435 446
436 union { 447 union {
437 struct { 448 struct {
@@ -2199,6 +2210,15 @@ struct isp_operations {
2199 2210
2200 void (*fw_dump) (struct scsi_qla_host *, int); 2211 void (*fw_dump) (struct scsi_qla_host *, int);
2201 void (*ascii_fw_dump) (struct scsi_qla_host *); 2212 void (*ascii_fw_dump) (struct scsi_qla_host *);
2213
2214 int (*beacon_on) (struct scsi_qla_host *);
2215 int (*beacon_off) (struct scsi_qla_host *);
2216 void (*beacon_blink) (struct scsi_qla_host *);
2217
2218 uint8_t * (*read_optrom) (struct scsi_qla_host *, uint8_t *,
2219 uint32_t, uint32_t);
2220 int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t,
2221 uint32_t);
2202}; 2222};
2203 2223
2204/* 2224/*
@@ -2331,6 +2351,10 @@ typedef struct scsi_qla_host {
2331 uint16_t min_external_loopid; /* First external loop Id */ 2351 uint16_t min_external_loopid; /* First external loop Id */
2332 2352
2333 uint16_t link_data_rate; /* F/W operating speed */ 2353 uint16_t link_data_rate; /* F/W operating speed */
2354#define LDR_1GB 0
2355#define LDR_2GB 1
2356#define LDR_4GB 3
2357#define LDR_UNKNOWN 0xFFFF
2334 2358
2335 uint8_t current_topology; 2359 uint8_t current_topology;
2336 uint8_t prev_topology; 2360 uint8_t prev_topology;
@@ -2486,12 +2510,26 @@ typedef struct scsi_qla_host {
2486 uint8_t *port_name; 2510 uint8_t *port_name;
2487 uint32_t isp_abort_cnt; 2511 uint32_t isp_abort_cnt;
2488 2512
2513 /* Option ROM information. */
2514 char *optrom_buffer;
2515 uint32_t optrom_size;
2516 int optrom_state;
2517#define QLA_SWAITING 0
2518#define QLA_SREADING 1
2519#define QLA_SWRITING 2
2520
2489 /* Needed for BEACON */ 2521 /* Needed for BEACON */
2490 uint16_t beacon_blink_led; 2522 uint16_t beacon_blink_led;
2491 uint16_t beacon_green_on; 2523 uint8_t beacon_color_state;
2524#define QLA_LED_GRN_ON 0x01
2525#define QLA_LED_YLW_ON 0x02
2526#define QLA_LED_ABR_ON 0x04
2527#define QLA_LED_ALL_ON 0x07 /* yellow, green, amber. */
2528 /* ISP2322: red, green, amber. */
2492 2529
2493 uint16_t zio_mode; 2530 uint16_t zio_mode;
2494 uint16_t zio_timer; 2531 uint16_t zio_timer;
2532 struct fc_host_statistics fc_host_stat;
2495} scsi_qla_host_t; 2533} scsi_qla_host_t;
2496 2534
2497 2535
@@ -2557,7 +2595,9 @@ struct _qla2x00stats {
2557/* 2595/*
2558 * Flash support definitions 2596 * Flash support definitions
2559 */ 2597 */
2560#define FLASH_IMAGE_SIZE 131072 2598#define OPTROM_SIZE_2300 0x20000
2599#define OPTROM_SIZE_2322 0x100000
2600#define OPTROM_SIZE_24XX 0x100000
2561 2601
2562#include "qla_gbl.h" 2602#include "qla_gbl.h"
2563#include "qla_dbg.h" 2603#include "qla_dbg.h"
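qla_def.h now pulls in <scsi/scsi_transport_fc.h> itself, which is why the per-file includes are dropped from qla_attr.c, qla_init.c, qla_mbx.c and qla_rscn.c later in this series. It also adds PIO register accessors for the ISP2312 v2 FLASH/GPIO restriction, LDR_* link-rate and OPTROM_SIZE_* constants, option-ROM and beacon bookkeeping plus an embedded fc_host_statistics in scsi_qla_host, and new isp_ops hooks for beacon control and option-ROM access. A minimal sketch of how the PIO/MMIO split is used by the beacon code added later in the series (the helper name is illustrative):

	static inline uint16_t example_read_gpiod(scsi_qla_host_t *ha,
						  struct device_reg_2xxx __iomem *reg)
	{
		/* ISP2312 v2 cannot reach the FLASH/GPIO registers over MMIO
		 * in a 133MHz slot, so fall back to port I/O when available. */
		if (ha->pio_address)
			return RD_REG_WORD_PIO(&reg->gpiod);
		return RD_REG_WORD(&reg->gpiod);
	}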
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 35266bd5d538..ffdc2680f049 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -75,12 +75,12 @@ extern void qla2x00_cmd_timeout(srb_t *);
75extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); 75extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
76extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int); 76extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int);
77 77
78extern void qla2x00_blink_led(scsi_qla_host_t *);
79
80extern int qla2x00_down_timeout(struct semaphore *, unsigned long); 78extern int qla2x00_down_timeout(struct semaphore *, unsigned long);
81 79
82extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *); 80extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
83 81
82extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
83
84/* 84/*
85 * Global Function Prototypes in qla_iocb.c source file. 85 * Global Function Prototypes in qla_iocb.c source file.
86 */ 86 */
@@ -185,6 +185,13 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *,
185extern int 185extern int
186qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map); 186qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
187 187
188extern int
189qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, link_stat_t *,
190 uint16_t *);
191
192extern int
193qla24xx_get_isp_stats(scsi_qla_host_t *, uint32_t *, uint32_t, uint16_t *);
194
188extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *); 195extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *);
189extern int qla24xx_abort_target(fc_port_t *); 196extern int qla24xx_abort_target(fc_port_t *);
190 197
@@ -228,6 +235,22 @@ extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
228extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, 235extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
229 uint32_t); 236 uint32_t);
230 237
238extern int qla2x00_beacon_on(struct scsi_qla_host *);
239extern int qla2x00_beacon_off(struct scsi_qla_host *);
240extern void qla2x00_beacon_blink(struct scsi_qla_host *);
241extern int qla24xx_beacon_on(struct scsi_qla_host *);
242extern int qla24xx_beacon_off(struct scsi_qla_host *);
243extern void qla24xx_beacon_blink(struct scsi_qla_host *);
244
245extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
246 uint32_t, uint32_t);
247extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *,
248 uint32_t, uint32_t);
249extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
250 uint32_t, uint32_t);
251extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
252 uint32_t, uint32_t);
253
231/* 254/*
232 * Global Function Prototypes in qla_dbg.c source file. 255 * Global Function Prototypes in qla_dbg.c source file.
233 */ 256 */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e67bb0997818..634ee174bff2 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -8,7 +8,6 @@
8 8
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <linux/vmalloc.h> 10#include <linux/vmalloc.h>
11#include <scsi/scsi_transport_fc.h>
12 11
13#include "qla_devtbl.h" 12#include "qla_devtbl.h"
14 13
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 7ec0b8d6f07b..6544b6d0891d 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -814,6 +814,7 @@ qla24xx_start_scsi(srb_t *sp)
814 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 814 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
815 815
816 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 816 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
817 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
817 818
818 /* Load SCSI command packet. */ 819 /* Load SCSI command packet. */
819 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 820 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
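The one-liner in qla24xx_start_scsi() byte-swaps the LUN field after int_to_scsilun() fills it, because the 24xx command IOCB carries the 8-byte FCP LUN in the firmware's wire order rather than the host's. A hedged illustration of the kind of in-place per-dword swap host_to_fcp_swap() performs; this helper is illustrative, not the driver's macro:

	static void example_fcp_swap(uint8_t *buf, size_t len)
	{
		size_t i;

		/* swap each 32-bit word of the field into FCP byte order */
		for (i = 0; i + 4 <= len; i += 4) {
			uint8_t t0 = buf[i], t1 = buf[i + 1];

			buf[i]     = buf[i + 3];
			buf[i + 1] = buf[i + 2];
			buf[i + 2] = t1;
			buf[i + 3] = t0;
		}
	}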
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 71a46fcee8cc..42aa7a7c1a73 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -402,9 +402,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
402 break; 402 break;
403 403
404 case MBA_LOOP_UP: /* Loop Up Event */ 404 case MBA_LOOP_UP: /* Loop Up Event */
405 ha->link_data_rate = 0;
406 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 405 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
407 link_speed = link_speeds[0]; 406 link_speed = link_speeds[0];
407 ha->link_data_rate = LDR_1GB;
408 } else { 408 } else {
409 link_speed = link_speeds[LS_UNKNOWN]; 409 link_speed = link_speeds[LS_UNKNOWN];
410 if (mb[1] < 5) 410 if (mb[1] < 5)
@@ -436,7 +436,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
436 } 436 }
437 437
438 ha->flags.management_server_logged_in = 0; 438 ha->flags.management_server_logged_in = 0;
439 ha->link_data_rate = 0; 439 ha->link_data_rate = LDR_UNKNOWN;
440 if (ql2xfdmienable) 440 if (ql2xfdmienable)
441 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 441 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
442 442
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 3099b379de9d..363dfdd042b0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -7,7 +7,6 @@
7#include "qla_def.h" 7#include "qla_def.h"
8 8
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <scsi/scsi_transport_fc.h>
11 10
12static void 11static void
13qla2x00_mbx_sem_timeout(unsigned long data) 12qla2x00_mbx_sem_timeout(unsigned long data)
@@ -1874,7 +1873,8 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1874 mcp->mb[3] = LSW(id_list_dma); 1873 mcp->mb[3] = LSW(id_list_dma);
1875 mcp->mb[6] = MSW(MSD(id_list_dma)); 1874 mcp->mb[6] = MSW(MSD(id_list_dma));
1876 mcp->mb[7] = LSW(MSD(id_list_dma)); 1875 mcp->mb[7] = LSW(MSD(id_list_dma));
1877 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2; 1876 mcp->mb[8] = 0;
1877 mcp->out_mb |= MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
1878 } else { 1878 } else {
1879 mcp->mb[1] = MSW(id_list_dma); 1879 mcp->mb[1] = MSW(id_list_dma);
1880 mcp->mb[2] = LSW(id_list_dma); 1880 mcp->mb[2] = LSW(id_list_dma);
@@ -2017,8 +2017,109 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
2017 2017
2018 return rval; 2018 return rval;
2019} 2019}
2020#endif
2021
2022/*
2023 * qla2x00_get_link_status
2024 *
2025 * Input:
2026 * ha = adapter block pointer.
2027 * loop_id = device loop ID.
2028 * ret_buf = pointer to link status return buffer.
2029 *
2030 * Returns:
2031 * 0 = success.
2032 * BIT_0 = mem alloc error.
2033 * BIT_1 = mailbox error.
2034 */
2035int
2036qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2037 link_stat_t *ret_buf, uint16_t *status)
2038{
2039 int rval;
2040 mbx_cmd_t mc;
2041 mbx_cmd_t *mcp = &mc;
2042 link_stat_t *stat_buf;
2043 dma_addr_t stat_buf_dma;
2044
2045 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no);)
2046
2047 stat_buf = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &stat_buf_dma);
2048 if (stat_buf == NULL) {
2049 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
2050 __func__, ha->host_no));
2051 return BIT_0;
2052 }
2053 memset(stat_buf, 0, sizeof(link_stat_t));
2054
2055 mcp->mb[0] = MBC_GET_LINK_STATUS;
2056 mcp->mb[2] = MSW(stat_buf_dma);
2057 mcp->mb[3] = LSW(stat_buf_dma);
2058 mcp->mb[6] = MSW(MSD(stat_buf_dma));
2059 mcp->mb[7] = LSW(MSD(stat_buf_dma));
2060 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2061 mcp->in_mb = MBX_0;
2062 if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
2063 mcp->mb[1] = loop_id;
2064 mcp->mb[4] = 0;
2065 mcp->mb[10] = 0;
2066 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2067 mcp->in_mb |= MBX_1;
2068 } else if (HAS_EXTENDED_IDS(ha)) {
2069 mcp->mb[1] = loop_id;
2070 mcp->mb[10] = 0;
2071 mcp->out_mb |= MBX_10|MBX_1;
2072 } else {
2073 mcp->mb[1] = loop_id << 8;
2074 mcp->out_mb |= MBX_1;
2075 }
2076 mcp->tov = 30;
2077 mcp->flags = IOCTL_CMD;
2078 rval = qla2x00_mailbox_command(ha, mcp);
2079
2080 if (rval == QLA_SUCCESS) {
2081 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2082 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
2083 __func__, ha->host_no, mcp->mb[0]);)
2084 status[0] = mcp->mb[0];
2085 rval = BIT_1;
2086 } else {
2087 /* copy over data -- firmware data is LE. */
2088 ret_buf->link_fail_cnt =
2089 le32_to_cpu(stat_buf->link_fail_cnt);
2090 ret_buf->loss_sync_cnt =
2091 le32_to_cpu(stat_buf->loss_sync_cnt);
2092 ret_buf->loss_sig_cnt =
2093 le32_to_cpu(stat_buf->loss_sig_cnt);
2094 ret_buf->prim_seq_err_cnt =
2095 le32_to_cpu(stat_buf->prim_seq_err_cnt);
2096 ret_buf->inval_xmit_word_cnt =
2097 le32_to_cpu(stat_buf->inval_xmit_word_cnt);
2098 ret_buf->inval_crc_cnt =
2099 le32_to_cpu(stat_buf->inval_crc_cnt);
2100
2101 DEBUG11(printk("%s(%ld): stat dump: fail_cnt=%d "
2102 "loss_sync=%d loss_sig=%d seq_err=%d "
2103 "inval_xmt_word=%d inval_crc=%d.\n", __func__,
2104 ha->host_no, stat_buf->link_fail_cnt,
2105 stat_buf->loss_sync_cnt, stat_buf->loss_sig_cnt,
2106 stat_buf->prim_seq_err_cnt,
2107 stat_buf->inval_xmit_word_cnt,
2108 stat_buf->inval_crc_cnt);)
2109 }
2110 } else {
2111 /* Failed. */
2112 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2113 ha->host_no, rval);)
2114 rval = BIT_1;
2115 }
2116
2117 dma_pool_free(ha->s_dma_pool, stat_buf, stat_buf_dma);
2020 2118
2021uint8_t 2119 return rval;
2120}
2121
2122int
2022qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords, 2123qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords,
2023 uint16_t *status) 2124 uint16_t *status)
2024{ 2125{
@@ -2080,7 +2181,6 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, uint32_t *dwbuf, uint32_t dwords,
2080 2181
2081 return rval; 2182 return rval;
2082} 2183}
2083#endif
2084 2184
2085int 2185int
2086qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp) 2186qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5866a7c706a8..9f91f1a20542 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -366,6 +366,12 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
366 goto qc_fail_command; 366 goto qc_fail_command;
367 } 367 }
368 368
369 /* Close window on fcport/rport state-transitioning. */
370 if (!*(fc_port_t **)rport->dd_data) {
371 cmd->result = DID_IMM_RETRY << 16;
372 goto qc_fail_command;
373 }
374
369 if (atomic_read(&fcport->state) != FCS_ONLINE) { 375 if (atomic_read(&fcport->state) != FCS_ONLINE) {
370 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 376 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
371 atomic_read(&ha->loop_state) == LOOP_DEAD) { 377 atomic_read(&ha->loop_state) == LOOP_DEAD) {
@@ -421,6 +427,12 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
421 goto qc24_fail_command; 427 goto qc24_fail_command;
422 } 428 }
423 429
430 /* Close window on fcport/rport state-transitioning. */
431 if (!*(fc_port_t **)rport->dd_data) {
432 cmd->result = DID_IMM_RETRY << 16;
433 goto qc24_fail_command;
434 }
435
424 if (atomic_read(&fcport->state) != FCS_ONLINE) { 436 if (atomic_read(&fcport->state) != FCS_ONLINE) {
425 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 437 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
426 atomic_read(&ha->loop_state) == LOOP_DEAD) { 438 atomic_read(&ha->loop_state) == LOOP_DEAD) {
@@ -513,7 +525,7 @@ qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
513 * Success (Adapter is online) : 0 525 * Success (Adapter is online) : 0
514 * Failed (Adapter is offline/disabled) : 1 526 * Failed (Adapter is offline/disabled) : 1
515 */ 527 */
516static int 528int
517qla2x00_wait_for_hba_online(scsi_qla_host_t *ha) 529qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
518{ 530{
519 int return_status; 531 int return_status;
@@ -1312,6 +1324,8 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1312 ha->ports = MAX_BUSES; 1324 ha->ports = MAX_BUSES;
1313 ha->init_cb_size = sizeof(init_cb_t); 1325 ha->init_cb_size = sizeof(init_cb_t);
1314 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER; 1326 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER;
1327 ha->link_data_rate = LDR_UNKNOWN;
1328 ha->optrom_size = OPTROM_SIZE_2300;
1315 1329
1316 /* Assign ISP specific operations. */ 1330 /* Assign ISP specific operations. */
1317 ha->isp_ops.pci_config = qla2100_pci_config; 1331 ha->isp_ops.pci_config = qla2100_pci_config;
@@ -1339,6 +1353,8 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1339 ha->isp_ops.write_nvram = qla2x00_write_nvram_data; 1353 ha->isp_ops.write_nvram = qla2x00_write_nvram_data;
1340 ha->isp_ops.fw_dump = qla2100_fw_dump; 1354 ha->isp_ops.fw_dump = qla2100_fw_dump;
1341 ha->isp_ops.ascii_fw_dump = qla2100_ascii_fw_dump; 1355 ha->isp_ops.ascii_fw_dump = qla2100_ascii_fw_dump;
1356 ha->isp_ops.read_optrom = qla2x00_read_optrom_data;
1357 ha->isp_ops.write_optrom = qla2x00_write_optrom_data;
1342 if (IS_QLA2100(ha)) { 1358 if (IS_QLA2100(ha)) {
1343 host->max_id = MAX_TARGETS_2100; 1359 host->max_id = MAX_TARGETS_2100;
1344 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 1360 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
@@ -1364,7 +1380,12 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1364 ha->isp_ops.intr_handler = qla2300_intr_handler; 1380 ha->isp_ops.intr_handler = qla2300_intr_handler;
1365 ha->isp_ops.fw_dump = qla2300_fw_dump; 1381 ha->isp_ops.fw_dump = qla2300_fw_dump;
1366 ha->isp_ops.ascii_fw_dump = qla2300_ascii_fw_dump; 1382 ha->isp_ops.ascii_fw_dump = qla2300_ascii_fw_dump;
1383 ha->isp_ops.beacon_on = qla2x00_beacon_on;
1384 ha->isp_ops.beacon_off = qla2x00_beacon_off;
1385 ha->isp_ops.beacon_blink = qla2x00_beacon_blink;
1367 ha->gid_list_info_size = 6; 1386 ha->gid_list_info_size = 6;
1387 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1388 ha->optrom_size = OPTROM_SIZE_2322;
1368 } else if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) { 1389 } else if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
1369 host->max_id = MAX_TARGETS_2200; 1390 host->max_id = MAX_TARGETS_2200;
1370 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1391 ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1400,7 +1421,13 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
1400 ha->isp_ops.write_nvram = qla24xx_write_nvram_data; 1421 ha->isp_ops.write_nvram = qla24xx_write_nvram_data;
1401 ha->isp_ops.fw_dump = qla24xx_fw_dump; 1422 ha->isp_ops.fw_dump = qla24xx_fw_dump;
1402 ha->isp_ops.ascii_fw_dump = qla24xx_ascii_fw_dump; 1423 ha->isp_ops.ascii_fw_dump = qla24xx_ascii_fw_dump;
1424 ha->isp_ops.read_optrom = qla24xx_read_optrom_data;
1425 ha->isp_ops.write_optrom = qla24xx_write_optrom_data;
1426 ha->isp_ops.beacon_on = qla24xx_beacon_on;
1427 ha->isp_ops.beacon_off = qla24xx_beacon_off;
1428 ha->isp_ops.beacon_blink = qla24xx_beacon_blink;
1403 ha->gid_list_info_size = 8; 1429 ha->gid_list_info_size = 8;
1430 ha->optrom_size = OPTROM_SIZE_24XX;
1404 } 1431 }
1405 host->can_queue = ha->request_q_length + 128; 1432 host->can_queue = ha->request_q_length + 128;
1406 1433
@@ -1657,11 +1684,13 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
1657 spin_lock_irqsave(&fcport->rport_lock, flags); 1684 spin_lock_irqsave(&fcport->rport_lock, flags);
1658 fcport->drport = rport; 1685 fcport->drport = rport;
1659 fcport->rport = NULL; 1686 fcport->rport = NULL;
1687 *(fc_port_t **)rport->dd_data = NULL;
1660 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1688 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1661 set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); 1689 set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
1662 } else { 1690 } else {
1663 spin_lock_irqsave(&fcport->rport_lock, flags); 1691 spin_lock_irqsave(&fcport->rport_lock, flags);
1664 fcport->rport = NULL; 1692 fcport->rport = NULL;
1693 *(fc_port_t **)rport->dd_data = NULL;
1665 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1694 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1666 fc_remote_port_delete(rport); 1695 fc_remote_port_delete(rport);
1667 } 1696 }
@@ -2066,6 +2095,8 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2066 ha->fw_dumped = 0; 2095 ha->fw_dumped = 0;
2067 ha->fw_dump_reading = 0; 2096 ha->fw_dump_reading = 0;
2068 ha->fw_dump_buffer = NULL; 2097 ha->fw_dump_buffer = NULL;
2098
2099 vfree(ha->optrom_buffer);
2069} 2100}
2070 2101
2071/* 2102/*
@@ -2314,6 +2345,9 @@ qla2x00_do_dpc(void *data)
2314 if (!ha->interrupts_on) 2345 if (!ha->interrupts_on)
2315 ha->isp_ops.enable_intrs(ha); 2346 ha->isp_ops.enable_intrs(ha);
2316 2347
2348 if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags))
2349 ha->isp_ops.beacon_blink(ha);
2350
2317 ha->dpc_active = 0; 2351 ha->dpc_active = 0;
2318 } /* End of while(1) */ 2352 } /* End of while(1) */
2319 2353
@@ -2491,6 +2525,12 @@ qla2x00_timer(scsi_qla_host_t *ha)
2491 atomic_read(&ha->loop_down_timer))); 2525 atomic_read(&ha->loop_down_timer)));
2492 } 2526 }
2493 2527
2528 /* Check if beacon LED needs to be blinked */
2529 if (ha->beacon_blink_led == 1) {
2530 set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags);
2531 start_dpc++;
2532 }
2533
2494 /* Schedule the DPC routine if needed */ 2534 /* Schedule the DPC routine if needed */
2495 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || 2535 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
2496 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || 2536 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
@@ -2499,6 +2539,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
2499 start_dpc || 2539 start_dpc ||
2500 test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) || 2540 test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) ||
2501 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || 2541 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
2542 test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) ||
2502 test_bit(RELOGIN_NEEDED, &ha->dpc_flags)) && 2543 test_bit(RELOGIN_NEEDED, &ha->dpc_flags)) &&
2503 ha->dpc_wait && !ha->dpc_active) { 2544 ha->dpc_wait && !ha->dpc_active) {
2504 2545
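The qla_os.c changes close a window during rport teardown: qla2x00_schedule_rport_del() now clears the fc_port back-pointer stored in rport->dd_data under rport_lock, and both queuecommand paths bounce commands with DID_IMM_RETRY when that pointer is already NULL, so nothing is issued against a remote port that is mid-transition. Probe also seeds link_data_rate and optrom_size, wires the per-ISP beacon and option-ROM isp_ops, the timer raises BEACON_BLINK_NEEDED DPC work while beacon_blink_led is set, and qla2x00_mem_free() releases any leftover option-ROM buffer. A hedged sketch of the guard pattern; the helper is illustrative, the real checks sit inline in the two queuecommand routines:

	static int example_rport_gone(struct scsi_cmnd *cmd, struct fc_rport *rport)
	{
		if (!*(fc_port_t **)rport->dd_data) {
			/* rport is being torn down: fail fast and let the
			 * midlayer retry once the transport settles */
			cmd->result = DID_IMM_RETRY << 16;
			return 1;
		}
		return 0;
	}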
diff --git a/drivers/scsi/qla2xxx/qla_rscn.c b/drivers/scsi/qla2xxx/qla_rscn.c
index 2c3342108dd8..b70bebe18c01 100644
--- a/drivers/scsi/qla2xxx/qla_rscn.c
+++ b/drivers/scsi/qla2xxx/qla_rscn.c
@@ -6,8 +6,6 @@
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8 8
9#include <scsi/scsi_transport_fc.h>
10
11/** 9/**
12 * IO descriptor handle definitions. 10 * IO descriptor handle definitions.
13 * 11 *
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index f4d755a643e4..3866a5760f15 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -695,3 +695,966 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
695 695
696 return ret; 696 return ret;
697} 697}
698
699
700static inline void
701qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
702{
703 if (IS_QLA2322(ha)) {
704 /* Flip all colors. */
705 if (ha->beacon_color_state == QLA_LED_ALL_ON) {
706 /* Turn off. */
707 ha->beacon_color_state = 0;
708 *pflags = GPIO_LED_ALL_OFF;
709 } else {
710 /* Turn on. */
711 ha->beacon_color_state = QLA_LED_ALL_ON;
712 *pflags = GPIO_LED_RGA_ON;
713 }
714 } else {
715 /* Flip green led only. */
716 if (ha->beacon_color_state == QLA_LED_GRN_ON) {
717 /* Turn off. */
718 ha->beacon_color_state = 0;
719 *pflags = GPIO_LED_GREEN_OFF_AMBER_OFF;
720 } else {
721 /* Turn on. */
722 ha->beacon_color_state = QLA_LED_GRN_ON;
723 *pflags = GPIO_LED_GREEN_ON_AMBER_OFF;
724 }
725 }
726}
727
728void
729qla2x00_beacon_blink(struct scsi_qla_host *ha)
730{
731 uint16_t gpio_enable;
732 uint16_t gpio_data;
733 uint16_t led_color = 0;
734 unsigned long flags;
735 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
736
737 if (ha->pio_address)
738 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
739
740 spin_lock_irqsave(&ha->hardware_lock, flags);
741
742 /* Save the Original GPIOE. */
743 if (ha->pio_address) {
744 gpio_enable = RD_REG_WORD_PIO(&reg->gpioe);
745 gpio_data = RD_REG_WORD_PIO(&reg->gpiod);
746 } else {
747 gpio_enable = RD_REG_WORD(&reg->gpioe);
748 gpio_data = RD_REG_WORD(&reg->gpiod);
749 }
750
751 /* Set the modified gpio_enable values */
752 gpio_enable |= GPIO_LED_MASK;
753
754 if (ha->pio_address) {
755 WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable);
756 } else {
757 WRT_REG_WORD(&reg->gpioe, gpio_enable);
758 RD_REG_WORD(&reg->gpioe);
759 }
760
761 qla2x00_flip_colors(ha, &led_color);
762
763 /* Clear out any previously set LED color. */
764 gpio_data &= ~GPIO_LED_MASK;
765
766 /* Set the new input LED color to GPIOD. */
767 gpio_data |= led_color;
768
769 /* Set the modified gpio_data values */
770 if (ha->pio_address) {
771 WRT_REG_WORD_PIO(&reg->gpiod, gpio_data);
772 } else {
773 WRT_REG_WORD(&reg->gpiod, gpio_data);
774 RD_REG_WORD(&reg->gpiod);
775 }
776
777 spin_unlock_irqrestore(&ha->hardware_lock, flags);
778}
779
780int
781qla2x00_beacon_on(struct scsi_qla_host *ha)
782{
783 uint16_t gpio_enable;
784 uint16_t gpio_data;
785 unsigned long flags;
786 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
787
788 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
789 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
790
791 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) {
792 qla_printk(KERN_WARNING, ha,
793 "Unable to update fw options (beacon on).\n");
794 return QLA_FUNCTION_FAILED;
795 }
796
797 if (ha->pio_address)
798 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
799
800 /* Turn off LEDs. */
801 spin_lock_irqsave(&ha->hardware_lock, flags);
802 if (ha->pio_address) {
803 gpio_enable = RD_REG_WORD_PIO(&reg->gpioe);
804 gpio_data = RD_REG_WORD_PIO(&reg->gpiod);
805 } else {
806 gpio_enable = RD_REG_WORD(&reg->gpioe);
807 gpio_data = RD_REG_WORD(&reg->gpiod);
808 }
809 gpio_enable |= GPIO_LED_MASK;
810
811 /* Set the modified gpio_enable values. */
812 if (ha->pio_address) {
813 WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable);
814 } else {
815 WRT_REG_WORD(&reg->gpioe, gpio_enable);
816 RD_REG_WORD(&reg->gpioe);
817 }
818
819 /* Clear out previously set LED color. */
820 gpio_data &= ~GPIO_LED_MASK;
821 if (ha->pio_address) {
822 WRT_REG_WORD_PIO(&reg->gpiod, gpio_data);
823 } else {
824 WRT_REG_WORD(&reg->gpiod, gpio_data);
825 RD_REG_WORD(&reg->gpiod);
826 }
827 spin_unlock_irqrestore(&ha->hardware_lock, flags);
828
829 /*
830 * Let the per HBA timer kick off the blinking process based on
831 * the following flags. No need to do anything else now.
832 */
833 ha->beacon_blink_led = 1;
834 ha->beacon_color_state = 0;
835
836 return QLA_SUCCESS;
837}
838
839int
840qla2x00_beacon_off(struct scsi_qla_host *ha)
841{
842 int rval = QLA_SUCCESS;
843
844 ha->beacon_blink_led = 0;
845
846 /* Set the on flag so when it gets flipped it will be off. */
847 if (IS_QLA2322(ha))
848 ha->beacon_color_state = QLA_LED_ALL_ON;
849 else
850 ha->beacon_color_state = QLA_LED_GRN_ON;
851
852 ha->isp_ops.beacon_blink(ha); /* This turns green LED off */
853
854 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
855 ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7;
856
857 rval = qla2x00_set_fw_options(ha, ha->fw_options);
858 if (rval != QLA_SUCCESS)
859 qla_printk(KERN_WARNING, ha,
860 "Unable to update fw options (beacon off).\n");
861 return rval;
862}
863
864
865static inline void
866qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
867{
868 /* Flip all colors. */
869 if (ha->beacon_color_state == QLA_LED_ALL_ON) {
870 /* Turn off. */
871 ha->beacon_color_state = 0;
872 *pflags = 0;
873 } else {
874 /* Turn on. */
875 ha->beacon_color_state = QLA_LED_ALL_ON;
876 *pflags = GPDX_LED_YELLOW_ON | GPDX_LED_AMBER_ON;
877 }
878}
879
880void
881qla24xx_beacon_blink(struct scsi_qla_host *ha)
882{
883 uint16_t led_color = 0;
884 uint32_t gpio_data;
885 unsigned long flags;
886 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
887
888 /* Save the Original GPIOD. */
889 spin_lock_irqsave(&ha->hardware_lock, flags);
890 gpio_data = RD_REG_DWORD(&reg->gpiod);
891
892 /* Enable the gpio_data reg for update. */
893 gpio_data |= GPDX_LED_UPDATE_MASK;
894
895 WRT_REG_DWORD(&reg->gpiod, gpio_data);
896 gpio_data = RD_REG_DWORD(&reg->gpiod);
897
898 /* Set the color bits. */
899 qla24xx_flip_colors(ha, &led_color);
900
901 /* Clear out any previously set LED color. */
902 gpio_data &= ~GPDX_LED_COLOR_MASK;
903
904 /* Set the new input LED color to GPIOD. */
905 gpio_data |= led_color;
906
907 /* Set the modified gpio_data values. */
908 WRT_REG_DWORD(&reg->gpiod, gpio_data);
909 gpio_data = RD_REG_DWORD(&reg->gpiod);
910 spin_unlock_irqrestore(&ha->hardware_lock, flags);
911}
912
913int
914qla24xx_beacon_on(struct scsi_qla_host *ha)
915{
916 uint32_t gpio_data;
917 unsigned long flags;
918 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
919
920 if (ha->beacon_blink_led == 0) {
921 /* Enable firmware for update */
922 ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
923
924 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS)
925 return QLA_FUNCTION_FAILED;
926
927 if (qla2x00_get_fw_options(ha, ha->fw_options) !=
928 QLA_SUCCESS) {
929 qla_printk(KERN_WARNING, ha,
930 "Unable to update fw options (beacon on).\n");
931 return QLA_FUNCTION_FAILED;
932 }
933
934 spin_lock_irqsave(&ha->hardware_lock, flags);
935 gpio_data = RD_REG_DWORD(&reg->gpiod);
936
937 /* Enable the gpio_data reg for update. */
938 gpio_data |= GPDX_LED_UPDATE_MASK;
939 WRT_REG_DWORD(&reg->gpiod, gpio_data);
940 RD_REG_DWORD(&reg->gpiod);
941
942 spin_unlock_irqrestore(&ha->hardware_lock, flags);
943 }
944
945 /* So all colors blink together. */
946 ha->beacon_color_state = 0;
947
948 /* Let the per HBA timer kick off the blinking process. */
949 ha->beacon_blink_led = 1;
950
951 return QLA_SUCCESS;
952}
953
954int
955qla24xx_beacon_off(struct scsi_qla_host *ha)
956{
957 uint32_t gpio_data;
958 unsigned long flags;
959 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
960
961 ha->beacon_blink_led = 0;
962 ha->beacon_color_state = QLA_LED_ALL_ON;
963
964 ha->isp_ops.beacon_blink(ha); /* Will flip to all off. */
965
966 /* Give control back to firmware. */
967 spin_lock_irqsave(&ha->hardware_lock, flags);
968 gpio_data = RD_REG_DWORD(&reg->gpiod);
969
970 /* Disable the gpio_data reg for update. */
971 gpio_data &= ~GPDX_LED_UPDATE_MASK;
972 WRT_REG_DWORD(&reg->gpiod, gpio_data);
973 RD_REG_DWORD(&reg->gpiod);
974 spin_unlock_irqrestore(&ha->hardware_lock, flags);
975
976 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
977
978 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) {
979 qla_printk(KERN_WARNING, ha,
980 "Unable to update fw options (beacon off).\n");
981 return QLA_FUNCTION_FAILED;
982 }
983
984 if (qla2x00_get_fw_options(ha, ha->fw_options) != QLA_SUCCESS) {
985 qla_printk(KERN_WARNING, ha,
986 "Unable to get fw options (beacon off).\n");
987 return QLA_FUNCTION_FAILED;
988 }
989
990 return QLA_SUCCESS;
991}
992
993
994/*
995 * Flash support routines
996 */
997
998/**
999 * qla2x00_flash_enable() - Setup flash for reading and writing.
1000 * @ha: HA context
1001 */
1002static void
1003qla2x00_flash_enable(scsi_qla_host_t *ha)
1004{
1005 uint16_t data;
1006 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1007
1008 data = RD_REG_WORD(&reg->ctrl_status);
1009 data |= CSR_FLASH_ENABLE;
1010 WRT_REG_WORD(&reg->ctrl_status, data);
1011 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1012}
1013
1014/**
1015 * qla2x00_flash_disable() - Disable flash and allow RISC to run.
1016 * @ha: HA context
1017 */
1018static void
1019qla2x00_flash_disable(scsi_qla_host_t *ha)
1020{
1021 uint16_t data;
1022 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1023
1024 data = RD_REG_WORD(&reg->ctrl_status);
1025 data &= ~(CSR_FLASH_ENABLE);
1026 WRT_REG_WORD(&reg->ctrl_status, data);
1027 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1028}
1029
1030/**
1031 * qla2x00_read_flash_byte() - Reads a byte from flash
1032 * @ha: HA context
1033 * @addr: Address in flash to read
1034 *
1035 * A word is read from the chip, but only the lower byte is valid.
1036 *
1037 * Returns the byte read from flash @addr.
1038 */
1039static uint8_t
1040qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr)
1041{
1042 uint16_t data;
1043 uint16_t bank_select;
1044 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1045
1046 bank_select = RD_REG_WORD(&reg->ctrl_status);
1047
1048 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
1049 /* Specify 64K address range: */
1050 /* clear out Module Select and Flash Address bits [19:16]. */
1051 bank_select &= ~0xf8;
1052 bank_select |= addr >> 12 & 0xf0;
1053 bank_select |= CSR_FLASH_64K_BANK;
1054 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1055 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1056
1057 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
1058 data = RD_REG_WORD(&reg->flash_data);
1059
1060 return (uint8_t)data;
1061 }
1062
1063 /* Setup bit 16 of flash address. */
1064 if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
1065 bank_select |= CSR_FLASH_64K_BANK;
1066 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1067 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1068 } else if (((addr & BIT_16) == 0) &&
1069 (bank_select & CSR_FLASH_64K_BANK)) {
1070 bank_select &= ~(CSR_FLASH_64K_BANK);
1071 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1072 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1073 }
1074
1075 /* Always perform IO mapped accesses to the FLASH registers. */
1076 if (ha->pio_address) {
1077 uint16_t data2;
1078
1079 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
1080 WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr);
1081 do {
1082 data = RD_REG_WORD_PIO(&reg->flash_data);
1083 barrier();
1084 cpu_relax();
1085 data2 = RD_REG_WORD_PIO(&reg->flash_data);
1086 } while (data != data2);
1087 } else {
1088 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
1089 data = qla2x00_debounce_register(&reg->flash_data);
1090 }
1091
1092 return (uint8_t)data;
1093}
1094
1095/**
1096 * qla2x00_write_flash_byte() - Write a byte to flash
1097 * @ha: HA context
1098 * @addr: Address in flash to write
1099 * @data: Data to write
1100 */
1101static void
1102qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data)
1103{
1104 uint16_t bank_select;
1105 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1106
1107 bank_select = RD_REG_WORD(&reg->ctrl_status);
1108 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
1109 /* Specify 64K address range: */
1110 /* clear out Module Select and Flash Address bits [19:16]. */
1111 bank_select &= ~0xf8;
1112 bank_select |= addr >> 12 & 0xf0;
1113 bank_select |= CSR_FLASH_64K_BANK;
1114 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1115 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1116
1117 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
1118 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1119 WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
1120 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1121
1122 return;
1123 }
1124
1125 /* Setup bit 16 of flash address. */
1126 if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
1127 bank_select |= CSR_FLASH_64K_BANK;
1128 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1129 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1130 } else if (((addr & BIT_16) == 0) &&
1131 (bank_select & CSR_FLASH_64K_BANK)) {
1132 bank_select &= ~(CSR_FLASH_64K_BANK);
1133 WRT_REG_WORD(&reg->ctrl_status, bank_select);
1134 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1135 }
1136
1137 /* Always perform IO mapped accesses to the FLASH registers. */
1138 if (ha->pio_address) {
1139 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
1140 WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr);
1141 WRT_REG_WORD_PIO(&reg->flash_data, (uint16_t)data);
1142 } else {
1143 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
1144 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1145 WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
1146 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1147 }
1148}
1149
1150/**
1151 * qla2x00_poll_flash() - Polls flash for completion.
1152 * @ha: HA context
1153 * @addr: Address in flash to poll
1154 * @poll_data: Data to be polled
1155 * @man_id: Flash manufacturer ID
1156 * @flash_id: Flash ID
1157 *
1158 * This function polls the device until bit 7 of what is read matches data
1159 * bit 7 or until data bit 5 becomes a 1. If that happens, the flash ROM timed
1160 * out (a fatal error). The flash book recommends reading bit 7 again after
1161 * reading bit 5 as a 1.
1162 *
1163 * Returns 0 on success, else non-zero.
1164 */
1165static int
1166qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data,
1167 uint8_t man_id, uint8_t flash_id)
1168{
1169 int status;
1170 uint8_t flash_data;
1171 uint32_t cnt;
1172
1173 status = 1;
1174
1175 /* Wait for 30 seconds for command to finish. */
1176 poll_data &= BIT_7;
1177 for (cnt = 3000000; cnt; cnt--) {
1178 flash_data = qla2x00_read_flash_byte(ha, addr);
1179 if ((flash_data & BIT_7) == poll_data) {
1180 status = 0;
1181 break;
1182 }
1183
1184 if (man_id != 0x40 && man_id != 0xda) {
1185 if ((flash_data & BIT_5) && cnt > 2)
1186 cnt = 2;
1187 }
1188 udelay(10);
1189 barrier();
1190 }
1191 return status;
1192}
1193
1194#define IS_OEM_001(ha) \
1195 ((ha)->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2322 && \
1196 (ha)->pdev->subsystem_vendor == 0x1028 && \
1197 (ha)->pdev->subsystem_device == 0x0170)
1198
1199/**
1200 * qla2x00_program_flash_address() - Programs a flash address
1201 * @ha: HA context
1202 * @addr: Address in flash to program
1203 * @data: Data to be written in flash
1204 * @man_id: Flash manufacturer ID
1205 * @flash_id: Flash ID
1206 *
1207 * Returns 0 on success, else non-zero.
1208 */
1209static int
1210qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data,
1211 uint8_t man_id, uint8_t flash_id)
1212{
1213 /* Write Program Command Sequence. */
1214 if (IS_OEM_001(ha)) {
1215 qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
1216 qla2x00_write_flash_byte(ha, 0x555, 0x55);
1217 qla2x00_write_flash_byte(ha, 0xaaa, 0xa0);
1218 qla2x00_write_flash_byte(ha, addr, data);
1219 } else {
1220 if (man_id == 0xda && flash_id == 0xc1) {
1221 qla2x00_write_flash_byte(ha, addr, data);
1222 if (addr & 0x7e)
1223 return 0;
1224 } else {
1225 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1226 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1227 qla2x00_write_flash_byte(ha, 0x5555, 0xa0);
1228 qla2x00_write_flash_byte(ha, addr, data);
1229 }
1230 }
1231
1232 udelay(150);
1233
1234 /* Wait for write to complete. */
1235 return qla2x00_poll_flash(ha, addr, data, man_id, flash_id);
1236}
1237
1238/**
1239 * qla2x00_erase_flash() - Erase the flash.
1240 * @ha: HA context
1241 * @man_id: Flash manufacturer ID
1242 * @flash_id: Flash ID
1243 *
1244 * Returns 0 on success, else non-zero.
1245 */
1246static int
1247qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id)
1248{
1249 /* Individual Sector Erase Command Sequence */
1250 if (IS_OEM_001(ha)) {
1251 qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
1252 qla2x00_write_flash_byte(ha, 0x555, 0x55);
1253 qla2x00_write_flash_byte(ha, 0xaaa, 0x80);
1254 qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
1255 qla2x00_write_flash_byte(ha, 0x555, 0x55);
1256 qla2x00_write_flash_byte(ha, 0xaaa, 0x10);
1257 } else {
1258 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1259 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1260 qla2x00_write_flash_byte(ha, 0x5555, 0x80);
1261 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1262 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1263 qla2x00_write_flash_byte(ha, 0x5555, 0x10);
1264 }
1265
1266 udelay(150);
1267
1268 /* Wait for erase to complete. */
1269 return qla2x00_poll_flash(ha, 0x00, 0x80, man_id, flash_id);
1270}
1271
1272/**
1273 * qla2x00_erase_flash_sector() - Erase a flash sector.
1274 * @ha: HA context
1275 * @addr: Flash sector to erase
1276 * @sec_mask: Sector address mask
1277 * @man_id: Flash manufacturer ID
1278 * @flash_id: Flash ID
1279 *
1280 * Returns 0 on success, else non-zero.
1281 */
1282static int
1283qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr,
1284 uint32_t sec_mask, uint8_t man_id, uint8_t flash_id)
1285{
1286 /* Individual Sector Erase Command Sequence */
1287 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1288 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1289 qla2x00_write_flash_byte(ha, 0x5555, 0x80);
1290 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1291 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1292 if (man_id == 0x1f && flash_id == 0x13)
1293 qla2x00_write_flash_byte(ha, addr & sec_mask, 0x10);
1294 else
1295 qla2x00_write_flash_byte(ha, addr & sec_mask, 0x30);
1296
1297 udelay(150);
1298
1299 /* Wait for erase to complete. */
1300 return qla2x00_poll_flash(ha, addr, 0x80, man_id, flash_id);
1301}
1302
1303/**
1304 * qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip.
1305 * @man_id: Flash manufacturer ID
1306 * @flash_id: Flash ID
1307 */
1308static void
1309qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
1310 uint8_t *flash_id)
1311{
1312 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1313 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1314 qla2x00_write_flash_byte(ha, 0x5555, 0x90);
1315 *man_id = qla2x00_read_flash_byte(ha, 0x0000);
1316 *flash_id = qla2x00_read_flash_byte(ha, 0x0001);
1317 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
1318 qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
1319 qla2x00_write_flash_byte(ha, 0x5555, 0xf0);
1320}
1321
1322
1323static inline void
1324qla2x00_suspend_hba(struct scsi_qla_host *ha)
1325{
1326 int cnt;
1327 unsigned long flags;
1328 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1329
1330 /* Suspend HBA. */
1331 scsi_block_requests(ha->host);
1332 ha->isp_ops.disable_intrs(ha);
1333 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1334
1335 /* Pause RISC. */
1336 spin_lock_irqsave(&ha->hardware_lock, flags);
1337 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
1338 RD_REG_WORD(&reg->hccr);
1339 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1340 for (cnt = 0; cnt < 30000; cnt++) {
1341 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
1342 break;
1343 udelay(100);
1344 }
1345 } else {
1346 udelay(10);
1347 }
1348 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1349}
1350
1351static inline void
1352qla2x00_resume_hba(struct scsi_qla_host *ha)
1353{
1354 /* Resume HBA. */
1355 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1356 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1357 up(ha->dpc_wait);
1358 qla2x00_wait_for_hba_online(ha);
1359 scsi_unblock_requests(ha->host);
1360}
1361
1362uint8_t *
1363qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1364 uint32_t offset, uint32_t length)
1365{
1366 unsigned long flags;
1367 uint32_t addr, midpoint;
1368 uint8_t *data;
1369 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1370
1371 /* Suspend HBA. */
1372 qla2x00_suspend_hba(ha);
1373
1374 /* Go with read. */
1375 spin_lock_irqsave(&ha->hardware_lock, flags);
1376 midpoint = ha->optrom_size / 2;
1377
1378 qla2x00_flash_enable(ha);
1379 WRT_REG_WORD(&reg->nvram, 0);
1380 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
1381 for (addr = offset, data = buf; addr < length; addr++, data++) {
1382 if (addr == midpoint) {
1383 WRT_REG_WORD(&reg->nvram, NVR_SELECT);
1384 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
1385 }
1386
1387 *data = qla2x00_read_flash_byte(ha, addr);
1388 }
1389 qla2x00_flash_disable(ha);
1390 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1391
1392 /* Resume HBA. */
1393 qla2x00_resume_hba(ha);
1394
1395 return buf;
1396}
1397
1398int
1399qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1400 uint32_t offset, uint32_t length)
1401{
1402
1403 int rval;
1404 unsigned long flags;
1405 uint8_t man_id, flash_id, sec_number, data;
1406 uint16_t wd;
1407 uint32_t addr, liter, sec_mask, rest_addr;
1408 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1409
1410 /* Suspend HBA. */
1411 qla2x00_suspend_hba(ha);
1412
1413 rval = QLA_SUCCESS;
1414 sec_number = 0;
1415
1416 /* Reset ISP chip. */
1417 spin_lock_irqsave(&ha->hardware_lock, flags);
1418 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1419 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
1420
1421 /* Go with write. */
1422 qla2x00_flash_enable(ha);
1423 do { /* Loop once to provide quick error exit */
1424 /* Structure of flash memory based on manufacturer */
1425 if (IS_OEM_001(ha)) {
1426 /* OEM variant with special flash part. */
1427 man_id = flash_id = 0;
1428 rest_addr = 0xffff;
1429 sec_mask = 0x10000;
1430 goto update_flash;
1431 }
1432 qla2x00_get_flash_manufacturer(ha, &man_id, &flash_id);
1433 switch (man_id) {
1434 case 0x20: /* ST flash. */
1435 if (flash_id == 0xd2 || flash_id == 0xe3) {
1436 /*
1437 * ST m29w008at part - 64kb sector size with
1438 * 32kb,8kb,8kb,16kb sectors at memory address
1439 * 0xf0000.
1440 */
1441 rest_addr = 0xffff;
1442 sec_mask = 0x10000;
1443 break;
1444 }
1445 /*
1446 * ST m29w010b part - 16kb sector size
1447 * Default to 16kb sectors
1448 */
1449 rest_addr = 0x3fff;
1450 sec_mask = 0x1c000;
1451 break;
1452 case 0x40: /* Mostel flash. */
1453 /* Mostel v29c51001 part - 512 byte sector size. */
1454 rest_addr = 0x1ff;
1455 sec_mask = 0x1fe00;
1456 break;
1457 case 0xbf: /* SST flash. */
1458 /* SST39sf10 part - 4kb sector size. */
1459 rest_addr = 0xfff;
1460 sec_mask = 0x1f000;
1461 break;
1462 case 0xda: /* Winbond flash. */
1463 /* Winbond W29EE011 part - 256 byte sector size. */
1464 rest_addr = 0x7f;
1465 sec_mask = 0x1ff80;
1466 break;
1467 case 0xc2: /* Macronix flash. */
1468 /* 64k sector size. */
1469 if (flash_id == 0x38 || flash_id == 0x4f) {
1470 rest_addr = 0xffff;
1471 sec_mask = 0x10000;
1472 break;
1473 }
1474 /* Fall through... */
1475
1476 case 0x1f: /* Atmel flash. */
1477 /* 512k sector size. */
1478 if (flash_id == 0x13) {
1479 rest_addr = 0x7fffffff;
1480 sec_mask = 0x80000000;
1481 break;
1482 }
1483 /* Fall through... */
1484
1485 case 0x01: /* AMD flash. */
1486 if (flash_id == 0x38 || flash_id == 0x40 ||
1487 flash_id == 0x4f) {
1488 /* Am29LV081 part - 64kb sector size. */
1489 /* Am29LV002BT part - 64kb sector size. */
1490 rest_addr = 0xffff;
1491 sec_mask = 0x10000;
1492 break;
1493 } else if (flash_id == 0x3e) {
1494 /*
1495 * Am29LV008b part - 64kb sector size with
1496 * 32kb,8kb,8kb,16kb sector at memory address
1497 * 0xf0000.
1498 */
1499 rest_addr = 0xffff;
1500 sec_mask = 0x10000;
1501 break;
1502 } else if (flash_id == 0x20 || flash_id == 0x6e) {
1503 /*
1504 * Am29LV010 part or AM29f010 - 16kb sector
1505 * size.
1506 */
1507 rest_addr = 0x3fff;
1508 sec_mask = 0x1c000;
1509 break;
1510 } else if (flash_id == 0x6d) {
1511 /* Am29LV001 part - 8kb sector size. */
1512 rest_addr = 0x1fff;
1513 sec_mask = 0x1e000;
1514 break;
1515 }
1516 default:
1517 /* Default to 16 kb sector size. */
1518 rest_addr = 0x3fff;
1519 sec_mask = 0x1c000;
1520 break;
1521 }
1522
1523update_flash:
1524 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
1525 if (qla2x00_erase_flash(ha, man_id, flash_id)) {
1526 rval = QLA_FUNCTION_FAILED;
1527 break;
1528 }
1529 }
1530
1531 for (addr = offset, liter = 0; liter < length; liter++,
1532 addr++) {
1533 data = buf[liter];
1534 /* Are we at the beginning of a sector? */
1535 if ((addr & rest_addr) == 0) {
1536 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
1537 if (addr >= 0x10000UL) {
1538 if (((addr >> 12) & 0xf0) &&
1539 ((man_id == 0x01 &&
1540 flash_id == 0x3e) ||
1541 (man_id == 0x20 &&
1542 flash_id == 0xd2))) {
1543 sec_number++;
1544 if (sec_number == 1) {
1545 rest_addr =
1546 0x7fff;
1547 sec_mask =
1548 0x18000;
1549 } else if (
1550 sec_number == 2 ||
1551 sec_number == 3) {
1552 rest_addr =
1553 0x1fff;
1554 sec_mask =
1555 0x1e000;
1556 } else if (
1557 sec_number == 4) {
1558 rest_addr =
1559 0x3fff;
1560 sec_mask =
1561 0x1c000;
1562 }
1563 }
1564 }
1565 } else if (addr == ha->optrom_size / 2) {
1566 WRT_REG_WORD(&reg->nvram, NVR_SELECT);
1567 RD_REG_WORD(&reg->nvram);
1568 }
1569
1570 if (flash_id == 0xda && man_id == 0xc1) {
1571 qla2x00_write_flash_byte(ha, 0x5555,
1572 0xaa);
1573 qla2x00_write_flash_byte(ha, 0x2aaa,
1574 0x55);
1575 qla2x00_write_flash_byte(ha, 0x5555,
1576 0xa0);
1577 } else if (!IS_QLA2322(ha) && !IS_QLA6322(ha)) {
1578 /* Then erase it */
1579 if (qla2x00_erase_flash_sector(ha,
1580 addr, sec_mask, man_id,
1581 flash_id)) {
1582 rval = QLA_FUNCTION_FAILED;
1583 break;
1584 }
1585 if (man_id == 0x01 && flash_id == 0x6d)
1586 sec_number++;
1587 }
1588 }
1589
1590 if (man_id == 0x01 && flash_id == 0x6d) {
1591 if (sec_number == 1 &&
1592 addr == (rest_addr - 1)) {
1593 rest_addr = 0x0fff;
1594 sec_mask = 0x1f000;
1595 } else if (sec_number == 3 && (addr & 0x7ffe)) {
1596 rest_addr = 0x3fff;
1597 sec_mask = 0x1c000;
1598 }
1599 }
1600
1601 if (qla2x00_program_flash_address(ha, addr, data,
1602 man_id, flash_id)) {
1603 rval = QLA_FUNCTION_FAILED;
1604 break;
1605 }
1606 }
1607 } while (0);
1608 qla2x00_flash_disable(ha);
1609 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1610
1611 /* Resume HBA. */
1612 qla2x00_resume_hba(ha);
1613
1614 return rval;
1615}
1616
1617uint8_t *
1618qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1619 uint32_t offset, uint32_t length)
1620{
1621 /* Suspend HBA. */
1622 scsi_block_requests(ha->host);
1623 ha->isp_ops.disable_intrs(ha);
1624 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1625
1626 /* Go with read. */
1627 qla24xx_read_flash_data(ha, (uint32_t *)buf, offset >> 2, length >> 2);
1628
1629 /* Resume HBA. */
1630 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1631 ha->isp_ops.enable_intrs(ha);
1632 scsi_unblock_requests(ha->host);
1633
1634 return buf;
1635}
1636
1637int
1638qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1639 uint32_t offset, uint32_t length)
1640{
1641 int rval;
1642
1643 /* Suspend HBA. */
1644 scsi_block_requests(ha->host);
1645 ha->isp_ops.disable_intrs(ha);
1646 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1647
1648 /* Go with write. */
1649 rval = qla24xx_write_flash_data(ha, (uint32_t *)buf, offset >> 2,
1650 length >> 2);
1651
1652 /* Resume HBA -- RISC reset needed. */
1653 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1654 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1655 up(ha->dpc_wait);
1656 qla2x00_wait_for_hba_online(ha);
1657 scsi_unblock_requests(ha->host);
1658
1659 return rval;
1660}
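
[Editor's note] The qla2x00_poll_flash() comment in the hunk above describes the standard JEDEC data-polling handshake used by these flash routines: after a program or erase command, reads of the device return the complement of the written data on bit 7 (DQ7) until the operation completes, and bit 5 (DQ5) going high signals a device timeout. A minimal, driver-independent sketch of that convention follows; the read_byte() callback, the loop bound, and the immediate DQ7 re-check after DQ5 are assumptions of this illustration (the driver instead shortens its retry count when DQ5 is seen), not the patch's exact flow.

#include <stdint.h>

#define DQ5 0x20
#define DQ7 0x80

/*
 * Generic JEDEC data-polling loop: returns 0 once the byte read back
 * matches the programmed data on DQ7, non-zero if DQ5 reports a device
 * timeout.  read_byte() stands in for the driver's flash accessor and
 * is an assumption of this sketch.
 */
static int jedec_poll(uint8_t (*read_byte)(uint32_t addr),
		      uint32_t addr, uint8_t want)
{
	uint8_t cur;
	unsigned long tries;

	want &= DQ7;
	for (tries = 0; tries < 3000000; tries++) {
		cur = read_byte(addr);
		if ((cur & DQ7) == want)
			return 0;	/* program/erase finished */
		if (cur & DQ5) {
			/* DQ5 set: re-read DQ7 once more, as datasheets advise. */
			cur = read_byte(addr);
			return (cur & DQ7) == want ? 0 : 1;
		}
	}
	return 1;	/* timed out */
}
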
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 6fddf17a3b70..2770005324b4 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -997,6 +997,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
997 case ATA_CMD_READ_EXT: 997 case ATA_CMD_READ_EXT:
998 case ATA_CMD_WRITE: 998 case ATA_CMD_WRITE:
999 case ATA_CMD_WRITE_EXT: 999 case ATA_CMD_WRITE_EXT:
1000 case ATA_CMD_WRITE_FUA_EXT:
1000 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); 1001 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1001 break; 1002 break;
1002#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */ 1003#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 2e2c3b7acb0c..e484e8db6810 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -81,6 +81,19 @@
81/* Port stride */ 81/* Port stride */
82#define VSC_SATA_PORT_OFFSET 0x200 82#define VSC_SATA_PORT_OFFSET 0x200
83 83
84/* Error interrupt status bit offsets */
85#define VSC_SATA_INT_ERROR_E_OFFSET 2
86#define VSC_SATA_INT_ERROR_P_OFFSET 4
87#define VSC_SATA_INT_ERROR_T_OFFSET 5
88#define VSC_SATA_INT_ERROR_M_OFFSET 1
89#define is_vsc_sata_int_err(port_idx, int_status) \
90 (int_status & ((1 << (VSC_SATA_INT_ERROR_E_OFFSET + (8 * port_idx))) | \
91 (1 << (VSC_SATA_INT_ERROR_P_OFFSET + (8 * port_idx))) | \
92 (1 << (VSC_SATA_INT_ERROR_T_OFFSET + (8 * port_idx))) | \
93 (1 << (VSC_SATA_INT_ERROR_M_OFFSET + (8 * port_idx))) \
94 )\
95 )
96
84 97
85static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) 98static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
86{ 99{
@@ -201,13 +214,28 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
201 struct ata_port *ap; 214 struct ata_port *ap;
202 215
203 ap = host_set->ports[i]; 216 ap = host_set->ports[i];
217
218 if (is_vsc_sata_int_err(i, int_status)) {
219 u32 err_status;
220 printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
221 err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
222 vsc_sata_scr_write(ap, SCR_ERROR, err_status);
223 handled++;
224 }
225
204 if (ap && !(ap->flags & 226 if (ap && !(ap->flags &
205 (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) { 227 (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
206 struct ata_queued_cmd *qc; 228 struct ata_queued_cmd *qc;
207 229
208 qc = ata_qc_from_tag(ap, ap->active_tag); 230 qc = ata_qc_from_tag(ap, ap->active_tag);
209 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 231 if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
210 handled += ata_host_intr(ap, qc); 232 handled += ata_host_intr(ap, qc);
233 } else {
234 printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
235 ata_chk_status(ap);
236 handled++;
237 }
238
211 } 239 }
212 } 240 }
213 } 241 }
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4a602853a98e..4362dcde74af 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/pci.h> 17#include <linux/pci.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/hardirq.h>
19 20
20#include <scsi/scsi.h> 21#include <scsi/scsi.h>
21#include <scsi/scsi_dbg.h> 22#include <scsi/scsi_dbg.h>
@@ -2248,3 +2249,61 @@ scsi_target_unblock(struct device *dev)
2248 device_for_each_child(dev, NULL, target_unblock); 2249 device_for_each_child(dev, NULL, target_unblock);
2249} 2250}
2250EXPORT_SYMBOL_GPL(scsi_target_unblock); 2251EXPORT_SYMBOL_GPL(scsi_target_unblock);
2252
2253
2254struct work_queue_work {
2255 struct work_struct work;
2256 void (*fn)(void *);
2257 void *data;
2258};
2259
2260static void execute_in_process_context_work(void *data)
2261{
2262 void (*fn)(void *data);
2263 struct work_queue_work *wqw = data;
2264
2265 fn = wqw->fn;
2266 data = wqw->data;
2267
2268 kfree(wqw);
2269
2270 fn(data);
2271}
2272
2273/**
2274 * scsi_execute_in_process_context - reliably execute the routine with user context
2275 * @fn: the function to execute
2276 * @data: data to pass to the function
2277 *
2278 * Executes the function immediately if process context is available,
2279 * otherwise schedules the function for delayed execution.
2280 *
2281 * Returns: 0 - function was executed
2282 * 1 - function was scheduled for execution
2283 * <0 - error
2284 */
2285int scsi_execute_in_process_context(void (*fn)(void *data), void *data)
2286{
2287 struct work_queue_work *wqw;
2288
2289 if (!in_interrupt()) {
2290 fn(data);
2291 return 0;
2292 }
2293
2294 wqw = kmalloc(sizeof(struct work_queue_work), GFP_ATOMIC);
2295
2296 if (unlikely(!wqw)) {
2297 printk(KERN_ERR "Failed to allocate memory\n");
2298 WARN_ON(1);
2299 return -ENOMEM;
2300 }
2301
2302 INIT_WORK(&wqw->work, execute_in_process_context_work, wqw);
2303 wqw->fn = fn;
2304 wqw->data = data;
2305 schedule_work(&wqw->work);
2306
2307 return 1;
2308}
2309EXPORT_SYMBOL_GPL(scsi_execute_in_process_context);
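
[Editor's note] The docblock above spells out the contract: run the routine immediately when the caller already has process context, otherwise bounce it through the shared work queue and return 1. A minimal caller sketch under those assumptions follows; struct my_object, my_object_teardown() and my_object_release() are hypothetical names for illustration, and the header carrying the new declaration is an assumption — only the scsi_execute_in_process_context() call itself comes from this patch (the scsi_scan.c and scsi_sysfs.c hunks below show the real in-tree users).

#include <linux/slab.h>
#include <scsi/scsi_device.h>	/* assumed to carry the new declaration */

/* Hypothetical driver-private object; not part of the patch. */
struct my_object {
	void *resources;
};

/* Always entered with process context, directly or via keventd. */
static void my_object_teardown(void *data)
{
	struct my_object *obj = data;

	/* Anything that may sleep belongs here, e.g. request-queue cleanup. */
	kfree(obj->resources);
	kfree(obj);
}

/* Final-put path that may be reached from interrupt context. */
static void my_object_release(struct my_object *obj)
{
	/*
	 * Runs my_object_teardown(obj) right away when not in interrupt
	 * context; otherwise queues it for deferred execution.
	 */
	scsi_execute_in_process_context(my_object_teardown, obj);
}
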
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 752fb5da3de4..5acb83ca5ae5 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -387,19 +387,12 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
387 return found_target; 387 return found_target;
388} 388}
389 389
390struct work_queue_wrapper { 390static void scsi_target_reap_usercontext(void *data)
391 struct work_struct work; 391{
392 struct scsi_target *starget; 392 struct scsi_target *starget = data;
393};
394
395static void scsi_target_reap_work(void *data) {
396 struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
397 struct scsi_target *starget = wqw->starget;
398 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 393 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
399 unsigned long flags; 394 unsigned long flags;
400 395
401 kfree(wqw);
402
403 spin_lock_irqsave(shost->host_lock, flags); 396 spin_lock_irqsave(shost->host_lock, flags);
404 397
405 if (--starget->reap_ref == 0 && list_empty(&starget->devices)) { 398 if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
@@ -428,18 +421,7 @@ static void scsi_target_reap_work(void *data) {
428 */ 421 */
429void scsi_target_reap(struct scsi_target *starget) 422void scsi_target_reap(struct scsi_target *starget)
430{ 423{
431 struct work_queue_wrapper *wqw = 424 scsi_execute_in_process_context(scsi_target_reap_usercontext, starget);
432 kzalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC);
433
434 if (!wqw) {
435 starget_printk(KERN_ERR, starget,
436 "Failed to allocate memory in scsi_reap_target()\n");
437 return;
438 }
439
440 INIT_WORK(&wqw->work, scsi_target_reap_work, wqw);
441 wqw->starget = starget;
442 schedule_work(&wqw->work);
443} 425}
444 426
445/** 427/**
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index a77b32deaf8f..902a5def8e62 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -217,8 +217,9 @@ static void scsi_device_cls_release(struct class_device *class_dev)
217 put_device(&sdev->sdev_gendev); 217 put_device(&sdev->sdev_gendev);
218} 218}
219 219
220static void scsi_device_dev_release(struct device *dev) 220static void scsi_device_dev_release_usercontext(void *data)
221{ 221{
222 struct device *dev = data;
222 struct scsi_device *sdev; 223 struct scsi_device *sdev;
223 struct device *parent; 224 struct device *parent;
224 struct scsi_target *starget; 225 struct scsi_target *starget;
@@ -237,6 +238,7 @@ static void scsi_device_dev_release(struct device *dev)
237 238
238 if (sdev->request_queue) { 239 if (sdev->request_queue) {
239 sdev->request_queue->queuedata = NULL; 240 sdev->request_queue->queuedata = NULL;
241 /* user context needed to free queue */
240 scsi_free_queue(sdev->request_queue); 242 scsi_free_queue(sdev->request_queue);
241 /* temporary expedient, try to catch use of queue lock 243 /* temporary expedient, try to catch use of queue lock
242 * after free of sdev */ 244 * after free of sdev */
@@ -252,6 +254,11 @@ static void scsi_device_dev_release(struct device *dev)
252 put_device(parent); 254 put_device(parent);
253} 255}
254 256
257static void scsi_device_dev_release(struct device *dev)
258{
259 scsi_execute_in_process_context(scsi_device_dev_release_usercontext, dev);
260}
261
255static struct class sdev_class = { 262static struct class sdev_class = {
256 .name = "scsi_device", 263 .name = "scsi_device",
257 .release = scsi_device_cls_release, 264 .release = scsi_device_cls_release,
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 723f7acbeb12..71e54a64adca 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -39,10 +39,6 @@ struct iscsi_internal {
39 struct iscsi_transport *iscsi_transport; 39 struct iscsi_transport *iscsi_transport;
40 struct list_head list; 40 struct list_head list;
41 /* 41 /*
42 * List of sessions for this transport
43 */
44 struct list_head sessions;
45 /*
46 * based on transport capabilities, at register time we set these 42 * based on transport capabilities, at register time we set these
47 * bits to tell the transport class it wants attributes displayed 43 * bits to tell the transport class it wants attributes displayed
48 * in sysfs or that it can support different iSCSI Data-Path 44 * in sysfs or that it can support different iSCSI Data-Path
@@ -164,9 +160,43 @@ static struct mempool_zone *z_reply;
164#define Z_MAX_ERROR 16 160#define Z_MAX_ERROR 16
165#define Z_HIWAT_ERROR 12 161#define Z_HIWAT_ERROR 12
166 162
163static LIST_HEAD(sesslist);
164static DEFINE_SPINLOCK(sesslock);
167static LIST_HEAD(connlist); 165static LIST_HEAD(connlist);
168static DEFINE_SPINLOCK(connlock); 166static DEFINE_SPINLOCK(connlock);
169 167
168static struct iscsi_cls_session *iscsi_session_lookup(uint64_t handle)
169{
170 unsigned long flags;
171 struct iscsi_cls_session *sess;
172
173 spin_lock_irqsave(&sesslock, flags);
174 list_for_each_entry(sess, &sesslist, sess_list) {
175 if (sess == iscsi_ptr(handle)) {
176 spin_unlock_irqrestore(&sesslock, flags);
177 return sess;
178 }
179 }
180 spin_unlock_irqrestore(&sesslock, flags);
181 return NULL;
182}
183
184static struct iscsi_cls_conn *iscsi_conn_lookup(uint64_t handle)
185{
186 unsigned long flags;
187 struct iscsi_cls_conn *conn;
188
189 spin_lock_irqsave(&connlock, flags);
190 list_for_each_entry(conn, &connlist, conn_list) {
191 if (conn == iscsi_ptr(handle)) {
192 spin_unlock_irqrestore(&connlock, flags);
193 return conn;
194 }
195 }
196 spin_unlock_irqrestore(&connlock, flags);
197 return NULL;
198}
199
170/* 200/*
171 * The following functions can be used by LLDs that allocate 201 * The following functions can be used by LLDs that allocate
172 * their own scsi_hosts or by software iscsi LLDs 202 * their own scsi_hosts or by software iscsi LLDs
@@ -365,6 +395,7 @@ iscsi_transport_create_session(struct scsi_transport_template *scsit,
365{ 395{
366 struct iscsi_cls_session *session; 396 struct iscsi_cls_session *session;
367 struct Scsi_Host *shost; 397 struct Scsi_Host *shost;
398 unsigned long flags;
368 399
369 shost = scsi_host_alloc(transport->host_template, 400 shost = scsi_host_alloc(transport->host_template,
370 hostdata_privsize(transport)); 401 hostdata_privsize(transport));
@@ -389,6 +420,9 @@ iscsi_transport_create_session(struct scsi_transport_template *scsit,
389 goto remove_host; 420 goto remove_host;
390 421
391 *(unsigned long*)shost->hostdata = (unsigned long)session; 422 *(unsigned long*)shost->hostdata = (unsigned long)session;
423 spin_lock_irqsave(&sesslock, flags);
424 list_add(&session->sess_list, &sesslist);
425 spin_unlock_irqrestore(&sesslock, flags);
392 return shost; 426 return shost;
393 427
394remove_host: 428remove_host:
@@ -410,9 +444,13 @@ EXPORT_SYMBOL_GPL(iscsi_transport_create_session);
410int iscsi_transport_destroy_session(struct Scsi_Host *shost) 444int iscsi_transport_destroy_session(struct Scsi_Host *shost)
411{ 445{
412 struct iscsi_cls_session *session; 446 struct iscsi_cls_session *session;
447 unsigned long flags;
413 448
414 scsi_remove_host(shost); 449 scsi_remove_host(shost);
415 session = hostdata_session(shost->hostdata); 450 session = hostdata_session(shost->hostdata);
451 spin_lock_irqsave(&sesslock, flags);
452 list_del(&session->sess_list);
453 spin_unlock_irqrestore(&sesslock, flags);
416 iscsi_destroy_session(session); 454 iscsi_destroy_session(session);
417 /* ref from host alloc */ 455 /* ref from host alloc */
418 scsi_host_put(shost); 456 scsi_host_put(shost);
@@ -424,22 +462,6 @@ EXPORT_SYMBOL_GPL(iscsi_transport_destroy_session);
424/* 462/*
425 * iscsi interface functions 463 * iscsi interface functions
426 */ 464 */
427static struct iscsi_cls_conn*
428iscsi_if_find_conn(uint64_t key)
429{
430 unsigned long flags;
431 struct iscsi_cls_conn *conn;
432
433 spin_lock_irqsave(&connlock, flags);
434 list_for_each_entry(conn, &connlist, conn_list)
435 if (conn->connh == key) {
436 spin_unlock_irqrestore(&connlock, flags);
437 return conn;
438 }
439 spin_unlock_irqrestore(&connlock, flags);
440 return NULL;
441}
442
443static struct iscsi_internal * 465static struct iscsi_internal *
444iscsi_if_transport_lookup(struct iscsi_transport *tt) 466iscsi_if_transport_lookup(struct iscsi_transport *tt)
445{ 467{
@@ -504,6 +526,12 @@ mempool_zone_init(unsigned max, unsigned size, unsigned hiwat)
504 if (!zp) 526 if (!zp)
505 return NULL; 527 return NULL;
506 528
529 zp->size = size;
530 zp->hiwat = hiwat;
531 INIT_LIST_HEAD(&zp->freequeue);
532 spin_lock_init(&zp->freelock);
533 atomic_set(&zp->allocated, 0);
534
507 zp->pool = mempool_create(max, mempool_zone_alloc_skb, 535 zp->pool = mempool_create(max, mempool_zone_alloc_skb,
508 mempool_zone_free_skb, zp); 536 mempool_zone_free_skb, zp);
509 if (!zp->pool) { 537 if (!zp->pool) {
@@ -511,13 +539,6 @@ mempool_zone_init(unsigned max, unsigned size, unsigned hiwat)
511 return NULL; 539 return NULL;
512 } 540 }
513 541
514 zp->size = size;
515 zp->hiwat = hiwat;
516
517 INIT_LIST_HEAD(&zp->freequeue);
518 spin_lock_init(&zp->freelock);
519 atomic_set(&zp->allocated, 0);
520
521 return zp; 542 return zp;
522} 543}
523 544
@@ -559,25 +580,21 @@ iscsi_unicast_skb(struct mempool_zone *zone, struct sk_buff *skb)
559 return 0; 580 return 0;
560} 581}
561 582
562int iscsi_recv_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, 583int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
563 char *data, uint32_t data_size) 584 char *data, uint32_t data_size)
564{ 585{
565 struct nlmsghdr *nlh; 586 struct nlmsghdr *nlh;
566 struct sk_buff *skb; 587 struct sk_buff *skb;
567 struct iscsi_uevent *ev; 588 struct iscsi_uevent *ev;
568 struct iscsi_cls_conn *conn;
569 char *pdu; 589 char *pdu;
570 int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) + 590 int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) +
571 data_size); 591 data_size);
572 592
573 conn = iscsi_if_find_conn(connh);
574 BUG_ON(!conn);
575
576 mempool_zone_complete(conn->z_pdu); 593 mempool_zone_complete(conn->z_pdu);
577 594
578 skb = mempool_zone_get_skb(conn->z_pdu); 595 skb = mempool_zone_get_skb(conn->z_pdu);
579 if (!skb) { 596 if (!skb) {
580 iscsi_conn_error(connh, ISCSI_ERR_CONN_FAILED); 597 iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED);
581 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver " 598 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver "
582 "control PDU: OOM\n"); 599 "control PDU: OOM\n");
583 return -ENOMEM; 600 return -ENOMEM;
@@ -590,7 +607,7 @@ int iscsi_recv_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr,
590 ev->type = ISCSI_KEVENT_RECV_PDU; 607 ev->type = ISCSI_KEVENT_RECV_PDU;
591 if (atomic_read(&conn->z_pdu->allocated) >= conn->z_pdu->hiwat) 608 if (atomic_read(&conn->z_pdu->allocated) >= conn->z_pdu->hiwat)
592 ev->iferror = -ENOMEM; 609 ev->iferror = -ENOMEM;
593 ev->r.recv_req.conn_handle = connh; 610 ev->r.recv_req.conn_handle = iscsi_handle(conn);
594 pdu = (char*)ev + sizeof(*ev); 611 pdu = (char*)ev + sizeof(*ev);
595 memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); 612 memcpy(pdu, hdr, sizeof(struct iscsi_hdr));
596 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); 613 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size);
@@ -599,17 +616,13 @@ int iscsi_recv_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr,
599} 616}
600EXPORT_SYMBOL_GPL(iscsi_recv_pdu); 617EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
601 618
602void iscsi_conn_error(iscsi_connh_t connh, enum iscsi_err error) 619void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
603{ 620{
604 struct nlmsghdr *nlh; 621 struct nlmsghdr *nlh;
605 struct sk_buff *skb; 622 struct sk_buff *skb;
606 struct iscsi_uevent *ev; 623 struct iscsi_uevent *ev;
607 struct iscsi_cls_conn *conn;
608 int len = NLMSG_SPACE(sizeof(*ev)); 624 int len = NLMSG_SPACE(sizeof(*ev));
609 625
610 conn = iscsi_if_find_conn(connh);
611 BUG_ON(!conn);
612
613 mempool_zone_complete(conn->z_error); 626 mempool_zone_complete(conn->z_error);
614 627
615 skb = mempool_zone_get_skb(conn->z_error); 628 skb = mempool_zone_get_skb(conn->z_error);
@@ -626,7 +639,7 @@ void iscsi_conn_error(iscsi_connh_t connh, enum iscsi_err error)
626 if (atomic_read(&conn->z_error->allocated) >= conn->z_error->hiwat) 639 if (atomic_read(&conn->z_error->allocated) >= conn->z_error->hiwat)
627 ev->iferror = -ENOMEM; 640 ev->iferror = -ENOMEM;
628 ev->r.connerror.error = error; 641 ev->r.connerror.error = error;
629 ev->r.connerror.conn_handle = connh; 642 ev->r.connerror.conn_handle = iscsi_handle(conn);
630 643
631 iscsi_unicast_skb(conn->z_error, skb); 644 iscsi_unicast_skb(conn->z_error, skb);
632 645
@@ -662,8 +675,7 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
662} 675}
663 676
664static int 677static int
665iscsi_if_get_stats(struct iscsi_transport *transport, struct sk_buff *skb, 678iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
666 struct nlmsghdr *nlh)
667{ 679{
668 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 680 struct iscsi_uevent *ev = NLMSG_DATA(nlh);
669 struct iscsi_stats *stats; 681 struct iscsi_stats *stats;
@@ -677,7 +689,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct sk_buff *skb,
677 ISCSI_STATS_CUSTOM_MAX); 689 ISCSI_STATS_CUSTOM_MAX);
678 int err = 0; 690 int err = 0;
679 691
680 conn = iscsi_if_find_conn(ev->u.get_stats.conn_handle); 692 conn = iscsi_conn_lookup(ev->u.get_stats.conn_handle);
681 if (!conn) 693 if (!conn)
682 return -EEXIST; 694 return -EEXIST;
683 695
@@ -707,14 +719,14 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct sk_buff *skb,
707 ((char*)evstat + sizeof(*evstat)); 719 ((char*)evstat + sizeof(*evstat));
708 memset(stats, 0, sizeof(*stats)); 720 memset(stats, 0, sizeof(*stats));
709 721
710 transport->get_stats(ev->u.get_stats.conn_handle, stats); 722 transport->get_stats(conn, stats);
711 actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) + 723 actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) +
712 sizeof(struct iscsi_stats) + 724 sizeof(struct iscsi_stats) +
713 sizeof(struct iscsi_stats_custom) * 725 sizeof(struct iscsi_stats_custom) *
714 stats->custom_length); 726 stats->custom_length);
715 actual_size -= sizeof(*nlhstat); 727 actual_size -= sizeof(*nlhstat);
716 actual_size = NLMSG_LENGTH(actual_size); 728 actual_size = NLMSG_LENGTH(actual_size);
717 skb_trim(skb, NLMSG_ALIGN(actual_size)); 729 skb_trim(skbstat, NLMSG_ALIGN(actual_size));
718 nlhstat->nlmsg_len = actual_size; 730 nlhstat->nlmsg_len = actual_size;
719 731
720 err = iscsi_unicast_skb(conn->z_pdu, skbstat); 732 err = iscsi_unicast_skb(conn->z_pdu, skbstat);
@@ -727,58 +739,34 @@ static int
727iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) 739iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
728{ 740{
729 struct iscsi_transport *transport = priv->iscsi_transport; 741 struct iscsi_transport *transport = priv->iscsi_transport;
730 struct Scsi_Host *shost; 742 struct iscsi_cls_session *session;
731 743 uint32_t sid;
732 if (!transport->create_session)
733 return -EINVAL;
734 744
735 shost = transport->create_session(&priv->t, 745 session = transport->create_session(&priv->t,
736 ev->u.c_session.initial_cmdsn); 746 ev->u.c_session.initial_cmdsn,
737 if (!shost) 747 &sid);
748 if (!session)
738 return -ENOMEM; 749 return -ENOMEM;
739 750
740 ev->r.c_session_ret.session_handle = iscsi_handle(iscsi_hostdata(shost->hostdata)); 751 ev->r.c_session_ret.session_handle = iscsi_handle(session);
741 ev->r.c_session_ret.sid = shost->host_no; 752 ev->r.c_session_ret.sid = sid;
742 return 0; 753 return 0;
743} 754}
744 755
745static int 756static int
746iscsi_if_destroy_session(struct iscsi_internal *priv, struct iscsi_uevent *ev) 757iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
747{ 758{
748 struct iscsi_transport *transport = priv->iscsi_transport;
749
750 struct Scsi_Host *shost;
751
752 if (!transport->destroy_session)
753 return -EINVAL;
754
755 shost = scsi_host_lookup(ev->u.d_session.sid);
756 if (shost == ERR_PTR(-ENXIO))
757 return -EEXIST;
758
759 if (transport->destroy_session)
760 transport->destroy_session(shost);
761 /* ref from host lookup */
762 scsi_host_put(shost);
763 return 0;
764}
765
766static int
767iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev){
768 struct Scsi_Host *shost;
769 struct iscsi_cls_conn *conn; 759 struct iscsi_cls_conn *conn;
760 struct iscsi_cls_session *session;
770 unsigned long flags; 761 unsigned long flags;
771 762
772 if (!transport->create_conn) 763 session = iscsi_session_lookup(ev->u.c_conn.session_handle);
764 if (!session)
773 return -EINVAL; 765 return -EINVAL;
774 766
775 shost = scsi_host_lookup(ev->u.c_conn.sid); 767 conn = transport->create_conn(session, ev->u.c_conn.cid);
776 if (shost == ERR_PTR(-ENXIO))
777 return -EEXIST;
778
779 conn = transport->create_conn(shost, ev->u.c_conn.cid);
780 if (!conn) 768 if (!conn)
781 goto release_ref; 769 return -ENOMEM;
782 770
783 conn->z_pdu = mempool_zone_init(Z_MAX_PDU, 771 conn->z_pdu = mempool_zone_init(Z_MAX_PDU,
784 NLMSG_SPACE(sizeof(struct iscsi_uevent) + 772 NLMSG_SPACE(sizeof(struct iscsi_uevent) +
@@ -800,14 +788,13 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
800 goto free_pdu_pool; 788 goto free_pdu_pool;
801 } 789 }
802 790
803 ev->r.handle = conn->connh = iscsi_handle(conn->dd_data); 791 ev->r.handle = iscsi_handle(conn);
804 792
805 spin_lock_irqsave(&connlock, flags); 793 spin_lock_irqsave(&connlock, flags);
806 list_add(&conn->conn_list, &connlist); 794 list_add(&conn->conn_list, &connlist);
807 conn->active = 1; 795 conn->active = 1;
808 spin_unlock_irqrestore(&connlock, flags); 796 spin_unlock_irqrestore(&connlock, flags);
809 797
810 scsi_host_put(shost);
811 return 0; 798 return 0;
812 799
813free_pdu_pool: 800free_pdu_pool:
@@ -815,8 +802,6 @@ free_pdu_pool:
815destroy_conn: 802destroy_conn:
816 if (transport->destroy_conn) 803 if (transport->destroy_conn)
817 transport->destroy_conn(conn->dd_data); 804 transport->destroy_conn(conn->dd_data);
818release_ref:
819 scsi_host_put(shost);
820 return -ENOMEM; 805 return -ENOMEM;
821} 806}
822 807
@@ -827,13 +812,9 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
827 struct iscsi_cls_conn *conn; 812 struct iscsi_cls_conn *conn;
828 struct mempool_zone *z_error, *z_pdu; 813 struct mempool_zone *z_error, *z_pdu;
829 814
830 conn = iscsi_if_find_conn(ev->u.d_conn.conn_handle); 815 conn = iscsi_conn_lookup(ev->u.d_conn.conn_handle);
831 if (!conn) 816 if (!conn)
832 return -EEXIST;
833
834 if (!transport->destroy_conn)
835 return -EINVAL; 817 return -EINVAL;
836
837 spin_lock_irqsave(&connlock, flags); 818 spin_lock_irqsave(&connlock, flags);
838 conn->active = 0; 819 conn->active = 0;
839 list_del(&conn->conn_list); 820 list_del(&conn->conn_list);
@@ -858,23 +839,27 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
858 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 839 struct iscsi_uevent *ev = NLMSG_DATA(nlh);
859 struct iscsi_transport *transport = NULL; 840 struct iscsi_transport *transport = NULL;
860 struct iscsi_internal *priv; 841 struct iscsi_internal *priv;
861 842 struct iscsi_cls_session *session;
862 if (NETLINK_CREDS(skb)->uid) 843 struct iscsi_cls_conn *conn;
863 return -EPERM;
864 844
865 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); 845 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
866 if (!priv) 846 if (!priv)
867 return -EINVAL; 847 return -EINVAL;
868 transport = priv->iscsi_transport; 848 transport = priv->iscsi_transport;
869 849
870 daemon_pid = NETLINK_CREDS(skb)->pid; 850 if (!try_module_get(transport->owner))
851 return -EINVAL;
871 852
872 switch (nlh->nlmsg_type) { 853 switch (nlh->nlmsg_type) {
873 case ISCSI_UEVENT_CREATE_SESSION: 854 case ISCSI_UEVENT_CREATE_SESSION:
874 err = iscsi_if_create_session(priv, ev); 855 err = iscsi_if_create_session(priv, ev);
875 break; 856 break;
876 case ISCSI_UEVENT_DESTROY_SESSION: 857 case ISCSI_UEVENT_DESTROY_SESSION:
877 err = iscsi_if_destroy_session(priv, ev); 858 session = iscsi_session_lookup(ev->u.d_session.session_handle);
859 if (session)
860 transport->destroy_session(session);
861 else
862 err = -EINVAL;
878 break; 863 break;
879 case ISCSI_UEVENT_CREATE_CONN: 864 case ISCSI_UEVENT_CREATE_CONN:
880 err = iscsi_if_create_conn(transport, ev); 865 err = iscsi_if_create_conn(transport, ev);
@@ -883,56 +868,64 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
883 err = iscsi_if_destroy_conn(transport, ev); 868 err = iscsi_if_destroy_conn(transport, ev);
884 break; 869 break;
885 case ISCSI_UEVENT_BIND_CONN: 870 case ISCSI_UEVENT_BIND_CONN:
886 if (!iscsi_if_find_conn(ev->u.b_conn.conn_handle)) 871 session = iscsi_session_lookup(ev->u.b_conn.session_handle);
887 return -EEXIST; 872 conn = iscsi_conn_lookup(ev->u.b_conn.conn_handle);
888 ev->r.retcode = transport->bind_conn( 873
889 ev->u.b_conn.session_handle, 874 if (session && conn)
890 ev->u.b_conn.conn_handle, 875 ev->r.retcode = transport->bind_conn(session, conn,
891 ev->u.b_conn.transport_fd, 876 ev->u.b_conn.transport_fd,
892 ev->u.b_conn.is_leading); 877 ev->u.b_conn.is_leading);
878 else
879 err = -EINVAL;
893 break; 880 break;
894 case ISCSI_UEVENT_SET_PARAM: 881 case ISCSI_UEVENT_SET_PARAM:
895 if (!iscsi_if_find_conn(ev->u.set_param.conn_handle)) 882 conn = iscsi_conn_lookup(ev->u.set_param.conn_handle);
896 return -EEXIST; 883 if (conn)
897 ev->r.retcode = transport->set_param( 884 ev->r.retcode = transport->set_param(conn,
898 ev->u.set_param.conn_handle, 885 ev->u.set_param.param, ev->u.set_param.value);
899 ev->u.set_param.param, ev->u.set_param.value); 886 else
887 err = -EINVAL;
900 break; 888 break;
901 case ISCSI_UEVENT_START_CONN: 889 case ISCSI_UEVENT_START_CONN:
902 if (!iscsi_if_find_conn(ev->u.start_conn.conn_handle)) 890 conn = iscsi_conn_lookup(ev->u.start_conn.conn_handle);
903 return -EEXIST; 891 if (conn)
904 ev->r.retcode = transport->start_conn( 892 ev->r.retcode = transport->start_conn(conn);
905 ev->u.start_conn.conn_handle); 893 else
894 err = -EINVAL;
895
906 break; 896 break;
907 case ISCSI_UEVENT_STOP_CONN: 897 case ISCSI_UEVENT_STOP_CONN:
908 if (!iscsi_if_find_conn(ev->u.stop_conn.conn_handle)) 898 conn = iscsi_conn_lookup(ev->u.stop_conn.conn_handle);
909 return -EEXIST; 899 if (conn)
910 transport->stop_conn(ev->u.stop_conn.conn_handle, 900 transport->stop_conn(conn, ev->u.stop_conn.flag);
911 ev->u.stop_conn.flag); 901 else
902 err = -EINVAL;
912 break; 903 break;
913 case ISCSI_UEVENT_SEND_PDU: 904 case ISCSI_UEVENT_SEND_PDU:
914 if (!iscsi_if_find_conn(ev->u.send_pdu.conn_handle)) 905 conn = iscsi_conn_lookup(ev->u.send_pdu.conn_handle);
915 return -EEXIST; 906 if (conn)
916 ev->r.retcode = transport->send_pdu( 907 ev->r.retcode = transport->send_pdu(conn,
917 ev->u.send_pdu.conn_handle, 908 (struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
918 (struct iscsi_hdr*)((char*)ev + sizeof(*ev)), 909 (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
919 (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, 910 ev->u.send_pdu.data_size);
920 ev->u.send_pdu.data_size); 911 else
912 err = -EINVAL;
921 break; 913 break;
922 case ISCSI_UEVENT_GET_STATS: 914 case ISCSI_UEVENT_GET_STATS:
923 err = iscsi_if_get_stats(transport, skb, nlh); 915 err = iscsi_if_get_stats(transport, nlh);
924 break; 916 break;
925 default: 917 default:
926 err = -EINVAL; 918 err = -EINVAL;
927 break; 919 break;
928 } 920 }
929 921
922 module_put(transport->owner);
930 return err; 923 return err;
931} 924}
932 925
933/* Get message from skb (based on rtnetlink_rcv_skb). Each message is 926/* Get message from skb (based on rtnetlink_rcv_skb). Each message is
934 * processed by iscsi_if_recv_msg. Malformed skbs with wrong length are 927 * processed by iscsi_if_recv_msg. Malformed skbs with wrong length or
935 * discarded silently. */ 928 * invalid creds are discarded silently. */
936static void 929static void
937iscsi_if_rx(struct sock *sk, int len) 930iscsi_if_rx(struct sock *sk, int len)
938{ 931{
@@ -940,6 +933,12 @@ iscsi_if_rx(struct sock *sk, int len)
940 933
941 mutex_lock(&rx_queue_mutex); 934 mutex_lock(&rx_queue_mutex);
942 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 935 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
936 if (NETLINK_CREDS(skb)->uid) {
937 skb_pull(skb, skb->len);
938 goto free_skb;
939 }
940 daemon_pid = NETLINK_CREDS(skb)->pid;
941
943 while (skb->len >= NLMSG_SPACE(0)) { 942 while (skb->len >= NLMSG_SPACE(0)) {
944 int err; 943 int err;
945 uint32_t rlen; 944 uint32_t rlen;
@@ -951,10 +950,12 @@ iscsi_if_rx(struct sock *sk, int len)
951 skb->len < nlh->nlmsg_len) { 950 skb->len < nlh->nlmsg_len) {
952 break; 951 break;
953 } 952 }
953
954 ev = NLMSG_DATA(nlh); 954 ev = NLMSG_DATA(nlh);
955 rlen = NLMSG_ALIGN(nlh->nlmsg_len); 955 rlen = NLMSG_ALIGN(nlh->nlmsg_len);
956 if (rlen > skb->len) 956 if (rlen > skb->len)
957 rlen = skb->len; 957 rlen = skb->len;
958
958 err = iscsi_if_recv_msg(skb, nlh); 959 err = iscsi_if_recv_msg(skb, nlh);
959 if (err) { 960 if (err) {
960 ev->type = ISCSI_KEVENT_IF_ERROR; 961 ev->type = ISCSI_KEVENT_IF_ERROR;
@@ -978,6 +979,7 @@ iscsi_if_rx(struct sock *sk, int len)
978 } while (err < 0 && err != -ECONNREFUSED); 979 } while (err < 0 && err != -ECONNREFUSED);
979 skb_pull(skb, rlen); 980 skb_pull(skb, rlen);
980 } 981 }
982free_skb:
981 kfree_skb(skb); 983 kfree_skb(skb);
982 } 984 }
983 mutex_unlock(&rx_queue_mutex); 985 mutex_unlock(&rx_queue_mutex);
@@ -997,7 +999,7 @@ show_conn_int_param_##param(struct class_device *cdev, char *buf) \
997 struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \ 999 struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
998 struct iscsi_transport *t = conn->transport; \ 1000 struct iscsi_transport *t = conn->transport; \
999 \ 1001 \
1000 t->get_conn_param(conn->dd_data, param, &value); \ 1002 t->get_conn_param(conn, param, &value); \
1001 return snprintf(buf, 20, format"\n", value); \ 1003 return snprintf(buf, 20, format"\n", value); \
1002} 1004}
1003 1005
@@ -1024,10 +1026,9 @@ show_session_int_param_##param(struct class_device *cdev, char *buf) \
1024{ \ 1026{ \
1025 uint32_t value = 0; \ 1027 uint32_t value = 0; \
1026 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \ 1028 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
1027 struct Scsi_Host *shost = iscsi_session_to_shost(session); \
1028 struct iscsi_transport *t = session->transport; \ 1029 struct iscsi_transport *t = session->transport; \
1029 \ 1030 \
1030 t->get_session_param(shost, param, &value); \ 1031 t->get_session_param(session, param, &value); \
1031 return snprintf(buf, 20, format"\n", value); \ 1032 return snprintf(buf, 20, format"\n", value); \
1032} 1033}
1033 1034
@@ -1121,7 +1122,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
1121 return NULL; 1122 return NULL;
1122 memset(priv, 0, sizeof(*priv)); 1123 memset(priv, 0, sizeof(*priv));
1123 INIT_LIST_HEAD(&priv->list); 1124 INIT_LIST_HEAD(&priv->list);
1124 INIT_LIST_HEAD(&priv->sessions);
1125 priv->iscsi_transport = tt; 1125 priv->iscsi_transport = tt;
1126 1126
1127 priv->cdev.class = &iscsi_transport_class; 1127 priv->cdev.class = &iscsi_transport_class;
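Note on the scsi_transport_iscsi change above: besides switching the event handlers from raw connection handles to looked-up iscsi_cls_conn objects, the receive path now refuses netlink messages that were not sent by root and records the sender's pid (daemon_pid). A minimal sketch of that credential check, assuming the 2.6-era netlink helpers (NETLINK_CREDS, skb_dequeue) used by the driver; the function name is illustrative only:

	/* sketch: drop netlink messages from non-root senders before parsing */
	#include <linux/netlink.h>
	#include <linux/skbuff.h>
	#include <net/sock.h>

	static void example_if_rx(struct sock *sk)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
			if (NETLINK_CREDS(skb)->uid) {
				/* not root: discard the payload untouched */
				skb_pull(skb, skb->len);
				kfree_skb(skb);
				continue;
			}
			/* ... parse the nlmsghdr(s) and dispatch as iscsi_if_rx() does ... */
			kfree_skb(skb);
		}
	}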
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 8260f040d39c..f4854c33f48d 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3588,7 +3588,7 @@ static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int
3588 3588
3589 if (pm) { 3589 if (pm) {
3590 dp_scr = scr_to_cpu(pm->ret); 3590 dp_scr = scr_to_cpu(pm->ret);
3591 dp_ofs -= scr_to_cpu(pm->sg.size); 3591 dp_ofs -= scr_to_cpu(pm->sg.size) & 0x00ffffff;
3592 } 3592 }
3593 3593
3594 /* 3594 /*
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index b1fc97d5f643..244e8ff11977 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2198,7 +2198,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
2198 touch_nmi_watchdog(); 2198 touch_nmi_watchdog();
2199 2199
2200 /* 2200 /*
2201 * First save the UER then disable the interrupts 2201 * First save the IER then disable the interrupts
2202 */ 2202 */
2203 ier = serial_in(up, UART_IER); 2203 ier = serial_in(up, UART_IER);
2204 2204
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 0f4361c8466b..b3c561abe3f6 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -902,8 +902,8 @@ config SERIAL_JSM
902 something like this to connect more than two modems to your Linux 902 something like this to connect more than two modems to your Linux
903 box, for instance in order to become a dial-in server. This driver 903 box, for instance in order to become a dial-in server. This driver
904 supports PCI boards only. 904 supports PCI boards only.
905 If you have a card like this, say Y here and read the file 905
906 <file:Documentation/jsm.txt>. 906 If you have a card like this, say Y here, otherwise say N.
907 907
908 To compile this driver as a module, choose M here: the 908 To compile this driver as a module, choose M here: the
909 module will be called jsm. 909 module will be called jsm.
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index 38d22729b129..c9a7cdf6d543 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -1243,7 +1243,7 @@ static int __devexit gbefb_remove(struct platform_device* p_dev)
1243 (void *)gbe_tiles.cpu, gbe_tiles.dma); 1243 (void *)gbe_tiles.cpu, gbe_tiles.dma);
1244 release_mem_region(GBE_BASE, sizeof(struct sgi_gbe)); 1244 release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
1245 iounmap(gbe); 1245 iounmap(gbe);
1246 gbefb_remove_sysfs(dev); 1246 gbefb_remove_sysfs(&p_dev->dev);
1247 framebuffer_release(info); 1247 framebuffer_release(info);
1248 1248
1249 return 0; 1249 return 0;
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index b85e2b180a44..a2e201dc40f7 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -843,6 +843,9 @@ static int neofb_set_par(struct fb_info *info)
843 843
844 par->SysIfaceCntl2 = 0xc0; /* VESA Bios sets this to 0x80! */ 844 par->SysIfaceCntl2 = 0xc0; /* VESA Bios sets this to 0x80! */
845 845
846 /* Initialize: by default, we want display config register to be read */
847 par->PanelDispCntlRegRead = 1;
848
846 /* Enable any user specified display devices. */ 849 /* Enable any user specified display devices. */
847 par->PanelDispCntlReg1 = 0x00; 850 par->PanelDispCntlReg1 = 0x00;
848 if (par->internal_display) 851 if (par->internal_display)
@@ -1334,11 +1337,17 @@ static int neofb_blank(int blank_mode, struct fb_info *info)
1334 struct neofb_par *par = info->par; 1337 struct neofb_par *par = info->par;
1335 int seqflags, lcdflags, dpmsflags, reg; 1338 int seqflags, lcdflags, dpmsflags, reg;
1336 1339
1340
1337 /* 1341 /*
1338 * Reload the value stored in the register, might have been changed via 1342 * Reload the value stored in the register, if sensible. It might have
1339 * FN keystroke 1343 * been changed via FN keystroke.
1340 */ 1344 */
1341 par->PanelDispCntlReg1 = vga_rgfx(NULL, 0x20) & 0x03; 1345 if (par->PanelDispCntlRegRead) {
1346 neoUnlock();
1347 par->PanelDispCntlReg1 = vga_rgfx(NULL, 0x20) & 0x03;
1348 neoLock(&par->state);
1349 }
1350 par->PanelDispCntlRegRead = !blank_mode;
1342 1351
1343 switch (blank_mode) { 1352 switch (blank_mode) {
1344 case FB_BLANK_POWERDOWN: /* powerdown - both sync lines down */ 1353 case FB_BLANK_POWERDOWN: /* powerdown - both sync lines down */
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index d574dd3c9c8a..9451932fbaf2 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -82,7 +82,6 @@
82#include <linux/fb.h> 82#include <linux/fb.h>
83#include <linux/init.h> 83#include <linux/init.h>
84#include <linux/dma-mapping.h> 84#include <linux/dma-mapping.h>
85#include <linux/string.h>
86#include <linux/interrupt.h> 85#include <linux/interrupt.h>
87#include <linux/workqueue.h> 86#include <linux/workqueue.h>
88#include <linux/wait.h> 87#include <linux/wait.h>
diff --git a/fs/compat.c b/fs/compat.c
index a2ba78bdf7f7..5333c7d7427f 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1757,7 +1757,7 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
1757 goto sticky; 1757 goto sticky;
1758 rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)); 1758 rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ));
1759 rtv.tv_sec = timeout; 1759 rtv.tv_sec = timeout;
1760 if (compat_timeval_compare(&rtv, &tv) < 0) 1760 if (compat_timeval_compare(&rtv, &tv) >= 0)
1761 rtv = tv; 1761 rtv = tv;
1762 if (copy_to_user(tvp, &rtv, sizeof(rtv))) { 1762 if (copy_to_user(tvp, &rtv, sizeof(rtv))) {
1763sticky: 1763sticky:
@@ -1834,7 +1834,7 @@ asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
1834 rts.tv_sec++; 1834 rts.tv_sec++;
1835 rts.tv_nsec -= NSEC_PER_SEC; 1835 rts.tv_nsec -= NSEC_PER_SEC;
1836 } 1836 }
1837 if (compat_timespec_compare(&rts, &ts) < 0) 1837 if (compat_timespec_compare(&rts, &ts) >= 0)
1838 rts = ts; 1838 rts = ts;
1839 copy_to_user(tsp, &rts, sizeof(rts)); 1839 copy_to_user(tsp, &rts, sizeof(rts));
1840 } 1840 }
@@ -1934,7 +1934,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
1934 rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * 1934 rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) *
1935 1000; 1935 1000;
1936 rts.tv_sec = timeout; 1936 rts.tv_sec = timeout;
1937 if (compat_timespec_compare(&rts, &ts) < 0) 1937 if (compat_timespec_compare(&rts, &ts) >= 0)
1938 rts = ts; 1938 rts = ts;
1939 if (copy_to_user(tsp, &rts, sizeof(rts))) { 1939 if (copy_to_user(tsp, &rts, sizeof(rts))) {
1940sticky: 1940sticky:
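The three comparison flips in fs/compat.c above (and the matching ones in fs/select.c later in this patch) fix the same problem: converting the remaining jiffies back into a timeval/timespec can round the value up past what the caller originally passed in, so the remaining time is now clamped to the requested timeout before being copied back to user space. A small user-space illustration of the clamp; clamp_remaining() is a hypothetical helper, not part of the patch:

	#include <stdio.h>
	#include <sys/time.h>

	/* never report more time remaining than was originally requested */
	static struct timeval clamp_remaining(struct timeval left, struct timeval asked)
	{
		/* timercmp() is the glibc analogue of the kernel's timeval_compare() */
		if (timercmp(&left, &asked, >))
			return asked;
		return left;
	}

	int main(void)
	{
		struct timeval asked = { .tv_sec = 1, .tv_usec = 500000 };
		struct timeval left  = { .tv_sec = 1, .tv_usec = 500999 }; /* rounded up */
		struct timeval out = clamp_remaining(left, asked);

		printf("%ld.%06ld\n", (long)out.tv_sec, (long)out.tv_usec); /* prints 1.500000 */
		return 0;
	}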
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index a2ca3107d475..86ae8e93adb9 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -792,18 +792,20 @@ ext2_xattr_delete_inode(struct inode *inode)
792 ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1); 792 ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
793 get_bh(bh); 793 get_bh(bh);
794 bforget(bh); 794 bforget(bh);
795 unlock_buffer(bh);
795 } else { 796 } else {
796 HDR(bh)->h_refcount = cpu_to_le32( 797 HDR(bh)->h_refcount = cpu_to_le32(
797 le32_to_cpu(HDR(bh)->h_refcount) - 1); 798 le32_to_cpu(HDR(bh)->h_refcount) - 1);
798 if (ce) 799 if (ce)
799 mb_cache_entry_release(ce); 800 mb_cache_entry_release(ce);
801 ea_bdebug(bh, "refcount now=%d",
802 le32_to_cpu(HDR(bh)->h_refcount));
803 unlock_buffer(bh);
800 mark_buffer_dirty(bh); 804 mark_buffer_dirty(bh);
801 if (IS_SYNC(inode)) 805 if (IS_SYNC(inode))
802 sync_dirty_buffer(bh); 806 sync_dirty_buffer(bh);
803 DQUOT_FREE_BLOCK(inode, 1); 807 DQUOT_FREE_BLOCK(inode, 1);
804 } 808 }
805 ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount) - 1);
806 unlock_buffer(bh);
807 EXT2_I(inode)->i_file_acl = 0; 809 EXT2_I(inode)->i_file_acl = 0;
808 810
809cleanup: 811cleanup:
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index f556a0d5c0d3..0c9a2ee54c91 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -66,6 +66,12 @@ static void restore_sigs(sigset_t *oldset)
66 sigprocmask(SIG_SETMASK, oldset, NULL); 66 sigprocmask(SIG_SETMASK, oldset, NULL);
67} 67}
68 68
69/*
70 * Reset request, so that it can be reused
71 *
72 * The caller must be _very_ careful to make sure, that it is holding
73 * the only reference to req
74 */
69void fuse_reset_request(struct fuse_req *req) 75void fuse_reset_request(struct fuse_req *req)
70{ 76{
71 int preallocated = req->preallocated; 77 int preallocated = req->preallocated;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 296351615b00..6f05379b0a0d 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -116,9 +116,14 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir)
116/* Special case for failed iget in CREATE */ 116/* Special case for failed iget in CREATE */
117static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) 117static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
118{ 118{
119 u64 nodeid = req->in.h.nodeid; 119 /* If called from end_io_requests(), req has more than one
120 fuse_reset_request(req); 120 reference and fuse_reset_request() cannot work */
121 fuse_send_forget(fc, req, nodeid, 1); 121 if (fc->connected) {
122 u64 nodeid = req->in.h.nodeid;
123 fuse_reset_request(req);
124 fuse_send_forget(fc, req, nodeid, 1);
125 } else
126 fuse_put_request(fc, req);
122} 127}
123 128
124void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff, 129void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff,
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 42eb53b5293b..23ceaa7127b4 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -208,6 +208,9 @@ static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
208#define DLM_LOCK_RES_IN_PROGRESS 0x00000010 208#define DLM_LOCK_RES_IN_PROGRESS 0x00000010
209#define DLM_LOCK_RES_MIGRATING 0x00000020 209#define DLM_LOCK_RES_MIGRATING 0x00000020
210 210
211/* max milliseconds to wait to sync up a network failure with a node death */
212#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)
213
211#define DLM_PURGE_INTERVAL_MS (8 * 1000) 214#define DLM_PURGE_INTERVAL_MS (8 * 1000)
212 215
213struct dlm_lock_resource 216struct dlm_lock_resource
@@ -658,6 +661,7 @@ int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
658void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); 661void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
659void dlm_wait_for_recovery(struct dlm_ctxt *dlm); 662void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
660int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node); 663int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
664int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
661 665
662void dlm_put(struct dlm_ctxt *dlm); 666void dlm_put(struct dlm_ctxt *dlm);
663struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); 667struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index 6001b22a997d..f66e2d818ccd 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -392,6 +392,11 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
392 } else { 392 } else {
393 mlog_errno(tmpret); 393 mlog_errno(tmpret);
394 if (dlm_is_host_down(tmpret)) { 394 if (dlm_is_host_down(tmpret)) {
395 /* instead of logging the same network error over
396 * and over, sleep here and wait for the heartbeat
397 * to notice the node is dead. times out after 5s. */
398 dlm_wait_for_node_death(dlm, res->owner,
399 DLM_NODE_DEATH_WAIT_MAX);
395 ret = DLM_RECOVERING; 400 ret = DLM_RECOVERING;
396 mlog(0, "node %u died so returning DLM_RECOVERING " 401 mlog(0, "node %u died so returning DLM_RECOVERING "
397 "from convert message!\n", res->owner); 402 "from convert message!\n", res->owner);
@@ -421,7 +426,7 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
421 struct dlm_lockstatus *lksb; 426 struct dlm_lockstatus *lksb;
422 enum dlm_status status = DLM_NORMAL; 427 enum dlm_status status = DLM_NORMAL;
423 u32 flags; 428 u32 flags;
424 int call_ast = 0, kick_thread = 0; 429 int call_ast = 0, kick_thread = 0, ast_reserved = 0;
425 430
426 if (!dlm_grab(dlm)) { 431 if (!dlm_grab(dlm)) {
427 dlm_error(DLM_REJECTED); 432 dlm_error(DLM_REJECTED);
@@ -490,6 +495,7 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
490 status = __dlm_lockres_state_to_status(res); 495 status = __dlm_lockres_state_to_status(res);
491 if (status == DLM_NORMAL) { 496 if (status == DLM_NORMAL) {
492 __dlm_lockres_reserve_ast(res); 497 __dlm_lockres_reserve_ast(res);
498 ast_reserved = 1;
493 res->state |= DLM_LOCK_RES_IN_PROGRESS; 499 res->state |= DLM_LOCK_RES_IN_PROGRESS;
494 status = __dlmconvert_master(dlm, res, lock, flags, 500 status = __dlmconvert_master(dlm, res, lock, flags,
495 cnv->requested_type, 501 cnv->requested_type,
@@ -512,10 +518,10 @@ leave:
512 else 518 else
513 dlm_lock_put(lock); 519 dlm_lock_put(lock);
514 520
515 /* either queue the ast or release it */ 521 /* either queue the ast or release it, if reserved */
516 if (call_ast) 522 if (call_ast)
517 dlm_queue_ast(dlm, lock); 523 dlm_queue_ast(dlm, lock);
518 else 524 else if (ast_reserved)
519 dlm_lockres_release_ast(dlm, res); 525 dlm_lockres_release_ast(dlm, res);
520 526
521 if (kick_thread) 527 if (kick_thread)
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index d1a0038557a3..671d4ff222cc 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -220,6 +220,17 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
220 dlm_error(status); 220 dlm_error(status);
221 dlm_revert_pending_lock(res, lock); 221 dlm_revert_pending_lock(res, lock);
222 dlm_lock_put(lock); 222 dlm_lock_put(lock);
223 } else if (dlm_is_recovery_lock(res->lockname.name,
224 res->lockname.len)) {
225 /* special case for the $RECOVERY lock.
226 * there will never be an AST delivered to put
227 * this lock on the proper secondary queue
228 * (granted), so do it manually. */
229 mlog(0, "%s: $RECOVERY lock for this node (%u) is "
230 "mastered by %u; got lock, manually granting (no ast)\n",
231 dlm->name, dlm->node_num, res->owner);
232 list_del_init(&lock->list);
233 list_add_tail(&lock->list, &res->granted);
223 } 234 }
224 spin_unlock(&res->spinlock); 235 spin_unlock(&res->spinlock);
225 236
@@ -646,7 +657,19 @@ retry_lock:
646 mlog(0, "retrying lock with migration/" 657 mlog(0, "retrying lock with migration/"
647 "recovery/in progress\n"); 658 "recovery/in progress\n");
648 msleep(100); 659 msleep(100);
649 dlm_wait_for_recovery(dlm); 660 /* no waiting for dlm_reco_thread */
661 if (recovery) {
662 if (status == DLM_RECOVERING) {
663 mlog(0, "%s: got RECOVERING "
664 "for $REOCVERY lock, master "
665 "was %u\n", dlm->name,
666 res->owner);
667 dlm_wait_for_node_death(dlm, res->owner,
668 DLM_NODE_DEATH_WAIT_MAX);
669 }
670 } else {
671 dlm_wait_for_recovery(dlm);
672 }
650 goto retry_lock; 673 goto retry_lock;
651 } 674 }
652 675
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index a3194fe173d9..2e2e95e69499 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2482,7 +2482,9 @@ top:
2482 atomic_set(&mle->woken, 1); 2482 atomic_set(&mle->woken, 1);
2483 spin_unlock(&mle->spinlock); 2483 spin_unlock(&mle->spinlock);
2484 wake_up(&mle->wq); 2484 wake_up(&mle->wq);
2485 /* final put will take care of list removal */ 2485 /* do not need events any longer, so detach
2486 * from heartbeat */
2487 __dlm_mle_detach_hb_events(dlm, mle);
2486 __dlm_put_mle(mle); 2488 __dlm_put_mle(mle);
2487 } 2489 }
2488 continue; 2490 continue;
@@ -2537,6 +2539,9 @@ top:
2537 spin_unlock(&res->spinlock); 2539 spin_unlock(&res->spinlock);
2538 dlm_lockres_put(res); 2540 dlm_lockres_put(res);
2539 2541
2542 /* about to get rid of mle, detach from heartbeat */
2543 __dlm_mle_detach_hb_events(dlm, mle);
2544
2540 /* dump the mle */ 2545 /* dump the mle */
2541 spin_lock(&dlm->master_lock); 2546 spin_lock(&dlm->master_lock);
2542 __dlm_put_mle(mle); 2547 __dlm_put_mle(mle);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 186e9a76aa58..ed76bda1a534 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -278,6 +278,24 @@ int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
278 return dead; 278 return dead;
279} 279}
280 280
281int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
282{
283 if (timeout) {
284 mlog(ML_NOTICE, "%s: waiting %dms for notification of "
285 "death of node %u\n", dlm->name, timeout, node);
286 wait_event_timeout(dlm->dlm_reco_thread_wq,
287 dlm_is_node_dead(dlm, node),
288 msecs_to_jiffies(timeout));
289 } else {
290 mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
291 "of death of node %u\n", dlm->name, node);
292 wait_event(dlm->dlm_reco_thread_wq,
293 dlm_is_node_dead(dlm, node));
294 }
295 /* for now, return 0 */
296 return 0;
297}
298
281/* callers of the top-level api calls (dlmlock/dlmunlock) should 299/* callers of the top-level api calls (dlmlock/dlmunlock) should
282 * block on the dlm->reco.event when recovery is in progress. 300 * block on the dlm->reco.event when recovery is in progress.
283 * the dlm recovery thread will set this state when it begins 301 * the dlm recovery thread will set this state when it begins
@@ -2032,6 +2050,30 @@ again:
2032 dlm->reco.new_master); 2050 dlm->reco.new_master);
2033 status = -EEXIST; 2051 status = -EEXIST;
2034 } else { 2052 } else {
2053 status = 0;
2054
2055 /* see if recovery was already finished elsewhere */
2056 spin_lock(&dlm->spinlock);
2057 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2058 status = -EINVAL;
2059 mlog(0, "%s: got reco EX lock, but "
2060 "node got recovered already\n", dlm->name);
2061 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2062 mlog(ML_ERROR, "%s: new master is %u "
2063 "but no dead node!\n",
2064 dlm->name, dlm->reco.new_master);
2065 BUG();
2066 }
2067 }
2068 spin_unlock(&dlm->spinlock);
2069 }
2070
2071 /* if this node has actually become the recovery master,
2072 * set the master and send the messages to begin recovery */
2073 if (!status) {
2074 mlog(0, "%s: dead=%u, this=%u, sending "
2075 "begin_reco now\n", dlm->name,
2076 dlm->reco.dead_node, dlm->node_num);
2035 status = dlm_send_begin_reco_message(dlm, 2077 status = dlm_send_begin_reco_message(dlm,
2036 dlm->reco.dead_node); 2078 dlm->reco.dead_node);
2037 /* this always succeeds */ 2079 /* this always succeeds */
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index fa0bcac5ceae..d329c9df90ae 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1584,10 +1584,9 @@ static int ocfs2_commit_thread(void *arg)
1584 while (!(kthread_should_stop() && 1584 while (!(kthread_should_stop() &&
1585 atomic_read(&journal->j_num_trans) == 0)) { 1585 atomic_read(&journal->j_num_trans) == 0)) {
1586 1586
1587 wait_event_interruptible_timeout(osb->checkpoint_event, 1587 wait_event_interruptible(osb->checkpoint_event,
1588 atomic_read(&journal->j_num_trans) 1588 atomic_read(&journal->j_num_trans)
1589 || kthread_should_stop(), 1589 || kthread_should_stop());
1590 OCFS2_CHECKPOINT_INTERVAL);
1591 1590
1592 status = ocfs2_commit_cache(osb); 1591 status = ocfs2_commit_cache(osb);
1593 if (status < 0) 1592 if (status < 0)
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 7d0a816184fa..2f3a6acdac45 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -29,8 +29,6 @@
29#include <linux/fs.h> 29#include <linux/fs.h>
30#include <linux/jbd.h> 30#include <linux/jbd.h>
31 31
32#define OCFS2_CHECKPOINT_INTERVAL (8 * HZ)
33
34enum ocfs2_journal_state { 32enum ocfs2_journal_state {
35 OCFS2_JOURNAL_FREE = 0, 33 OCFS2_JOURNAL_FREE = 0,
36 OCFS2_JOURNAL_LOADED, 34 OCFS2_JOURNAL_LOADED,
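The ocfs2 journal change above removes the fixed 8-second checkpoint poll: with OCFS2_CHECKPOINT_INTERVAL gone, the commit thread sleeps until a transaction arrives or it is told to stop, instead of waking periodically to find nothing to do. Shown side by side for contrast (a sketch using the names from the surrounding diff; it assumes, as the patch does, that whoever bumps j_num_trans also wakes checkpoint_event):

	/* before: wake at least every 8 seconds, even when idle */
	wait_event_interruptible_timeout(osb->checkpoint_event,
					 atomic_read(&journal->j_num_trans)
					 || kthread_should_stop(),
					 8 * HZ);

	/* after: purely event driven */
	wait_event_interruptible(osb->checkpoint_event,
				 atomic_read(&journal->j_num_trans)
				 || kthread_should_stop());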
diff --git a/fs/select.c b/fs/select.c
index 6ce68a9c8976..1815a57d2255 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -404,7 +404,7 @@ asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
404 goto sticky; 404 goto sticky;
405 rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)); 405 rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ));
406 rtv.tv_sec = timeout; 406 rtv.tv_sec = timeout;
407 if (timeval_compare(&rtv, &tv) < 0) 407 if (timeval_compare(&rtv, &tv) >= 0)
408 rtv = tv; 408 rtv = tv;
409 if (copy_to_user(tvp, &rtv, sizeof(rtv))) { 409 if (copy_to_user(tvp, &rtv, sizeof(rtv))) {
410sticky: 410sticky:
@@ -471,7 +471,7 @@ asmlinkage long sys_pselect7(int n, fd_set __user *inp, fd_set __user *outp,
471 rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * 471 rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) *
472 1000; 472 1000;
473 rts.tv_sec = timeout; 473 rts.tv_sec = timeout;
474 if (timespec_compare(&rts, &ts) < 0) 474 if (timespec_compare(&rts, &ts) >= 0)
475 rts = ts; 475 rts = ts;
476 if (copy_to_user(tsp, &rts, sizeof(rts))) { 476 if (copy_to_user(tsp, &rts, sizeof(rts))) {
477sticky: 477sticky:
@@ -775,7 +775,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
775 rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * 775 rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) *
776 1000; 776 1000;
777 rts.tv_sec = timeout; 777 rts.tv_sec = timeout;
778 if (timespec_compare(&rts, &ts) < 0) 778 if (timespec_compare(&rts, &ts) >= 0)
779 rts = ts; 779 rts = ts;
780 if (copy_to_user(tsp, &rts, sizeof(rts))) { 780 if (copy_to_user(tsp, &rts, sizeof(rts))) {
781 sticky: 781 sticky:
diff --git a/include/asm-alpha/mman.h b/include/asm-alpha/mman.h
index a21515c16a43..5f24c755f577 100644
--- a/include/asm-alpha/mman.h
+++ b/include/asm-alpha/mman.h
@@ -42,9 +42,11 @@
42#define MADV_WILLNEED 3 /* will need these pages */ 42#define MADV_WILLNEED 3 /* will need these pages */
43#define MADV_SPACEAVAIL 5 /* ensure resources are available */ 43#define MADV_SPACEAVAIL 5 /* ensure resources are available */
44#define MADV_DONTNEED 6 /* don't need these pages */ 44#define MADV_DONTNEED 6 /* don't need these pages */
45#define MADV_REMOVE 7 /* remove these pages & resources */ 45
46#define MADV_DONTFORK 0x30 /* dont inherit across fork */ 46/* common/generic parameters */
47#define MADV_DOFORK 0x31 /* do inherit across fork */ 47#define MADV_REMOVE 9 /* remove these pages & resources */
48#define MADV_DONTFORK 10 /* don't inherit across fork */
49#define MADV_DOFORK 11 /* do inherit across fork */
48 50
49/* compatibility flags */ 51/* compatibility flags */
50#define MAP_ANON MAP_ANONYMOUS 52#define MAP_ANON MAP_ANONYMOUS
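On alpha the architecture-specific MADV_* values are kept, but MADV_REMOVE, MADV_DONTFORK and MADV_DOFORK move to the new cross-architecture numbers (9, 10 and 11) that asm-generic/mman.h introduces below. User space that wants the new advice flags on a libc whose headers predate them typically adds a guarded fallback; an illustrative example (the fallback define is only needed for such older headers):

	#include <sys/mman.h>

	#ifndef MADV_DONTFORK
	#define MADV_DONTFORK 10	/* common value after this change */
	#endif

	int main(void)
	{
		size_t len = 4096;
		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (buf == MAP_FAILED)
			return 1;

		/* ask the kernel not to duplicate this mapping into fork() children */
		if (madvise(buf, len, MADV_DONTFORK) != 0)
			return 1;	/* kernels without this support reject unknown advice */

		munmap(buf, len);
		return 0;
	}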
diff --git a/include/asm-arm/mman.h b/include/asm-arm/mman.h
index 693ed859e632..54570d2e95b7 100644
--- a/include/asm-arm/mman.h
+++ b/include/asm-arm/mman.h
@@ -1,19 +1,7 @@
1#ifndef __ARM_MMAN_H__ 1#ifndef __ARM_MMAN_H__
2#define __ARM_MMAN_H__ 2#define __ARM_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_SEM 0x8 /* page may be used for atomic ops */
8#define PROT_NONE 0x0 /* page can not be accessed */
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17 5
18#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
19#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -23,24 +11,7 @@
23#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ 11#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */
24#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
25 13
26#define MS_ASYNC 1 /* sync memory asynchronously */
27#define MS_INVALIDATE 2 /* invalidate the caches */
28#define MS_SYNC 4 /* synchronous memory sync */
29
30#define MCL_CURRENT 1 /* lock all current mappings */ 14#define MCL_CURRENT 1 /* lock all current mappings */
31#define MCL_FUTURE 2 /* lock all future mappings */ 15#define MCL_FUTURE 2 /* lock all future mappings */
32 16
33#define MADV_NORMAL 0x0 /* default page-in behavior */
34#define MADV_RANDOM 0x1 /* page-in minimum required */
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
39#define MADV_DONTFORK 0x30 /* dont inherit across fork */
40#define MADV_DOFORK 0x31 /* do inherit across fork */
41
42/* compatibility flags */
43#define MAP_ANON MAP_ANONYMOUS
44#define MAP_FILE 0
45
46#endif /* __ARM_MMAN_H__ */ 17#endif /* __ARM_MMAN_H__ */
diff --git a/include/asm-arm/smp.h b/include/asm-arm/smp.h
index 5a72e50ca9fc..fe45f7f61223 100644
--- a/include/asm-arm/smp.h
+++ b/include/asm-arm/smp.h
@@ -42,6 +42,11 @@ extern void show_ipi_list(struct seq_file *p);
42asmlinkage void do_IPI(struct pt_regs *regs); 42asmlinkage void do_IPI(struct pt_regs *regs);
43 43
44/* 44/*
45 * Setup the SMP cpu_possible_map
46 */
47extern void smp_init_cpus(void);
48
49/*
45 * Move global data into per-processor storage. 50 * Move global data into per-processor storage.
46 */ 51 */
47extern void smp_store_cpu_info(unsigned int cpuid); 52extern void smp_store_cpu_info(unsigned int cpuid);
diff --git a/include/asm-arm/unistd.h b/include/asm-arm/unistd.h
index 77430d6178ae..8f331bbd39a8 100644
--- a/include/asm-arm/unistd.h
+++ b/include/asm-arm/unistd.h
@@ -309,7 +309,7 @@
309#define __NR_mq_getsetattr (__NR_SYSCALL_BASE+279) 309#define __NR_mq_getsetattr (__NR_SYSCALL_BASE+279)
310#define __NR_waitid (__NR_SYSCALL_BASE+280) 310#define __NR_waitid (__NR_SYSCALL_BASE+280)
311 311
312#if 0 /* reserve these for un-muxing socketcall */ 312#if defined(__ARM_EABI__) /* reserve these for un-muxing socketcall */
313#define __NR_socket (__NR_SYSCALL_BASE+281) 313#define __NR_socket (__NR_SYSCALL_BASE+281)
314#define __NR_bind (__NR_SYSCALL_BASE+282) 314#define __NR_bind (__NR_SYSCALL_BASE+282)
315#define __NR_connect (__NR_SYSCALL_BASE+283) 315#define __NR_connect (__NR_SYSCALL_BASE+283)
@@ -329,7 +329,7 @@
329#define __NR_recvmsg (__NR_SYSCALL_BASE+297) 329#define __NR_recvmsg (__NR_SYSCALL_BASE+297)
330#endif 330#endif
331 331
332#if 0 /* reserve these for un-muxing ipc */ 332#if defined(__ARM_EABI__) /* reserve these for un-muxing ipc */
333#define __NR_semop (__NR_SYSCALL_BASE+298) 333#define __NR_semop (__NR_SYSCALL_BASE+298)
334#define __NR_semget (__NR_SYSCALL_BASE+299) 334#define __NR_semget (__NR_SYSCALL_BASE+299)
335#define __NR_semctl (__NR_SYSCALL_BASE+300) 335#define __NR_semctl (__NR_SYSCALL_BASE+300)
@@ -347,7 +347,7 @@
347#define __NR_request_key (__NR_SYSCALL_BASE+310) 347#define __NR_request_key (__NR_SYSCALL_BASE+310)
348#define __NR_keyctl (__NR_SYSCALL_BASE+311) 348#define __NR_keyctl (__NR_SYSCALL_BASE+311)
349 349
350#if 0 /* reserved for un-muxing ipc */ 350#if defined(__ARM_EABI__) /* reserved for un-muxing ipc */
351#define __NR_semtimedop (__NR_SYSCALL_BASE+312) 351#define __NR_semtimedop (__NR_SYSCALL_BASE+312)
352#endif 352#endif
353 353
diff --git a/include/asm-arm26/mman.h b/include/asm-arm26/mman.h
index 2096c50df888..4000a6c1b76b 100644
--- a/include/asm-arm26/mman.h
+++ b/include/asm-arm26/mman.h
@@ -1,19 +1,7 @@
1#ifndef __ARM_MMAN_H__ 1#ifndef __ARM_MMAN_H__
2#define __ARM_MMAN_H__ 2#define __ARM_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_SEM 0x8 /* page may be used for atomic ops */
8#define PROT_NONE 0x0 /* page can not be accessed */
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17 5
18#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
19#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -23,24 +11,7 @@
23#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ 11#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */
24#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
25 13
26#define MS_ASYNC 1 /* sync memory asynchronously */
27#define MS_INVALIDATE 2 /* invalidate the caches */
28#define MS_SYNC 4 /* synchronous memory sync */
29
30#define MCL_CURRENT 1 /* lock all current mappings */ 14#define MCL_CURRENT 1 /* lock all current mappings */
31#define MCL_FUTURE 2 /* lock all future mappings */ 15#define MCL_FUTURE 2 /* lock all future mappings */
32 16
33#define MADV_NORMAL 0x0 /* default page-in behavior */
34#define MADV_RANDOM 0x1 /* page-in minimum required */
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
39#define MADV_DONTFORK 0x30 /* dont inherit across fork */
40#define MADV_DOFORK 0x31 /* do inherit across fork */
41
42/* compatibility flags */
43#define MAP_ANON MAP_ANONYMOUS
44#define MAP_FILE 0
45
46#endif /* __ARM_MMAN_H__ */ 17#endif /* __ARM_MMAN_H__ */
diff --git a/include/asm-cris/mman.h b/include/asm-cris/mman.h
index deddfb239ff5..1c35e1b66b46 100644
--- a/include/asm-cris/mman.h
+++ b/include/asm-cris/mman.h
@@ -3,19 +3,7 @@
3 3
4/* verbatim copy of asm-i386/ version */ 4/* verbatim copy of asm-i386/ version */
5 5
6#define PROT_READ 0x1 /* page can be read */ 6#include <asm-generic/mman.h>
7#define PROT_WRITE 0x2 /* page can be written */
8#define PROT_EXEC 0x4 /* page can be executed */
9#define PROT_SEM 0x8 /* page may be used for atomic ops */
10#define PROT_NONE 0x0 /* page can not be accessed */
11#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
12#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
13
14#define MAP_SHARED 0x01 /* Share changes */
15#define MAP_PRIVATE 0x02 /* Changes are private */
16#define MAP_TYPE 0x0f /* Mask for type of mapping */
17#define MAP_FIXED 0x10 /* Interpret addr exactly */
18#define MAP_ANONYMOUS 0x20 /* don't use a file */
19 7
20#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 8#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
21#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 9#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -25,24 +13,7 @@
25#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 13#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
26#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 14#define MAP_NONBLOCK 0x10000 /* do not block on IO */
27 15
28#define MS_ASYNC 1 /* sync memory asynchronously */
29#define MS_INVALIDATE 2 /* invalidate the caches */
30#define MS_SYNC 4 /* synchronous memory sync */
31
32#define MCL_CURRENT 1 /* lock all current mappings */ 16#define MCL_CURRENT 1 /* lock all current mappings */
33#define MCL_FUTURE 2 /* lock all future mappings */ 17#define MCL_FUTURE 2 /* lock all future mappings */
34 18
35#define MADV_NORMAL 0x0 /* default page-in behavior */
36#define MADV_RANDOM 0x1 /* page-in minimum required */
37#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
38#define MADV_WILLNEED 0x3 /* pre-fault pages */
39#define MADV_DONTNEED 0x4 /* discard these pages */
40#define MADV_REMOVE 0x5 /* remove these pages & resources */
41#define MADV_DONTFORK 0x30 /* dont inherit across fork */
42#define MADV_DOFORK 0x31 /* do inherit across fork */
43
44/* compatibility flags */
45#define MAP_ANON MAP_ANONYMOUS
46#define MAP_FILE 0
47
48#endif /* __CRIS_MMAN_H__ */ 19#endif /* __CRIS_MMAN_H__ */
diff --git a/include/asm-frv/mman.h b/include/asm-frv/mman.h
index d3bca306da82..b4371e928683 100644
--- a/include/asm-frv/mman.h
+++ b/include/asm-frv/mman.h
@@ -1,19 +1,7 @@
1#ifndef __ASM_MMAN_H__ 1#ifndef __ASM_MMAN_H__
2#define __ASM_MMAN_H__ 2#define __ASM_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_SEM 0x8 /* page may be used for atomic ops */
8#define PROT_NONE 0x0 /* page can not be accessed */
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17 5
18#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
19#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -23,25 +11,8 @@
23#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 11#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
24#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
25 13
26#define MS_ASYNC 1 /* sync memory asynchronously */
27#define MS_INVALIDATE 2 /* invalidate the caches */
28#define MS_SYNC 4 /* synchronous memory sync */
29
30#define MCL_CURRENT 1 /* lock all current mappings */ 14#define MCL_CURRENT 1 /* lock all current mappings */
31#define MCL_FUTURE 2 /* lock all future mappings */ 15#define MCL_FUTURE 2 /* lock all future mappings */
32 16
33#define MADV_NORMAL 0x0 /* default page-in behavior */
34#define MADV_RANDOM 0x1 /* page-in minimum required */
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
39#define MADV_DONTFORK 0x30 /* dont inherit across fork */
40#define MADV_DOFORK 0x31 /* do inherit across fork */
41
42/* compatibility flags */
43#define MAP_ANON MAP_ANONYMOUS
44#define MAP_FILE 0
45
46#endif /* __ASM_MMAN_H__ */ 17#endif /* __ASM_MMAN_H__ */
47 18
diff --git a/include/asm-generic/mman.h b/include/asm-generic/mman.h
new file mode 100644
index 000000000000..3b41d2bb70da
--- /dev/null
+++ b/include/asm-generic/mman.h
@@ -0,0 +1,42 @@
1#ifndef _ASM_GENERIC_MMAN_H
2#define _ASM_GENERIC_MMAN_H
3
4/*
5 Author: Michael S. Tsirkin <mst@mellanox.co.il>, Mellanox Technologies Ltd.
6 Based on: asm-xxx/mman.h
7*/
8
9#define PROT_READ 0x1 /* page can be read */
10#define PROT_WRITE 0x2 /* page can be written */
11#define PROT_EXEC 0x4 /* page can be executed */
12#define PROT_SEM 0x8 /* page may be used for atomic ops */
13#define PROT_NONE 0x0 /* page can not be accessed */
14#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
15#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
16
17#define MAP_SHARED 0x01 /* Share changes */
18#define MAP_PRIVATE 0x02 /* Changes are private */
19#define MAP_TYPE 0x0f /* Mask for type of mapping */
20#define MAP_FIXED 0x10 /* Interpret addr exactly */
21#define MAP_ANONYMOUS 0x20 /* don't use a file */
22
23#define MS_ASYNC 1 /* sync memory asynchronously */
24#define MS_INVALIDATE 2 /* invalidate the caches */
25#define MS_SYNC 4 /* synchronous memory sync */
26
27#define MADV_NORMAL 0 /* no further special treatment */
28#define MADV_RANDOM 1 /* expect random page references */
29#define MADV_SEQUENTIAL 2 /* expect sequential page references */
30#define MADV_WILLNEED 3 /* will need these pages */
31#define MADV_DONTNEED 4 /* don't need these pages */
32
33/* common parameters: try to keep these consistent across architectures */
34#define MADV_REMOVE 9 /* remove these pages & resources */
35#define MADV_DONTFORK 10 /* don't inherit across fork */
36#define MADV_DOFORK 11 /* do inherit across fork */
37
38/* compatibility flags */
39#define MAP_ANON MAP_ANONYMOUS
40#define MAP_FILE 0
41
42#endif
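With asm-generic/mman.h in place, every per-architecture header touched by this patch collapses to the same shape: include the generic definitions, then keep only the flags that genuinely differ between architectures. A skeleton of the resulting header for a hypothetical architecture (flag values taken from the i386/arm diffs in this patch; everything not listed comes from the generic file):

	#ifndef __EXAMPLEARCH_MMAN_H__
	#define __EXAMPLEARCH_MMAN_H__

	#include <asm-generic/mman.h>	/* PROT_*, MAP_SHARED/PRIVATE, MS_*, MADV_* */

	/* architecture-specific mmap flags stay local */
	#define MAP_GROWSDOWN	0x0100	/* stack-like segment */
	#define MAP_DENYWRITE	0x0800	/* ETXTBSY */
	/* ... remaining arch-specific flags ... */
	#define MAP_POPULATE	0x8000	/* populate (prefault) page tables */
	#define MAP_NONBLOCK	0x10000	/* do not block on IO */

	#define MCL_CURRENT	1	/* lock all current mappings */
	#define MCL_FUTURE	2	/* lock all future mappings */

	#endif /* __EXAMPLEARCH_MMAN_H__ */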
diff --git a/include/asm-h8300/mman.h b/include/asm-h8300/mman.h
index ac0346f7d11d..b9f104f22a36 100644
--- a/include/asm-h8300/mman.h
+++ b/include/asm-h8300/mman.h
@@ -1,19 +1,7 @@
1#ifndef __H8300_MMAN_H__ 1#ifndef __H8300_MMAN_H__
2#define __H8300_MMAN_H__ 2#define __H8300_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_SEM 0x8 /* page may be used for atomic ops */
8#define PROT_NONE 0x0 /* page can not be accessed */
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17 5
18#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
19#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -23,24 +11,7 @@
23#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 11#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
24#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
25 13
26#define MS_ASYNC 1 /* sync memory asynchronously */
27#define MS_INVALIDATE 2 /* invalidate the caches */
28#define MS_SYNC 4 /* synchronous memory sync */
29
30#define MCL_CURRENT 1 /* lock all current mappings */ 14#define MCL_CURRENT 1 /* lock all current mappings */
31#define MCL_FUTURE 2 /* lock all future mappings */ 15#define MCL_FUTURE 2 /* lock all future mappings */
32 16
33#define MADV_NORMAL 0x0 /* default page-in behavior */
34#define MADV_RANDOM 0x1 /* page-in minimum required */
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
39#define MADV_DONTFORK 0x30 /* dont inherit across fork */
40#define MADV_DOFORK 0x31 /* do inherit across fork */
41
42/* compatibility flags */
43#define MAP_ANON MAP_ANONYMOUS
44#define MAP_FILE 0
45
46#endif /* __H8300_MMAN_H__ */ 17#endif /* __H8300_MMAN_H__ */
diff --git a/include/asm-i386/mman.h b/include/asm-i386/mman.h
index ab2339a1d807..8fd9d7ab7faf 100644
--- a/include/asm-i386/mman.h
+++ b/include/asm-i386/mman.h
@@ -1,19 +1,7 @@
1#ifndef __I386_MMAN_H__ 1#ifndef __I386_MMAN_H__
2#define __I386_MMAN_H__ 2#define __I386_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_SEM 0x8 /* page may be used for atomic ops */
8#define PROT_NONE 0x0 /* page can not be accessed */
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17 5
18#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
19#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -23,24 +11,7 @@
23#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 11#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
24#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
25 13
26#define MS_ASYNC 1 /* sync memory asynchronously */
27#define MS_INVALIDATE 2 /* invalidate the caches */
28#define MS_SYNC 4 /* synchronous memory sync */
29
30#define MCL_CURRENT 1 /* lock all current mappings */ 14#define MCL_CURRENT 1 /* lock all current mappings */
31#define MCL_FUTURE 2 /* lock all future mappings */ 15#define MCL_FUTURE 2 /* lock all future mappings */
32 16
33#define MADV_NORMAL 0x0 /* default page-in behavior */
34#define MADV_RANDOM 0x1 /* page-in minimum required */
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
39#define MADV_DONTFORK 0x30 /* dont inherit across fork */
40#define MADV_DOFORK 0x31 /* do inherit across fork */
41
42/* compatibility flags */
43#define MAP_ANON MAP_ANONYMOUS
44#define MAP_FILE 0
45
46#endif /* __I386_MMAN_H__ */ 17#endif /* __I386_MMAN_H__ */
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index e20e99551d71..1f7d48c9ba3f 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -158,8 +158,8 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
158 158
159/* work to do on interrupt/exception return */ 159/* work to do on interrupt/exception return */
160#define _TIF_WORK_MASK \ 160#define _TIF_WORK_MASK \
161 (0x0000FFFF & ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|\ 161 (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
162 _TIF_SECCOMP|_TIF_SYSCALL_EMU)) 162 _TIF_SECCOMP | _TIF_SYSCALL_EMU))
163/* work to do on any return to u-space */ 163/* work to do on any return to u-space */
164#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) 164#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
165 165
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index 3a544ffc5008..f7a517654308 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -106,6 +106,8 @@ extern unsigned int can_cpei_retarget(void);
106extern unsigned int is_cpu_cpei_target(unsigned int cpu); 106extern unsigned int is_cpu_cpei_target(unsigned int cpu);
107extern void set_cpei_target_cpu(unsigned int cpu); 107extern void set_cpei_target_cpu(unsigned int cpu);
108extern unsigned int get_cpei_target_cpu(void); 108extern unsigned int get_cpei_target_cpu(void);
109extern void prefill_possible_map(void);
110extern int additional_cpus;
109 111
110#ifdef CONFIG_ACPI_NUMA 112#ifdef CONFIG_ACPI_NUMA
111/* Proximity bitmap length; _PXM is at most 255 (8 bit)*/ 113/* Proximity bitmap length; _PXM is at most 255 (8 bit)*/
diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h
index e1b6cd63f49e..03d00faf03b5 100644
--- a/include/asm-ia64/machvec_sn2.h
+++ b/include/asm-ia64/machvec_sn2.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2002-2003, 2006 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License 5 * under the terms of version 2 of the GNU General Public License
@@ -20,11 +20,6 @@
20 * License along with this program; if not, write the Free Software 20 * License along with this program; if not, write the Free Software
21 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 21 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 * 22 *
23 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24 * Mountain View, CA 94043, or:
25 *
26 * http://www.sgi.com
27 *
28 * For further information regarding this notice, see: 23 * For further information regarding this notice, see:
29 * 24 *
30 * http://oss.sgi.com/projects/GenInfo/NoticeExplan 25 * http://oss.sgi.com/projects/GenInfo/NoticeExplan
diff --git a/include/asm-ia64/mman.h b/include/asm-ia64/mman.h
index 357ebb780cc0..6ba179f12718 100644
--- a/include/asm-ia64/mman.h
+++ b/include/asm-ia64/mman.h
@@ -8,19 +8,7 @@
8 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co 8 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
9 */ 9 */
10 10
11#define PROT_READ 0x1 /* page can be read */ 11#include <asm-generic/mman.h>
12#define PROT_WRITE 0x2 /* page can be written */
13#define PROT_EXEC 0x4 /* page can be executed */
14#define PROT_SEM 0x8 /* page may be used for atomic ops */
15#define PROT_NONE 0x0 /* page can not be accessed */
16#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
17#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
18
19#define MAP_SHARED 0x01 /* Share changes */
20#define MAP_PRIVATE 0x02 /* Changes are private */
21#define MAP_TYPE 0x0f /* Mask for type of mapping */
22#define MAP_FIXED 0x10 /* Interpret addr exactly */
23#define MAP_ANONYMOUS 0x20 /* don't use a file */
24 12
25#define MAP_GROWSDOWN 0x00100 /* stack-like segment */ 13#define MAP_GROWSDOWN 0x00100 /* stack-like segment */
26#define MAP_GROWSUP 0x00200 /* register stack-like segment */ 14#define MAP_GROWSUP 0x00200 /* register stack-like segment */
@@ -31,24 +19,7 @@
31#define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */ 19#define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */
32#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 20#define MAP_NONBLOCK 0x10000 /* do not block on IO */
33 21
34#define MS_ASYNC 1 /* sync memory asynchronously */
35#define MS_INVALIDATE 2 /* invalidate the caches */
36#define MS_SYNC 4 /* synchronous memory sync */
37
38#define MCL_CURRENT 1 /* lock all current mappings */ 22#define MCL_CURRENT 1 /* lock all current mappings */
39#define MCL_FUTURE 2 /* lock all future mappings */ 23#define MCL_FUTURE 2 /* lock all future mappings */
40 24
41#define MADV_NORMAL 0x0 /* default page-in behavior */
42#define MADV_RANDOM 0x1 /* page-in minimum required */
43#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
44#define MADV_WILLNEED 0x3 /* pre-fault pages */
45#define MADV_DONTNEED 0x4 /* discard these pages */
46#define MADV_REMOVE 0x5 /* remove these pages & resources */
47#define MADV_DONTFORK 0x30 /* dont inherit across fork */
48#define MADV_DOFORK 0x31 /* do inherit across fork */
49
50/* compatibility flags */
51#define MAP_ANON MAP_ANONYMOUS
52#define MAP_FILE 0
53
54#endif /* _ASM_IA64_MMAN_H */ 25#endif /* _ASM_IA64_MMAN_H */
diff --git a/include/asm-ia64/sn/arch.h b/include/asm-ia64/sn/arch.h
index 1a3831c04af6..91c31be87b13 100644
--- a/include/asm-ia64/sn/arch.h
+++ b/include/asm-ia64/sn/arch.h
@@ -70,7 +70,7 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
70 * Compact node ID to nasid mappings kept in the per-cpu data areas of each 70 * Compact node ID to nasid mappings kept in the per-cpu data areas of each
71 * cpu. 71 * cpu.
72 */ 72 */
73DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]); 73DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
74#define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0])) 74#define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0]))
75 75
76 76
diff --git a/include/asm-ia64/sn/bte.h b/include/asm-ia64/sn/bte.h
index 01e5b4103235..5335d87ca5f8 100644
--- a/include/asm-ia64/sn/bte.h
+++ b/include/asm-ia64/sn/bte.h
@@ -46,7 +46,7 @@
46#define BTES_PER_NODE (is_shub2() ? 4 : 2) 46#define BTES_PER_NODE (is_shub2() ? 4 : 2)
47#define MAX_BTES_PER_NODE 4 47#define MAX_BTES_PER_NODE 4
48 48
49#define BTE2OFF_CTRL (0) 49#define BTE2OFF_CTRL 0
50#define BTE2OFF_SRC (SH2_BT_ENG_SRC_ADDR_0 - SH2_BT_ENG_CSR_0) 50#define BTE2OFF_SRC (SH2_BT_ENG_SRC_ADDR_0 - SH2_BT_ENG_CSR_0)
51#define BTE2OFF_DEST (SH2_BT_ENG_DEST_ADDR_0 - SH2_BT_ENG_CSR_0) 51#define BTE2OFF_DEST (SH2_BT_ENG_DEST_ADDR_0 - SH2_BT_ENG_CSR_0)
52#define BTE2OFF_NOTIFY (SH2_BT_ENG_NOTIF_ADDR_0 - SH2_BT_ENG_CSR_0) 52#define BTE2OFF_NOTIFY (SH2_BT_ENG_NOTIF_ADDR_0 - SH2_BT_ENG_CSR_0)
@@ -75,11 +75,11 @@
75 : base + (BTEOFF_NOTIFY/8)) 75 : base + (BTEOFF_NOTIFY/8))
76 76
77/* Define hardware modes */ 77/* Define hardware modes */
78#define BTE_NOTIFY (IBCT_NOTIFY) 78#define BTE_NOTIFY IBCT_NOTIFY
79#define BTE_NORMAL BTE_NOTIFY 79#define BTE_NORMAL BTE_NOTIFY
80#define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE) 80#define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE)
81/* Use a reserved bit to let the caller specify a wait for any BTE */ 81/* Use a reserved bit to let the caller specify a wait for any BTE */
82#define BTE_WACQUIRE (0x4000) 82#define BTE_WACQUIRE 0x4000
83/* Use the BTE on the node with the destination memory */ 83/* Use the BTE on the node with the destination memory */
84#define BTE_USE_DEST (BTE_WACQUIRE << 1) 84#define BTE_USE_DEST (BTE_WACQUIRE << 1)
85/* Use any available BTE interface on any node for the transfer */ 85/* Use any available BTE interface on any node for the transfer */
diff --git a/include/asm-ia64/sn/pcibr_provider.h b/include/asm-ia64/sn/pcibr_provider.h
index 9334078b089a..a601d3af39b6 100644
--- a/include/asm-ia64/sn/pcibr_provider.h
+++ b/include/asm-ia64/sn/pcibr_provider.h
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H 8#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
9#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H 9#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
@@ -115,18 +115,6 @@ struct pcibus_info {
115 spinlock_t pbi_lock; 115 spinlock_t pbi_lock;
116}; 116};
117 117
118/*
119 * pcibus_info structure locking macros
120 */
121inline static unsigned long
122pcibr_lock(struct pcibus_info *pcibus_info)
123{
124 unsigned long flag;
125 spin_lock_irqsave(&pcibus_info->pbi_lock, flag);
126 return(flag);
127}
128#define pcibr_unlock(pcibus_info, flag) spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag)
129
130extern int pcibr_init_provider(void); 118extern int pcibr_init_provider(void);
131extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *); 119extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *);
132extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t); 120extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t);
diff --git a/include/asm-ia64/sn/sn_feature_sets.h b/include/asm-ia64/sn/sn_feature_sets.h
index 9ca642cad338..ff33e3bd3f8e 100644
--- a/include/asm-ia64/sn/sn_feature_sets.h
+++ b/include/asm-ia64/sn/sn_feature_sets.h
@@ -12,9 +12,6 @@
12 */ 12 */
13 13
14 14
15#include <asm/types.h>
16#include <asm/bitops.h>
17
18/* --------------------- PROM Features -----------------------------*/ 15/* --------------------- PROM Features -----------------------------*/
19extern int sn_prom_feature_available(int id); 16extern int sn_prom_feature_available(int id);
20 17
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index 0c36928ffd8b..df7f5f4f3cde 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -508,19 +508,24 @@ struct xpc_channel {
508#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ 508#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */
509 509
510#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ 510#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */
511#define XPC_C_CONNECTCALLOUT 0x00000040 /* channel connected callout made */ 511#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */
512#define XPC_C_CONNECTED 0x00000080 /* local channel is connected */ 512#define XPC_C_CONNECTEDCALLOUT_MADE \
513#define XPC_C_CONNECTING 0x00000100 /* channel is being connected */ 513 0x00000080 /* connected callout completed */
514 514#define XPC_C_CONNECTED 0x00000100 /* local channel is connected */
515#define XPC_C_RCLOSEREPLY 0x00000200 /* remote close channel reply */ 515#define XPC_C_CONNECTING 0x00000200 /* channel is being connected */
516#define XPC_C_CLOSEREPLY 0x00000400 /* local close channel reply */ 516
517#define XPC_C_RCLOSEREQUEST 0x00000800 /* remote close channel request */ 517#define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */
518#define XPC_C_CLOSEREQUEST 0x00001000 /* local close channel request */ 518#define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */
519 519#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */
520#define XPC_C_DISCONNECTED 0x00002000 /* channel is disconnected */ 520#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */
521#define XPC_C_DISCONNECTING 0x00004000 /* channel is being disconnected */ 521
522#define XPC_C_DISCONNECTCALLOUT 0x00008000 /* chan disconnected callout made */ 522#define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */
523#define XPC_C_WDISCONNECT 0x00010000 /* waiting for channel disconnect */ 523#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */
524#define XPC_C_DISCONNECTINGCALLOUT \
525 0x00010000 /* disconnecting callout initiated */
526#define XPC_C_DISCONNECTINGCALLOUT_MADE \
527 0x00020000 /* disconnecting callout completed */
528#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */
524 529
525 530
526 531
diff --git a/include/asm-ia64/timex.h b/include/asm-ia64/timex.h
index 414aae060440..05a6baf8a472 100644
--- a/include/asm-ia64/timex.h
+++ b/include/asm-ia64/timex.h
@@ -15,6 +15,8 @@
15 15
16typedef unsigned long cycles_t; 16typedef unsigned long cycles_t;
17 17
18extern void (*ia64_udelay)(unsigned long usecs);
19
18/* 20/*
19 * For performance reasons, we don't want to define CLOCK_TICK_RATE as 21 * For performance reasons, we don't want to define CLOCK_TICK_RATE as
20 * local_cpu_data->itc_rate. Fortunately, we don't have to, either: according to George 22 * local_cpu_data->itc_rate. Fortunately, we don't have to, either: according to George
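
The new ia64_udelay declaration is a function-pointer hook: a platform can install its own microsecond-delay routine at runtime instead of the generic one. A stand-alone sketch of that pattern, with made-up names, follows.

    /*
     * Sketch of a replaceable delay hook, analogous in shape to the
     * ia64_udelay pointer declared above.  All names are invented.
     */
    #include <stdio.h>

    static void default_udelay(unsigned long usecs)
    {
        printf("busy-wait %lu usecs against the cpu cycle counter\n", usecs);
    }

    static void rtc_udelay(unsigned long usecs)
    {
        printf("wait %lu usecs against a chipset RTC\n", usecs);
    }

    /* the hook; starts out pointing at the generic implementation */
    static void (*demo_udelay)(unsigned long usecs) = default_udelay;

    int main(void)
    {
        demo_udelay(10);            /* generic path */
        demo_udelay = rtc_udelay;   /* platform override */
        demo_udelay(10);
        return 0;
    }
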
diff --git a/include/asm-m32r/mman.h b/include/asm-m32r/mman.h
index 6b02fe3fcff2..695a860c024f 100644
--- a/include/asm-m32r/mman.h
+++ b/include/asm-m32r/mman.h
@@ -1,21 +1,9 @@
1#ifndef __M32R_MMAN_H__ 1#ifndef __M32R_MMAN_H__
2#define __M32R_MMAN_H__ 2#define __M32R_MMAN_H__
3 3
4/* orig : i386 2.6.0-test6 */ 4#include <asm-generic/mman.h>
5
6#define PROT_READ 0x1 /* page can be read */
7#define PROT_WRITE 0x2 /* page can be written */
8#define PROT_EXEC 0x4 /* page can be executed */
9#define PROT_SEM 0x8 /* page may be used for atomic ops */
10#define PROT_NONE 0x0 /* page can not be accessed */
11#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
12#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
13 5
14#define MAP_SHARED 0x01 /* Share changes */ 6/* orig : i386 2.6.0-test6 */
15#define MAP_PRIVATE 0x02 /* Changes are private */
16#define MAP_TYPE 0x0f /* Mask for type of mapping */
17#define MAP_FIXED 0x10 /* Interpret addr exactly */
18#define MAP_ANONYMOUS 0x20 /* don't use a file */
19 7
20#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 8#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
21#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 9#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -25,24 +13,7 @@
25#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 13#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
26#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 14#define MAP_NONBLOCK 0x10000 /* do not block on IO */
27 15
28#define MS_ASYNC 1 /* sync memory asynchronously */
29#define MS_INVALIDATE 2 /* invalidate the caches */
30#define MS_SYNC 4 /* synchronous memory sync */
31
32#define MCL_CURRENT 1 /* lock all current mappings */ 16#define MCL_CURRENT 1 /* lock all current mappings */
33#define MCL_FUTURE 2 /* lock all future mappings */ 17#define MCL_FUTURE 2 /* lock all future mappings */
34 18
35#define MADV_NORMAL 0x0 /* default page-in behavior */
36#define MADV_RANDOM 0x1 /* page-in minimum required */
37#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
38#define MADV_WILLNEED 0x3 /* pre-fault pages */
39#define MADV_DONTNEED 0x4 /* discard these pages */
40#define MADV_REMOVE 0x5 /* remove these pages & resources */
41#define MADV_DONTFORK 0x30 /* dont inherit across fork */
42#define MADV_DOFORK 0x31 /* do inherit across fork */
43
44/* compatibility flags */
45#define MAP_ANON MAP_ANONYMOUS
46#define MAP_FILE 0
47
48#endif /* __M32R_MMAN_H__ */ 19#endif /* __M32R_MMAN_H__ */
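
The same consolidation repeats for the m68k, s390, sh, sparc, sparc64, v850 and x86_64 headers that follow: the generic definitions move behind #include <asm-generic/mman.h> and only the arch-specific flags stay. As a rough guide, the shared header presumably carries roughly the definitions each per-arch file drops; the sketch below is reconstructed from those removed lines and from the renumbered common MADV_* values, and may not match the real asm-generic/mman.h exactly.

    /* Hedged reconstruction of the generic mman definitions, for orientation only. */
    #define PROT_READ       0x1         /* page can be read */
    #define PROT_WRITE      0x2         /* page can be written */
    #define PROT_EXEC       0x4         /* page can be executed */
    #define PROT_SEM        0x8         /* page may be used for atomic ops */
    #define PROT_NONE       0x0         /* page can not be accessed */
    #define PROT_GROWSDOWN  0x01000000  /* mprotect flag: extend change to start of growsdown vma */
    #define PROT_GROWSUP    0x02000000  /* mprotect flag: extend change to end of growsup vma */

    #define MAP_SHARED      0x01        /* Share changes */
    #define MAP_PRIVATE     0x02        /* Changes are private */
    #define MAP_TYPE        0x0f        /* Mask for type of mapping */
    #define MAP_FIXED       0x10        /* Interpret addr exactly */
    #define MAP_ANONYMOUS   0x20        /* don't use a file */

    #define MS_ASYNC        1           /* sync memory asynchronously */
    #define MS_INVALIDATE   2           /* invalidate the caches */
    #define MS_SYNC         4           /* synchronous memory sync */

    /* renumbered common advice values, kept consistent across architectures */
    #define MADV_NORMAL     0           /* no further special treatment */
    #define MADV_RANDOM     1           /* expect random page references */
    #define MADV_SEQUENTIAL 2           /* expect sequential page references */
    #define MADV_WILLNEED   3           /* will need these pages */
    #define MADV_DONTNEED   4           /* don't need these pages */
    #define MADV_REMOVE     9           /* remove these pages & resources */
    #define MADV_DONTFORK   10          /* don't inherit across fork */
    #define MADV_DOFORK     11          /* do inherit across fork */

    /* compatibility flags */
    #define MAP_ANON        MAP_ANONYMOUS
    #define MAP_FILE        0
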
diff --git a/include/asm-m68k/mman.h b/include/asm-m68k/mman.h
index efd12bc4ccb7..1626d37f4898 100644
--- a/include/asm-m68k/mman.h
+++ b/include/asm-m68k/mman.h
@@ -1,19 +1,7 @@
1#ifndef __M68K_MMAN_H__ 1#ifndef __M68K_MMAN_H__
2#define __M68K_MMAN_H__ 2#define __M68K_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_SEM 0x8 /* page may be used for atomic ops */
8#define PROT_NONE 0x0 /* page can not be accessed */
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17 5
18#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
19#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -23,24 +11,7 @@
23#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 11#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
24#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
25 13
26#define MS_ASYNC 1 /* sync memory asynchronously */
27#define MS_INVALIDATE 2 /* invalidate the caches */
28#define MS_SYNC 4 /* synchronous memory sync */
29
30#define MCL_CURRENT 1 /* lock all current mappings */ 14#define MCL_CURRENT 1 /* lock all current mappings */
31#define MCL_FUTURE 2 /* lock all future mappings */ 15#define MCL_FUTURE 2 /* lock all future mappings */
32 16
33#define MADV_NORMAL 0x0 /* default page-in behavior */
34#define MADV_RANDOM 0x1 /* page-in minimum required */
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
39#define MADV_DONTFORK 0x30 /* dont inherit across fork */
40#define MADV_DOFORK 0x31 /* do inherit across fork */
41
42/* compatibility flags */
43#define MAP_ANON MAP_ANONYMOUS
44#define MAP_FILE 0
45
46#endif /* __M68K_MMAN_H__ */ 17#endif /* __M68K_MMAN_H__ */
diff --git a/include/asm-mips/mman.h b/include/asm-mips/mman.h
index 6d01e26830fa..046cf686bee7 100644
--- a/include/asm-mips/mman.h
+++ b/include/asm-mips/mman.h
@@ -60,17 +60,19 @@
60#define MCL_CURRENT 1 /* lock all current mappings */ 60#define MCL_CURRENT 1 /* lock all current mappings */
61#define MCL_FUTURE 2 /* lock all future mappings */ 61#define MCL_FUTURE 2 /* lock all future mappings */
62 62
63#define MADV_NORMAL 0x0 /* default page-in behavior */ 63#define MADV_NORMAL 0 /* no further special treatment */
64#define MADV_RANDOM 0x1 /* page-in minimum required */ 64#define MADV_RANDOM 1 /* expect random page references */
65#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ 65#define MADV_SEQUENTIAL 2 /* expect sequential page references */
66#define MADV_WILLNEED 0x3 /* pre-fault pages */ 66#define MADV_WILLNEED 3 /* will need these pages */
67#define MADV_DONTNEED 0x4 /* discard these pages */ 67#define MADV_DONTNEED 4 /* don't need these pages */
68#define MADV_REMOVE 0x5 /* remove these pages & resources */ 68
69#define MADV_DONTFORK 0x30 /* dont inherit across fork */ 69/* common parameters: try to keep these consistent across architectures */
70#define MADV_DOFORK 0x31 /* do inherit across fork */ 70#define MADV_REMOVE 9 /* remove these pages & resources */
71#define MADV_DONTFORK 10 /* don't inherit across fork */
72#define MADV_DOFORK 11 /* do inherit across fork */
71 73
72/* compatibility flags */ 74/* compatibility flags */
73#define MAP_ANON MAP_ANONYMOUS 75#define MAP_ANON MAP_ANONYMOUS
74#define MAP_FILE 0 76#define MAP_FILE 0
75 77
76#endif /* _ASM_MMAN_H */ 78#endif /* _ASM_MMAN_H */
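
For reference, a small user-space sketch of the renumbered advice values in use; the only visible effect of the renumbering is that the kernel and the C library must agree on the per-architecture constants (MADV_DONTFORK is assumed to be exposed by the libc headers here).

    /*
     * User-space sketch, not part of the patch: marking an anonymous mapping
     * so that it is not duplicated into forked children.
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        void *buf = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        memset(buf, 0xaa, 4 * page);

        /* e.g. a buffer under DMA that must not be shared with children */
        if (madvise(buf, 4 * page, MADV_DONTFORK) != 0)
            perror("madvise(MADV_DONTFORK)");

        munmap(buf, 4 * page);
        return 0;
    }
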
diff --git a/include/asm-parisc/mman.h b/include/asm-parisc/mman.h
index a381cf5c8f55..0ef15ee0f17e 100644
--- a/include/asm-parisc/mman.h
+++ b/include/asm-parisc/mman.h
@@ -38,7 +38,11 @@
38#define MADV_SPACEAVAIL 5 /* insure that resources are reserved */ 38#define MADV_SPACEAVAIL 5 /* insure that resources are reserved */
39#define MADV_VPS_PURGE 6 /* Purge pages from VM page cache */ 39#define MADV_VPS_PURGE 6 /* Purge pages from VM page cache */
40#define MADV_VPS_INHERIT 7 /* Inherit parents page size */ 40#define MADV_VPS_INHERIT 7 /* Inherit parents page size */
41#define MADV_REMOVE 8 /* remove these pages & resources */ 41
42/* common/generic parameters */
43#define MADV_REMOVE 9 /* remove these pages & resources */
44#define MADV_DONTFORK 10 /* don't inherit across fork */
45#define MADV_DOFORK 11 /* do inherit across fork */
42 46
43/* The range 12-64 is reserved for page size specification. */ 47/* The range 12-64 is reserved for page size specification. */
44#define MADV_4K_PAGES 12 /* Use 4K pages */ 48#define MADV_4K_PAGES 12 /* Use 4K pages */
@@ -49,8 +53,6 @@
49#define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */ 53#define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */
50#define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */ 54#define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */
51#define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */ 55#define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */
52#define MADV_DONTFORK 0x30 /* dont inherit across fork */
53#define MADV_DOFORK 0x31 /* do inherit across fork */
54 56
55/* compatibility flags */ 57/* compatibility flags */
56#define MAP_ANON MAP_ANONYMOUS 58#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-powerpc/mman.h b/include/asm-powerpc/mman.h
index fcff25d13f13..24cf664a8295 100644
--- a/include/asm-powerpc/mman.h
+++ b/include/asm-powerpc/mman.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_POWERPC_MMAN_H 1#ifndef _ASM_POWERPC_MMAN_H
2#define _ASM_POWERPC_MMAN_H 2#define _ASM_POWERPC_MMAN_H
3 3
4#include <asm-generic/mman.h>
5
4/* 6/*
5 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -8,19 +10,6 @@
8 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
9 */ 11 */
10 12
11#define PROT_READ 0x1 /* page can be read */
12#define PROT_WRITE 0x2 /* page can be written */
13#define PROT_EXEC 0x4 /* page can be executed */
14#define PROT_SEM 0x8 /* page may be used for atomic ops */
15#define PROT_NONE 0x0 /* page can not be accessed */
16#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
17#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
18
19#define MAP_SHARED 0x01 /* Share changes */
20#define MAP_PRIVATE 0x02 /* Changes are private */
21#define MAP_TYPE 0x0f /* Mask for type of mapping */
22#define MAP_FIXED 0x10 /* Interpret addr exactly */
23#define MAP_ANONYMOUS 0x20 /* don't use a file */
24#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ 13#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
25#define MAP_NORESERVE 0x40 /* don't reserve swap pages */ 14#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
26#define MAP_LOCKED 0x80 15#define MAP_LOCKED 0x80
@@ -29,27 +18,10 @@
29#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 18#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
30#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ 19#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
31 20
32#define MS_ASYNC 1 /* sync memory asynchronously */
33#define MS_INVALIDATE 2 /* invalidate the caches */
34#define MS_SYNC 4 /* synchronous memory sync */
35
36#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */ 21#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */
37#define MCL_FUTURE 0x4000 /* lock all additions to address space */ 22#define MCL_FUTURE 0x4000 /* lock all additions to address space */
38 23
39#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 24#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
40#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 25#define MAP_NONBLOCK 0x10000 /* do not block on IO */
41 26
42#define MADV_NORMAL 0x0 /* default page-in behavior */
43#define MADV_RANDOM 0x1 /* page-in minimum required */
44#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
45#define MADV_WILLNEED 0x3 /* pre-fault pages */
46#define MADV_DONTNEED 0x4 /* discard these pages */
47#define MADV_REMOVE 0x5 /* remove these pages & resources */
48#define MADV_DONTFORK 0x30 /* dont inherit across fork */
49#define MADV_DOFORK 0x31 /* do inherit across fork */
50
51/* compatibility flags */
52#define MAP_ANON MAP_ANONYMOUS
53#define MAP_FILE 0
54
55#endif /* _ASM_POWERPC_MMAN_H */ 27#endif /* _ASM_POWERPC_MMAN_H */
diff --git a/include/asm-powerpc/pgalloc.h b/include/asm-powerpc/pgalloc.h
index 9f5b052784a5..a00ee002cd11 100644
--- a/include/asm-powerpc/pgalloc.h
+++ b/include/asm-powerpc/pgalloc.h
@@ -146,7 +146,7 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
146 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ 146 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
147 PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) 147 PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
148#ifndef CONFIG_PPC_64K_PAGES 148#ifndef CONFIG_PPC_64K_PAGES
149#define __pud_free_tlb(tlb, pmd) \ 149#define __pud_free_tlb(tlb, pud) \
150 pgtable_free_tlb(tlb, pgtable_free_cache(pud, \ 150 pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
151 PUD_CACHE_NUM, PUD_TABLE_SIZE-1)) 151 PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
152#endif /* CONFIG_PPC_64K_PAGES */ 152#endif /* CONFIG_PPC_64K_PAGES */
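
The one-line pgalloc.h change fixes a macro whose parameter was named pmd while its body expanded pud, so it only worked when the caller's variable happened to be called pud. A toy illustration of that failure mode, unrelated to the kernel code, is below.

    /*
     * Toy example of the macro-parameter bug fixed above: if the body
     * expands an identifier other than the declared parameter, the macro
     * silently captures whatever variable of that name is in scope.
     */
    #include <stdio.h>

    /* broken: parameter is 'a', body uses 'val' */
    #define SQUARE_BROKEN(a)  ((val) * (val))
    /* fixed: body uses its own parameter */
    #define SQUARE_FIXED(a)   ((a) * (a))

    int main(void)
    {
        int val = 3;    /* happens to exist, so the broken macro "works" */
        int x = 5;

        printf("%d\n", SQUARE_BROKEN(x));   /* prints 9, not 25 */
        printf("%d\n", SQUARE_FIXED(x));    /* prints 25 */
        return 0;
    }
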
diff --git a/include/asm-s390/mman.h b/include/asm-s390/mman.h
index d41ca1477010..7839767d837e 100644
--- a/include/asm-s390/mman.h
+++ b/include/asm-s390/mman.h
@@ -9,19 +9,7 @@
9#ifndef __S390_MMAN_H__ 9#ifndef __S390_MMAN_H__
10#define __S390_MMAN_H__ 10#define __S390_MMAN_H__
11 11
12#define PROT_READ 0x1 /* page can be read */ 12#include <asm-generic/mman.h>
13#define PROT_WRITE 0x2 /* page can be written */
14#define PROT_EXEC 0x4 /* page can be executed */
15#define PROT_SEM 0x8 /* page may be used for atomic ops */
16#define PROT_NONE 0x0 /* page can not be accessed */
17#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
18#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
19
20#define MAP_SHARED 0x01 /* Share changes */
21#define MAP_PRIVATE 0x02 /* Changes are private */
22#define MAP_TYPE 0x0f /* Mask for type of mapping */
23#define MAP_FIXED 0x10 /* Interpret addr exactly */
24#define MAP_ANONYMOUS 0x20 /* don't use a file */
25 13
26#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 14#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
27#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 15#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -31,24 +19,7 @@
31#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 19#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
32#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 20#define MAP_NONBLOCK 0x10000 /* do not block on IO */
33 21
34#define MS_ASYNC 1 /* sync memory asynchronously */
35#define MS_INVALIDATE 2 /* invalidate the caches */
36#define MS_SYNC 4 /* synchronous memory sync */
37
38#define MCL_CURRENT 1 /* lock all current mappings */ 22#define MCL_CURRENT 1 /* lock all current mappings */
39#define MCL_FUTURE 2 /* lock all future mappings */ 23#define MCL_FUTURE 2 /* lock all future mappings */
40 24
41#define MADV_NORMAL 0x0 /* default page-in behavior */
42#define MADV_RANDOM 0x1 /* page-in minimum required */
43#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
44#define MADV_WILLNEED 0x3 /* pre-fault pages */
45#define MADV_DONTNEED 0x4 /* discard these pages */
46#define MADV_REMOVE 0x5 /* remove these pages & resources */
47#define MADV_DONTFORK 0x30 /* dont inherit across fork */
48#define MADV_DOFORK 0x31 /* do inherit across fork */
49
50/* compatibility flags */
51#define MAP_ANON MAP_ANONYMOUS
52#define MAP_FILE 0
53
54#endif /* __S390_MMAN_H__ */ 25#endif /* __S390_MMAN_H__ */
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 9c6e9c300eb9..444dae5912e6 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -31,6 +31,7 @@ typedef struct
31 __u16 cpu; 31 __u16 cpu;
32} sigp_info; 32} sigp_info;
33 33
34extern void smp_setup_cpu_possible_map(void);
34extern int smp_call_function_on(void (*func) (void *info), void *info, 35extern int smp_call_function_on(void (*func) (void *info), void *info,
35 int nonatomic, int wait, int cpu); 36 int nonatomic, int wait, int cpu);
36#define NO_PROC_ID 0xFF /* No processor magic marker */ 37#define NO_PROC_ID 0xFF /* No processor magic marker */
@@ -104,6 +105,7 @@ smp_call_function_on(void (*func) (void *info), void *info,
104#define smp_cpu_not_running(cpu) 1 105#define smp_cpu_not_running(cpu) 1
105#define smp_get_cpu(cpu) ({ 0; }) 106#define smp_get_cpu(cpu) ({ 0; })
106#define smp_put_cpu(cpu) ({ 0; }) 107#define smp_put_cpu(cpu) ({ 0; })
108#define smp_setup_cpu_possible_map()
107#endif 109#endif
108 110
109#endif 111#endif
diff --git a/include/asm-sh/mman.h b/include/asm-sh/mman.h
index 0e08d0573abc..156eb0225cf6 100644
--- a/include/asm-sh/mman.h
+++ b/include/asm-sh/mman.h
@@ -1,19 +1,7 @@
1#ifndef __ASM_SH_MMAN_H 1#ifndef __ASM_SH_MMAN_H
2#define __ASM_SH_MMAN_H 2#define __ASM_SH_MMAN_H
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_SEM 0x8 /* page may be used for atomic ops */
8#define PROT_NONE 0x0 /* page can not be accessed */
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17 5
18#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
19#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -23,24 +11,7 @@
23#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */ 11#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */
24#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 12#define MAP_NONBLOCK 0x10000 /* do not block on IO */
25 13
26#define MS_ASYNC 1 /* sync memory asynchronously */
27#define MS_INVALIDATE 2 /* invalidate the caches */
28#define MS_SYNC 4 /* synchronous memory sync */
29
30#define MCL_CURRENT 1 /* lock all current mappings */ 14#define MCL_CURRENT 1 /* lock all current mappings */
31#define MCL_FUTURE 2 /* lock all future mappings */ 15#define MCL_FUTURE 2 /* lock all future mappings */
32 16
33#define MADV_NORMAL 0x0 /* default page-in behavior */
34#define MADV_RANDOM 0x1 /* page-in minimum required */
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
39#define MADV_DONTFORK 0x30 /* dont inherit across fork */
40#define MADV_DOFORK 0x31 /* do inherit across fork */
41
42/* compatibility flags */
43#define MAP_ANON MAP_ANONYMOUS
44#define MAP_FILE 0
45
46#endif /* __ASM_SH_MMAN_H */ 17#endif /* __ASM_SH_MMAN_H */
diff --git a/include/asm-sparc/mman.h b/include/asm-sparc/mman.h
index 4a298b2be859..88d1886abf3b 100644
--- a/include/asm-sparc/mman.h
+++ b/include/asm-sparc/mman.h
@@ -2,21 +2,10 @@
2#ifndef __SPARC_MMAN_H__ 2#ifndef __SPARC_MMAN_H__
3#define __SPARC_MMAN_H__ 3#define __SPARC_MMAN_H__
4 4
5/* SunOS'ified... */ 5#include <asm-generic/mman.h>
6 6
7#define PROT_READ 0x1 /* page can be read */ 7/* SunOS'ified... */
8#define PROT_WRITE 0x2 /* page can be written */
9#define PROT_EXEC 0x4 /* page can be executed */
10#define PROT_SEM 0x8 /* page may be used for atomic ops */
11#define PROT_NONE 0x0 /* page can not be accessed */
12#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
13#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
14 8
15#define MAP_SHARED 0x01 /* Share changes */
16#define MAP_PRIVATE 0x02 /* Changes are private */
17#define MAP_TYPE 0x0f /* Mask for type of mapping */
18#define MAP_FIXED 0x10 /* Interpret addr exactly */
19#define MAP_ANONYMOUS 0x20 /* don't use a file */
20#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ 9#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
21#define MAP_NORESERVE 0x40 /* don't reserve swap pages */ 10#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
22#define MAP_INHERIT 0x80 /* SunOS doesn't do this, but... */ 11#define MAP_INHERIT 0x80 /* SunOS doesn't do this, but... */
@@ -27,10 +16,6 @@
27#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 16#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
28#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ 17#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
29 18
30#define MS_ASYNC 1 /* sync memory asynchronously */
31#define MS_INVALIDATE 2 /* invalidate the caches */
32#define MS_SYNC 4 /* synchronous memory sync */
33
34#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */ 19#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */
35#define MCL_FUTURE 0x4000 /* lock all additions to address space */ 20#define MCL_FUTURE 0x4000 /* lock all additions to address space */
36 21
@@ -48,18 +33,6 @@
48#define MC_LOCKAS 5 /* Lock an entire address space of the calling process */ 33#define MC_LOCKAS 5 /* Lock an entire address space of the calling process */
49#define MC_UNLOCKAS 6 /* Unlock entire address space of calling process */ 34#define MC_UNLOCKAS 6 /* Unlock entire address space of calling process */
50 35
51#define MADV_NORMAL 0x0 /* default page-in behavior */
52#define MADV_RANDOM 0x1 /* page-in minimum required */
53#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
54#define MADV_WILLNEED 0x3 /* pre-fault pages */
55#define MADV_DONTNEED 0x4 /* discard these pages */
56#define MADV_FREE 0x5 /* (Solaris) contents can be freed */ 36#define MADV_FREE 0x5 /* (Solaris) contents can be freed */
57#define MADV_REMOVE 0x6 /* remove these pages & resources */
58#define MADV_DONTFORK 0x30 /* dont inherit across fork */
59#define MADV_DOFORK 0x31 /* do inherit across fork */
60
61/* compatibility flags */
62#define MAP_ANON MAP_ANONYMOUS
63#define MAP_FILE 0
64 37
65#endif /* __SPARC_MMAN_H__ */ 38#endif /* __SPARC_MMAN_H__ */
diff --git a/include/asm-sparc64/mman.h b/include/asm-sparc64/mman.h
index d705ec92da8b..6fd878e61435 100644
--- a/include/asm-sparc64/mman.h
+++ b/include/asm-sparc64/mman.h
@@ -2,21 +2,10 @@
2#ifndef __SPARC64_MMAN_H__ 2#ifndef __SPARC64_MMAN_H__
3#define __SPARC64_MMAN_H__ 3#define __SPARC64_MMAN_H__
4 4
5/* SunOS'ified... */ 5#include <asm-generic/mman.h>
6 6
7#define PROT_READ 0x1 /* page can be read */ 7/* SunOS'ified... */
8#define PROT_WRITE 0x2 /* page can be written */
9#define PROT_EXEC 0x4 /* page can be executed */
10#define PROT_SEM 0x8 /* page may be used for atomic ops */
11#define PROT_NONE 0x0 /* page can not be accessed */
12#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
13#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
14 8
15#define MAP_SHARED 0x01 /* Share changes */
16#define MAP_PRIVATE 0x02 /* Changes are private */
17#define MAP_TYPE 0x0f /* Mask for type of mapping */
18#define MAP_FIXED 0x10 /* Interpret addr exactly */
19#define MAP_ANONYMOUS 0x20 /* don't use a file */
20#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ 9#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
21#define MAP_NORESERVE 0x40 /* don't reserve swap pages */ 10#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
22#define MAP_INHERIT 0x80 /* SunOS doesn't do this, but... */ 11#define MAP_INHERIT 0x80 /* SunOS doesn't do this, but... */
@@ -27,10 +16,6 @@
27#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 16#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
28#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ 17#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
29 18
30#define MS_ASYNC 1 /* sync memory asynchronously */
31#define MS_INVALIDATE 2 /* invalidate the caches */
32#define MS_SYNC 4 /* synchronous memory sync */
33
34#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */ 19#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */
35#define MCL_FUTURE 0x4000 /* lock all additions to address space */ 20#define MCL_FUTURE 0x4000 /* lock all additions to address space */
36 21
@@ -48,18 +33,6 @@
48#define MC_LOCKAS 5 /* Lock an entire address space of the calling process */ 33#define MC_LOCKAS 5 /* Lock an entire address space of the calling process */
49#define MC_UNLOCKAS 6 /* Unlock entire address space of calling process */ 34#define MC_UNLOCKAS 6 /* Unlock entire address space of calling process */
50 35
51#define MADV_NORMAL 0x0 /* default page-in behavior */
52#define MADV_RANDOM 0x1 /* page-in minimum required */
53#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
54#define MADV_WILLNEED 0x3 /* pre-fault pages */
55#define MADV_DONTNEED 0x4 /* discard these pages */
56#define MADV_FREE 0x5 /* (Solaris) contents can be freed */ 36#define MADV_FREE 0x5 /* (Solaris) contents can be freed */
57#define MADV_REMOVE 0x6 /* remove these pages & resources */
58#define MADV_DONTFORK 0x30 /* dont inherit across fork */
59#define MADV_DOFORK 0x31 /* do inherit across fork */
60
61/* compatibility flags */
62#define MAP_ANON MAP_ANONYMOUS
63#define MAP_FILE 0
64 37
65#endif /* __SPARC64_MMAN_H__ */ 38#endif /* __SPARC64_MMAN_H__ */
diff --git a/include/asm-v850/mman.h b/include/asm-v850/mman.h
index 7b851c310e41..edbf6edbfb37 100644
--- a/include/asm-v850/mman.h
+++ b/include/asm-v850/mman.h
@@ -1,18 +1,7 @@
1#ifndef __V850_MMAN_H__ 1#ifndef __V850_MMAN_H__
2#define __V850_MMAN_H__ 2#define __V850_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_NONE 0x0 /* page can not be accessed */
8#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
9#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
10
11#define MAP_SHARED 0x01 /* Share changes */
12#define MAP_PRIVATE 0x02 /* Changes are private */
13#define MAP_TYPE 0x0f /* Mask for type of mapping */
14#define MAP_FIXED 0x10 /* Interpret addr exactly */
15#define MAP_ANONYMOUS 0x20 /* don't use a file */
16 5
17#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
18#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 7#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
@@ -20,24 +9,7 @@
20#define MAP_LOCKED 0x2000 /* pages are locked */ 9#define MAP_LOCKED 0x2000 /* pages are locked */
21#define MAP_NORESERVE 0x4000 /* don't check for reservations */ 10#define MAP_NORESERVE 0x4000 /* don't check for reservations */
22 11
23#define MS_ASYNC 1 /* sync memory asynchronously */
24#define MS_INVALIDATE 2 /* invalidate the caches */
25#define MS_SYNC 4 /* synchronous memory sync */
26
27#define MCL_CURRENT 1 /* lock all current mappings */ 12#define MCL_CURRENT 1 /* lock all current mappings */
28#define MCL_FUTURE 2 /* lock all future mappings */ 13#define MCL_FUTURE 2 /* lock all future mappings */
29 14
30#define MADV_NORMAL 0x0 /* default page-in behavior */
31#define MADV_RANDOM 0x1 /* page-in minimum required */
32#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
33#define MADV_WILLNEED 0x3 /* pre-fault pages */
34#define MADV_DONTNEED 0x4 /* discard these pages */
35#define MADV_REMOVE 0x5 /* remove these pages & resources */
36#define MADV_DONTFORK 0x30 /* dont inherit across fork */
37#define MADV_DOFORK 0x31 /* do inherit across fork */
38
39/* compatibility flags */
40#define MAP_ANON MAP_ANONYMOUS
41#define MAP_FILE 0
42
43#endif /* __V850_MMAN_H__ */ 15#endif /* __V850_MMAN_H__ */
diff --git a/include/asm-x86_64/mman.h b/include/asm-x86_64/mman.h
index b699a38c1c3c..dd5cb0534d37 100644
--- a/include/asm-x86_64/mman.h
+++ b/include/asm-x86_64/mman.h
@@ -1,19 +1,8 @@
1#ifndef __X8664_MMAN_H__ 1#ifndef __X8664_MMAN_H__
2#define __X8664_MMAN_H__ 2#define __X8664_MMAN_H__
3 3
4#define PROT_READ 0x1 /* page can be read */ 4#include <asm-generic/mman.h>
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_NONE 0x0 /* page can not be accessed */
8#define PROT_SEM 0x8
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11 5
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping */
15#define MAP_FIXED 0x10 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x20 /* don't use a file */
17#define MAP_32BIT 0x40 /* only give out 32bit addresses */ 6#define MAP_32BIT 0x40 /* only give out 32bit addresses */
18 7
19#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 8#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
@@ -24,24 +13,7 @@
24#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 13#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
25#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 14#define MAP_NONBLOCK 0x10000 /* do not block on IO */
26 15
27#define MS_ASYNC 1 /* sync memory asynchronously */
28#define MS_INVALIDATE 2 /* invalidate the caches */
29#define MS_SYNC 4 /* synchronous memory sync */
30
31#define MCL_CURRENT 1 /* lock all current mappings */ 16#define MCL_CURRENT 1 /* lock all current mappings */
32#define MCL_FUTURE 2 /* lock all future mappings */ 17#define MCL_FUTURE 2 /* lock all future mappings */
33 18
34#define MADV_NORMAL 0x0 /* default page-in behavior */
35#define MADV_RANDOM 0x1 /* page-in minimum required */
36#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
37#define MADV_WILLNEED 0x3 /* pre-fault pages */
38#define MADV_DONTNEED 0x4 /* discard these pages */
39#define MADV_REMOVE 0x5 /* remove these pages & resources */
40#define MADV_DONTFORK 0x30 /* dont inherit across fork */
41#define MADV_DOFORK 0x31 /* do inherit across fork */
42
43/* compatibility flags */
44#define MAP_ANON MAP_ANONYMOUS
45#define MAP_FILE 0
46
47#endif 19#endif
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index c99832e7bf3f..eca3f2d633db 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -133,6 +133,7 @@ extern int fix_aperture;
133extern int force_iommu; 133extern int force_iommu;
134 134
135extern int reboot_force; 135extern int reboot_force;
136extern int notsc_setup(char *);
136 137
137extern void smp_local_timer_interrupt(struct pt_regs * regs); 138extern void smp_local_timer_interrupt(struct pt_regs * regs);
138 139
diff --git a/include/asm-xtensa/mman.h b/include/asm-xtensa/mman.h
index e2d7afb679c8..ba394cbb4807 100644
--- a/include/asm-xtensa/mman.h
+++ b/include/asm-xtensa/mman.h
@@ -67,17 +67,19 @@
67#define MCL_CURRENT 1 /* lock all current mappings */ 67#define MCL_CURRENT 1 /* lock all current mappings */
68#define MCL_FUTURE 2 /* lock all future mappings */ 68#define MCL_FUTURE 2 /* lock all future mappings */
69 69
70#define MADV_NORMAL 0x0 /* default page-in behavior */ 70#define MADV_NORMAL 0 /* no further special treatment */
71#define MADV_RANDOM 0x1 /* page-in minimum required */ 71#define MADV_RANDOM 1 /* expect random page references */
72#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ 72#define MADV_SEQUENTIAL 2 /* expect sequential page references */
73#define MADV_WILLNEED 0x3 /* pre-fault pages */ 73#define MADV_WILLNEED 3 /* will need these pages */
74#define MADV_DONTNEED 0x4 /* discard these pages */ 74#define MADV_DONTNEED 4 /* don't need these pages */
75#define MADV_REMOVE 0x5 /* remove these pages & resources */ 75
76#define MADV_DONTFORK 0x30 /* dont inherit across fork */ 76/* common parameters: try to keep these consistent across architectures */
77#define MADV_DOFORK 0x31 /* do inherit across fork */ 77#define MADV_REMOVE 9 /* remove these pages & resources */
78#define MADV_DONTFORK 10 /* don't inherit across fork */
79#define MADV_DOFORK 11 /* do inherit across fork */
78 80
79/* compatibility flags */ 81/* compatibility flags */
80#define MAP_ANON MAP_ANONYMOUS 82#define MAP_ANON MAP_ANONYMOUS
81#define MAP_FILE 0 83#define MAP_FILE 0
82 84
83#endif /* _XTENSA_MMAN_H */ 85#endif /* _XTENSA_MMAN_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index b49affa0ac5a..3b507bf05d09 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -326,12 +326,6 @@ struct sysinfo {
326/* Force a compilation error if condition is true */ 326/* Force a compilation error if condition is true */
327#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) 327#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
328 328
329#ifdef CONFIG_SYSCTL
330extern int randomize_va_space;
331#else
332#define randomize_va_space 1
333#endif
334
335/* Trap pasters of __FUNCTION__ at compile-time */ 329/* Trap pasters of __FUNCTION__ at compile-time */
336#define __FUNCTION__ (__func__) 330#define __FUNCTION__ (__func__)
337 331
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 6aca67a569a2..f3dec45ef874 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -96,10 +96,16 @@ static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
96 ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; }) 96 ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; })
97 97
98/* convert a timespec to ktime_t format: */ 98/* convert a timespec to ktime_t format: */
99#define timespec_to_ktime(ts) ktime_set((ts).tv_sec, (ts).tv_nsec) 99static inline ktime_t timespec_to_ktime(struct timespec ts)
100{
101 return ktime_set(ts.tv_sec, ts.tv_nsec);
102}
100 103
101/* convert a timeval to ktime_t format: */ 104/* convert a timeval to ktime_t format: */
102#define timeval_to_ktime(tv) ktime_set((tv).tv_sec, (tv).tv_usec * 1000) 105static inline ktime_t timeval_to_ktime(struct timeval tv)
106{
107 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
108}
103 109
104/* Map the ktime_t to timespec conversion to ns_to_timespec function */ 110/* Map the ktime_t to timespec conversion to ns_to_timespec function */
105#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) 111#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64)
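
Turning the ktime conversion macros into static inline functions gives argument type checking and a single evaluation of the argument. A self-contained mock, assuming a 64-bit-only ktime_t representation, shows the shape of the new helpers; it is not the kernel header itself.

    /*
     * Stand-alone sketch: the argument is evaluated exactly once and the
     * compiler type-checks it (a timeval can no longer be fed to the
     * timespec converter by accident).  ktime_t is mocked here.
     */
    #include <stdio.h>
    #include <sys/time.h>
    #include <time.h>

    #define NSEC_PER_SEC   1000000000LL
    #define NSEC_PER_USEC  1000L

    typedef struct { long long tv64; } ktime_t;    /* mock of the 64-bit form */

    static inline ktime_t ktime_set(long secs, unsigned long nsecs)
    {
        return (ktime_t){ .tv64 = (long long)secs * NSEC_PER_SEC + nsecs };
    }

    static inline ktime_t timespec_to_ktime(struct timespec ts)
    {
        return ktime_set(ts.tv_sec, ts.tv_nsec);    /* ts evaluated once */
    }

    static inline ktime_t timeval_to_ktime(struct timeval tv)
    {
        return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
    }

    int main(void)
    {
        struct timespec ts = { .tv_sec = 1, .tv_nsec = 500 };

        printf("%lld ns\n", timespec_to_ktime(ts).tv64);
        return 0;
    }
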
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 75e9f0724997..26e1663a5cbe 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1051,5 +1051,7 @@ int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
1051void drop_pagecache(void); 1051void drop_pagecache(void);
1052void drop_slab(void); 1052void drop_slab(void);
1053 1053
1054extern int randomize_va_space;
1055
1054#endif /* __KERNEL__ */ 1056#endif /* __KERNEL__ */
1055#endif /* _LINUX_MM_H */ 1057#endif /* _LINUX_MM_H */
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 4cf6088625c1..468896939843 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -184,8 +184,11 @@ static inline int nf_hook_thresh(int pf, unsigned int hook,
184 struct sk_buff **pskb, 184 struct sk_buff **pskb,
185 struct net_device *indev, 185 struct net_device *indev,
186 struct net_device *outdev, 186 struct net_device *outdev,
187 int (*okfn)(struct sk_buff *), int thresh) 187 int (*okfn)(struct sk_buff *), int thresh,
188 int cond)
188{ 189{
190 if (!cond)
191 return 1;
189#ifndef CONFIG_NETFILTER_DEBUG 192#ifndef CONFIG_NETFILTER_DEBUG
190 if (list_empty(&nf_hooks[pf][hook])) 193 if (list_empty(&nf_hooks[pf][hook]))
191 return 1; 194 return 1;
@@ -197,7 +200,7 @@ static inline int nf_hook(int pf, unsigned int hook, struct sk_buff **pskb,
197 struct net_device *indev, struct net_device *outdev, 200 struct net_device *indev, struct net_device *outdev,
198 int (*okfn)(struct sk_buff *)) 201 int (*okfn)(struct sk_buff *))
199{ 202{
200 return nf_hook_thresh(pf, hook, pskb, indev, outdev, okfn, INT_MIN); 203 return nf_hook_thresh(pf, hook, pskb, indev, outdev, okfn, INT_MIN, 1);
201} 204}
202 205
203/* Activate hook; either okfn or kfree_skb called, unless a hook 206/* Activate hook; either okfn or kfree_skb called, unless a hook
@@ -224,7 +227,13 @@ static inline int nf_hook(int pf, unsigned int hook, struct sk_buff **pskb,
224 227
225#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \ 228#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \
226({int __ret; \ 229({int __ret; \
227if ((__ret=nf_hook_thresh(pf, hook, &(skb), indev, outdev, okfn, thresh)) == 1)\ 230if ((__ret=nf_hook_thresh(pf, hook, &(skb), indev, outdev, okfn, thresh, 1)) == 1)\
231 __ret = (okfn)(skb); \
232__ret;})
233
234#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) \
235({int __ret; \
236if ((__ret=nf_hook_thresh(pf, hook, &(skb), indev, outdev, okfn, INT_MIN, cond)) == 1)\
228 __ret = (okfn)(skb); \ 237 __ret = (okfn)(skb); \
229__ret;}) 238__ret;})
230 239
@@ -295,11 +304,13 @@ extern struct proc_dir_entry *proc_net_netfilter;
295 304
296#else /* !CONFIG_NETFILTER */ 305#else /* !CONFIG_NETFILTER */
297#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb) 306#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
307#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb)
298static inline int nf_hook_thresh(int pf, unsigned int hook, 308static inline int nf_hook_thresh(int pf, unsigned int hook,
299 struct sk_buff **pskb, 309 struct sk_buff **pskb,
300 struct net_device *indev, 310 struct net_device *indev,
301 struct net_device *outdev, 311 struct net_device *outdev,
302 int (*okfn)(struct sk_buff *), int thresh) 312 int (*okfn)(struct sk_buff *), int thresh,
313 int cond)
303{ 314{
304 return okfn(*pskb); 315 return okfn(*pskb);
305} 316}
@@ -307,7 +318,7 @@ static inline int nf_hook(int pf, unsigned int hook, struct sk_buff **pskb,
307 struct net_device *indev, struct net_device *outdev, 318 struct net_device *indev, struct net_device *outdev,
308 int (*okfn)(struct sk_buff *)) 319 int (*okfn)(struct sk_buff *))
309{ 320{
310 return okfn(*pskb); 321 return 1;
311} 322}
312static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} 323static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
313struct flowi; 324struct flowi;
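
The new cond argument and the NF_HOOK_COND macro let a caller skip the hook chain entirely when a condition is false, in which case nf_hook_thresh returns 1 and the ok-function is invoked directly. A user-space sketch of that control flow, with invented names rather than the netfilter API, follows.

    /*
     * Sketch of the conditional-hook idea: cond == 0 bypasses the hook
     * traversal but still hands the packet to the ok-function.
     */
    #include <stdio.h>

    static int demo_run_hooks(int pkt, int cond)
    {
        if (!cond)
            return 1;       /* behave as if no hook claimed the packet */
        printf("traversing hook chain for packet %d\n", pkt);
        return 1;           /* hooks let it through */
    }

    #define DEMO_HOOK_COND(okfn, pkt, cond)                \
    ({  int __ret;                                         \
        if ((__ret = demo_run_hooks(pkt, cond)) == 1)      \
            __ret = (okfn)(pkt);                           \
        __ret; })

    static int demo_deliver(int pkt)
    {
        printf("delivering packet %d\n", pkt);
        return 0;
    }

    int main(void)
    {
        DEMO_HOOK_COND(demo_deliver, 1, 1);   /* hooks run, then deliver */
        DEMO_HOOK_COND(demo_deliver, 2, 0);   /* hooks skipped, deliver anyway */
        return 0;
    }
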
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 04a4a8cb4ed3..b7ca1204e42a 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -345,6 +345,9 @@ time_interpolator_reset(void)
345 345
346#endif /* !CONFIG_TIME_INTERPOLATION */ 346#endif /* !CONFIG_TIME_INTERPOLATION */
347 347
348/* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */
349extern u64 current_tick_length(void);
350
348#endif /* KERNEL */ 351#endif /* KERNEL */
349 352
350#endif /* LINUX_TIMEX_H */ 353#endif /* LINUX_TIMEX_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index 8de0697b364c..fab3d5b3ab1c 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -41,6 +41,7 @@ struct inet_skb_parm
41#define IPSKB_XFRM_TUNNEL_SIZE 2 41#define IPSKB_XFRM_TUNNEL_SIZE 2
42#define IPSKB_XFRM_TRANSFORMED 4 42#define IPSKB_XFRM_TRANSFORMED 4
43#define IPSKB_FRAG_COMPLETE 8 43#define IPSKB_FRAG_COMPLETE 8
44#define IPSKB_REROUTED 16
44}; 45};
45 46
46struct ipcm_cookie 47struct ipcm_cookie
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d09ca0e7d139..d6111a2f0a23 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -866,7 +866,6 @@ extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
866extern int xfrm_init_state(struct xfrm_state *x); 866extern int xfrm_init_state(struct xfrm_state *x);
867extern int xfrm4_rcv(struct sk_buff *skb); 867extern int xfrm4_rcv(struct sk_buff *skb);
868extern int xfrm4_output(struct sk_buff *skb); 868extern int xfrm4_output(struct sk_buff *skb);
869extern int xfrm4_output_finish(struct sk_buff *skb);
870extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler); 869extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler);
871extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler); 870extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler);
872extern int xfrm6_rcv_spi(struct sk_buff **pskb, u32 spi); 871extern int xfrm6_rcv_spi(struct sk_buff **pskb, u32 spi);
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 3e5cb5ab2d34..e5618b90996e 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -163,9 +163,6 @@ enum iscsi_param {
163}; 163};
164#define ISCSI_PARAM_MAX 14 164#define ISCSI_PARAM_MAX 14
165 165
166typedef uint64_t iscsi_sessionh_t; /* iSCSI Data-Path session handle */
167typedef uint64_t iscsi_connh_t; /* iSCSI Data-Path connection handle */
168
169#define iscsi_ptr(_handle) ((void*)(unsigned long)_handle) 166#define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
170#define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr) 167#define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
171#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata)) 168#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index c60b8ff2f5e4..9c331258bc27 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -433,4 +433,6 @@ struct scsi_lun {
433/* Used to obtain the PCI location of a device */ 433/* Used to obtain the PCI location of a device */
434#define SCSI_IOCTL_GET_PCI 0x5387 434#define SCSI_IOCTL_GET_PCI 0x5387
435 435
436int scsi_execute_in_process_context(void (*fn)(void *data), void *data);
437
436#endif /* _SCSI_SCSI_H */ 438#endif /* _SCSI_SCSI_H */
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 16602a547a63..b41cf077e54b 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -63,25 +63,28 @@ struct iscsi_transport {
63 int max_lun; 63 int max_lun;
64 unsigned int max_conn; 64 unsigned int max_conn;
65 unsigned int max_cmd_len; 65 unsigned int max_cmd_len;
66 struct Scsi_Host *(*create_session) (struct scsi_transport_template *t, 66 struct iscsi_cls_session *(*create_session)
67 uint32_t initial_cmdsn); 67 (struct scsi_transport_template *t, uint32_t sn, uint32_t *sid);
68 void (*destroy_session) (struct Scsi_Host *shost); 68 void (*destroy_session) (struct iscsi_cls_session *session);
69 struct iscsi_cls_conn *(*create_conn) (struct Scsi_Host *shost, 69 struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
70 uint32_t cid); 70 uint32_t cid);
71 int (*bind_conn) (iscsi_sessionh_t session, iscsi_connh_t conn, 71 int (*bind_conn) (struct iscsi_cls_session *session,
72 struct iscsi_cls_conn *cls_conn,
72 uint32_t transport_fd, int is_leading); 73 uint32_t transport_fd, int is_leading);
73 int (*start_conn) (iscsi_connh_t conn); 74 int (*start_conn) (struct iscsi_cls_conn *conn);
74 void (*stop_conn) (iscsi_connh_t conn, int flag); 75 void (*stop_conn) (struct iscsi_cls_conn *conn, int flag);
75 void (*destroy_conn) (struct iscsi_cls_conn *conn); 76 void (*destroy_conn) (struct iscsi_cls_conn *conn);
76 int (*set_param) (iscsi_connh_t conn, enum iscsi_param param, 77 int (*set_param) (struct iscsi_cls_conn *conn, enum iscsi_param param,
77 uint32_t value); 78 uint32_t value);
78 int (*get_conn_param) (void *conndata, enum iscsi_param param, 79 int (*get_conn_param) (struct iscsi_cls_conn *conn,
80 enum iscsi_param param,
79 uint32_t *value); 81 uint32_t *value);
80 int (*get_session_param) (struct Scsi_Host *shost, 82 int (*get_session_param) (struct iscsi_cls_session *session,
81 enum iscsi_param param, uint32_t *value); 83 enum iscsi_param param, uint32_t *value);
82 int (*send_pdu) (iscsi_connh_t conn, struct iscsi_hdr *hdr, 84 int (*send_pdu) (struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
83 char *data, uint32_t data_size); 85 char *data, uint32_t data_size);
84 void (*get_stats) (iscsi_connh_t conn, struct iscsi_stats *stats); 86 void (*get_stats) (struct iscsi_cls_conn *conn,
87 struct iscsi_stats *stats);
85}; 88};
86 89
87/* 90/*
@@ -93,15 +96,14 @@ extern int iscsi_unregister_transport(struct iscsi_transport *tt);
93/* 96/*
94 * control plane upcalls 97 * control plane upcalls
95 */ 98 */
96extern void iscsi_conn_error(iscsi_connh_t conn, enum iscsi_err error); 99extern void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error);
97extern int iscsi_recv_pdu(iscsi_connh_t conn, struct iscsi_hdr *hdr, 100extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
98 char *data, uint32_t data_size); 101 char *data, uint32_t data_size);
99 102
100struct iscsi_cls_conn { 103struct iscsi_cls_conn {
101 struct list_head conn_list; /* item in connlist */ 104 struct list_head conn_list; /* item in connlist */
102 void *dd_data; /* LLD private data */ 105 void *dd_data; /* LLD private data */
103 struct iscsi_transport *transport; 106 struct iscsi_transport *transport;
104 iscsi_connh_t connh;
105 int active; /* must be accessed with the connlock */ 107 int active; /* must be accessed with the connlock */
106 struct device dev; /* sysfs transport/container device */ 108 struct device dev; /* sysfs transport/container device */
107 struct mempool_zone *z_error; 109 struct mempool_zone *z_error;
@@ -113,7 +115,7 @@ struct iscsi_cls_conn {
113 container_of(_dev, struct iscsi_cls_conn, dev) 115 container_of(_dev, struct iscsi_cls_conn, dev)
114 116
115struct iscsi_cls_session { 117struct iscsi_cls_session {
116 struct list_head list; /* item in session_list */ 118 struct list_head sess_list; /* item in session_list */
117 struct iscsi_transport *transport; 119 struct iscsi_transport *transport;
118 struct device dev; /* sysfs transport/container device */ 120 struct device dev; /* sysfs transport/container device */
119}; 121};
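
The interface change above replaces the opaque iscsi_sessionh_t/iscsi_connh_t integer handles with typed struct pointers, so passing a session where a connection is expected becomes a compile-time error. A minimal illustration with stand-in types, not the iscsi class objects:

    /* Stand-in types; only the type-safety point is being made. */
    struct demo_cls_session { int sid; };
    struct demo_cls_conn    { int cid; };

    static int demo_start_conn(struct demo_cls_conn *conn)
    {
        return conn->cid;
    }

    int main(void)
    {
        struct demo_cls_conn conn = { .cid = 7 };

        /* demo_start_conn(&some_session) would no longer compile */
        return demo_start_conn(&conn) == 7 ? 0 : 1;
    }
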
diff --git a/include/video/neomagic.h b/include/video/neomagic.h
index 1d69049bd4c1..78b1f15a538f 100644
--- a/include/video/neomagic.h
+++ b/include/video/neomagic.h
@@ -159,6 +159,7 @@ struct neofb_par {
159 unsigned char PanelDispCntlReg1; 159 unsigned char PanelDispCntlReg1;
160 unsigned char PanelDispCntlReg2; 160 unsigned char PanelDispCntlReg2;
161 unsigned char PanelDispCntlReg3; 161 unsigned char PanelDispCntlReg3;
162 unsigned char PanelDispCntlRegRead;
162 unsigned char PanelVertCenterReg1; 163 unsigned char PanelVertCenterReg1;
163 unsigned char PanelVertCenterReg2; 164 unsigned char PanelVertCenterReg2;
164 unsigned char PanelVertCenterReg3; 165 unsigned char PanelVertCenterReg3;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index ba42b0a76961..12815d3f1a05 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1977,6 +1977,39 @@ void cpuset_fork(struct task_struct *child)
1977 * We don't need to task_lock() this reference to tsk->cpuset, 1977 * We don't need to task_lock() this reference to tsk->cpuset,
1978 * because tsk is already marked PF_EXITING, so attach_task() won't 1978 * because tsk is already marked PF_EXITING, so attach_task() won't
1979 * mess with it, or task is a failed fork, never visible to attach_task. 1979 * mess with it, or task is a failed fork, never visible to attach_task.
1980 *
1981 * Hack:
1982 *
1983 * Set the exiting tasks cpuset to the root cpuset (top_cpuset).
1984 *
1985 * Don't leave a task unable to allocate memory, as that is an
1986 * accident waiting to happen should someone add a callout in
1987 * do_exit() after the cpuset_exit() call that might allocate.
1988 * If a task tries to allocate memory with an invalid cpuset,
1989 * it will oops in cpuset_update_task_memory_state().
1990 *
1991 * We call cpuset_exit() while the task is still competent to
1992 * handle notify_on_release(), then leave the task attached to
1993 * the root cpuset (top_cpuset) for the remainder of its exit.
1994 *
1995 * To do this properly, we would increment the reference count on
1996 * top_cpuset, and near the very end of the kernel/exit.c do_exit()
1997 * code we would add a second cpuset function call, to drop that
1998 * reference. This would just create an unnecessary hot spot on
1999 * the top_cpuset reference count, to no avail.
2000 *
2001 * Normally, holding a reference to a cpuset without bumping its
2002 * count is unsafe. The cpuset could go away, or someone could
2003 * attach us to a different cpuset, decrementing the count on
2004 * the first cpuset that we never incremented. But in this case,
2005 * top_cpuset isn't going away, and either task has PF_EXITING set,
2006 * which wards off any attach_task() attempts, or task is a failed
2007 * fork, never visible to attach_task.
2008 *
2009 * Another way to do this would be to set the cpuset pointer
2010 * to NULL here, and check in cpuset_update_task_memory_state()
2011 * for a NULL pointer. This hack avoids that NULL check, for no
2012 * cost (other than this way too long comment ;).
1980 **/ 2013 **/
1981 2014
1982void cpuset_exit(struct task_struct *tsk) 2015void cpuset_exit(struct task_struct *tsk)
@@ -1984,7 +2017,7 @@ void cpuset_exit(struct task_struct *tsk)
1984 struct cpuset *cs; 2017 struct cpuset *cs;
1985 2018
1986 cs = tsk->cpuset; 2019 cs = tsk->cpuset;
1987 tsk->cpuset = NULL; 2020 tsk->cpuset = &top_cpuset; /* Hack - see comment above */
1988 2021
1989 if (notify_on_release(cs)) { 2022 if (notify_on_release(cs)) {
1990 char *pathbuf = NULL; 2023 char *pathbuf = NULL;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 41f66365f0d8..8d5a5986d621 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -91,10 +91,8 @@ static int save_highmem_zone(struct zone *zone)
91 * corrected eventually when the cases giving rise to this 91 * corrected eventually when the cases giving rise to this
92 * are better understood. 92 * are better understood.
93 */ 93 */
94 if (PageReserved(page)) { 94 if (PageReserved(page))
95 printk("highmem reserved page?!\n");
96 continue; 95 continue;
97 }
98 BUG_ON(PageNosave(page)); 96 BUG_ON(PageNosave(page));
99 if (PageNosaveFree(page)) 97 if (PageNosaveFree(page))
100 continue; 98 continue;
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 4e90905f0e87..2d9d08f72f76 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -153,13 +153,11 @@ static int swsusp_swap_check(void) /* This is called before saving image */
153{ 153{
154 int i; 154 int i;
155 155
156 if (!swsusp_resume_device)
157 return -ENODEV;
158 spin_lock(&swap_lock); 156 spin_lock(&swap_lock);
159 for (i = 0; i < MAX_SWAPFILES; i++) { 157 for (i = 0; i < MAX_SWAPFILES; i++) {
160 if (!(swap_info[i].flags & SWP_WRITEOK)) 158 if (!(swap_info[i].flags & SWP_WRITEOK))
161 continue; 159 continue;
162 if (is_resume_device(swap_info + i)) { 160 if (!swsusp_resume_device || is_resume_device(swap_info + i)) {
163 spin_unlock(&swap_lock); 161 spin_unlock(&swap_lock);
164 root_swap = i; 162 root_swap = i;
165 return 0; 163 return 0;
diff --git a/kernel/sched.c b/kernel/sched.c
index 66d957227de9..12d291bf3379 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5058,7 +5058,18 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
5058#define MAX_DOMAIN_DISTANCE 32 5058#define MAX_DOMAIN_DISTANCE 32
5059 5059
5060static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] = 5060static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] =
5061 { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] = -1LL }; 5061 { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] =
5062/*
5063 * Architectures may override the migration cost and thus avoid
5064 * boot-time calibration. Unit is nanoseconds. Mostly useful for
5065 * virtualized hardware:
5066 */
5067#ifdef CONFIG_DEFAULT_MIGRATION_COST
5068 CONFIG_DEFAULT_MIGRATION_COST
5069#else
5070 -1LL
5071#endif
5072};
5062 5073
5063/* 5074/*
5064 * Allow override of migration cost - in units of microseconds. 5075 * Allow override of migration cost - in units of microseconds.
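
The initializer above uses a GCC range designator so that every distance slot defaults either to -1 (calibrate at boot) or to a build-time constant. A stand-alone sketch of the pattern, with a made-up config symbol rather than the real Kconfig name:

    /*
     * Sketch of a range-designated initializer with an optional build-time
     * override.  DEMO_DEFAULT_MIGRATION_COST stands in for a Kconfig value.
     */
    #include <stdio.h>

    #define MAX_DISTANCE 8
    /* #define DEMO_DEFAULT_MIGRATION_COST 500000 */   /* e.g. set at build time */

    static long long demo_migration_cost[MAX_DISTANCE] = {
        [0 ... MAX_DISTANCE - 1] =
    #ifdef DEMO_DEFAULT_MIGRATION_COST
            DEMO_DEFAULT_MIGRATION_COST
    #else
            -1LL                    /* -1 means "calibrate at boot" */
    #endif
    };

    int main(void)
    {
        for (int i = 0; i < MAX_DISTANCE; i++)
            printf("distance %d: %lld ns\n", i, demo_migration_cost[i]);
        return 0;
    }
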
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 71dd6f62efec..7654d55c47f5 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -126,8 +126,6 @@ extern int sysctl_hz_timer;
126extern int acct_parm[]; 126extern int acct_parm[];
127#endif 127#endif
128 128
129int randomize_va_space = 1;
130
131static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t, 129static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t,
132 ctl_table *, void **); 130 ctl_table *, void **);
133static int proc_doutsstring(ctl_table *table, int write, struct file *filp, 131static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
diff --git a/kernel/timer.c b/kernel/timer.c
index b9dad3994676..fe3a9a9f8328 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -717,12 +717,16 @@ static void second_overflow(void)
717#endif 717#endif
718} 718}
719 719
720/* in the NTP reference this is called "hardclock()" */ 720/*
721static void update_wall_time_one_tick(void) 721 * Returns how many microseconds we need to add to xtime this tick
722 * in doing an adjustment requested with adjtime.
723 */
724static long adjtime_adjustment(void)
722{ 725{
723 long time_adjust_step, delta_nsec; 726 long time_adjust_step;
724 727
725 if ((time_adjust_step = time_adjust) != 0 ) { 728 time_adjust_step = time_adjust;
729 if (time_adjust_step) {
726 /* 730 /*
727 * We are doing an adjtime thing. Prepare time_adjust_step to 731 * We are doing an adjtime thing. Prepare time_adjust_step to
728 * be within bounds. Note that a positive time_adjust means we 732 * be within bounds. Note that a positive time_adjust means we
@@ -733,10 +737,19 @@ static void update_wall_time_one_tick(void)
733 */ 737 */
734 time_adjust_step = min(time_adjust_step, (long)tickadj); 738 time_adjust_step = min(time_adjust_step, (long)tickadj);
735 time_adjust_step = max(time_adjust_step, (long)-tickadj); 739 time_adjust_step = max(time_adjust_step, (long)-tickadj);
740 }
741 return time_adjust_step;
742}
736 743
744/* in the NTP reference this is called "hardclock()" */
745static void update_wall_time_one_tick(void)
746{
747 long time_adjust_step, delta_nsec;
748
749 time_adjust_step = adjtime_adjustment();
750 if (time_adjust_step)
737 /* Reduce by this step the amount of time left */ 751 /* Reduce by this step the amount of time left */
738 time_adjust -= time_adjust_step; 752 time_adjust -= time_adjust_step;
739 }
740 delta_nsec = tick_nsec + time_adjust_step * 1000; 753 delta_nsec = tick_nsec + time_adjust_step * 1000;
741 /* 754 /*
742 * Advance the phase, once it gets to one microsecond, then 755 * Advance the phase, once it gets to one microsecond, then
@@ -759,6 +772,22 @@ static void update_wall_time_one_tick(void)
759} 772}
760 773
761/* 774/*
775 * Return how long ticks are at the moment, that is, how much time
776 * update_wall_time_one_tick will add to xtime next time we call it
777 * (assuming no calls to do_adjtimex in the meantime).
778 * The return value is in fixed-point nanoseconds with SHIFT_SCALE-10
779 * bits to the right of the binary point.
780 * This function has no side-effects.
781 */
782u64 current_tick_length(void)
783{
784 long delta_nsec;
785
786 delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
787 return ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
788}
789
790/*
762 * Using a loop looks inefficient, but "ticks" is 791 * Using a loop looks inefficient, but "ticks" is
763 * usually just one (we shouldn't be losing ticks, 792 * usually just one (we shouldn't be losing ticks,
764 * we're doing this this way mainly for interrupt 793 * we're doing this this way mainly for interrupt
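
current_tick_length() reports the current tick length as fixed-point nanoseconds carrying SHIFT_SCALE-10 fractional bits. The snippet below shows how a consumer would split such a value; SHIFT_SCALE is taken from <linux/timex.h> in the kernel and is assumed to be 22 here purely for the demonstration.

    /* Sketch of consuming a fixed-point tick length; SHIFT_SCALE assumed 22. */
    #include <stdio.h>
    #include <stdint.h>

    #define SHIFT_SCALE 22                     /* assumption for the demo */
    #define FRAC_BITS   (SHIFT_SCALE - 10)     /* 12 fractional bits */

    int main(void)
    {
        /* e.g. a 1000 Hz tick of exactly 1,000,000 ns, no NTP adjustment */
        uint64_t tick_fp = (uint64_t)1000000 << FRAC_BITS;

        uint64_t whole_ns = tick_fp >> FRAC_BITS;
        uint64_t frac     = tick_fp & ((1ULL << FRAC_BITS) - 1);

        printf("tick length: %llu ns + %llu/%llu\n",
               (unsigned long long)whole_ns,
               (unsigned long long)frac,
               (unsigned long long)(1ULL << FRAC_BITS));
        return 0;
    }
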
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index c0bd4a914803..1e5b17dc7e3d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -752,12 +752,14 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	 */
 	nr_cleared_tags = 0;
 	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+		tags[tag] = 1;
 		if (tag_get(pathp->node, tag, pathp->offset)) {
 			tag_clear(pathp->node, tag, pathp->offset);
-			tags[tag] = 0;
-			nr_cleared_tags++;
-		} else
-			tags[tag] = 1;
+			if (!any_tag_set(pathp->node, tag)) {
+				tags[tag] = 0;
+				nr_cleared_tags++;
+			}
+		}
 	}
 
 	for (pathp--; nr_cleared_tags && pathp->node; pathp--) {
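
The fix above only counts a tag as cleared once no slot in the node still carries it (the new any_tag_set() test), which keeps a parent's tag set while sibling slots remain tagged. A toy model of that rule, not the radix tree code itself:

    /* Toy per-node tag bookkeeping mirroring the propagation rule. */
    #include <stdio.h>
    #include <string.h>

    #define SLOTS 4

    struct demo_node {
        int tag[SLOTS];     /* one tag bit per slot */
    };

    static int demo_any_tag_set(const struct demo_node *n)
    {
        for (int i = 0; i < SLOTS; i++)
            if (n->tag[i])
                return 1;
        return 0;
    }

    /* returns 1 if the parent should now clear its tag for this node */
    static int demo_clear_slot_tag(struct demo_node *n, int slot)
    {
        if (!n->tag[slot])
            return 0;
        n->tag[slot] = 0;
        return !demo_any_tag_set(n);   /* propagate only when the node is clean */
    }

    int main(void)
    {
        struct demo_node n;

        memset(&n, 0, sizeof(n));
        n.tag[1] = n.tag[2] = 1;

        printf("propagate after clearing slot 1: %d\n", demo_clear_slot_tag(&n, 1)); /* 0 */
        printf("propagate after clearing slot 2: %d\n", demo_clear_slot_tag(&n, 2)); /* 1 */
        return 0;
    }
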
diff --git a/mm/memory.c b/mm/memory.c
index 2bee1f21aa8a..9abc6008544b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -82,6 +82,16 @@ EXPORT_SYMBOL(num_physpages);
82EXPORT_SYMBOL(high_memory); 82EXPORT_SYMBOL(high_memory);
83EXPORT_SYMBOL(vmalloc_earlyreserve); 83EXPORT_SYMBOL(vmalloc_earlyreserve);
84 84
85int randomize_va_space __read_mostly = 1;
86
87static int __init disable_randmaps(char *s)
88{
89 randomize_va_space = 0;
90 return 0;
91}
92__setup("norandmaps", disable_randmaps);
93
94
85/* 95/*
86 * If a p?d_bad entry is found while walking page tables, report 96 * If a p?d_bad entry is found while walking page tables, report
87 * the error, before resetting entry to p?d_none. Usually (but 97 * the error, before resetting entry to p?d_none. Usually (but
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3bd7fb7e4b75..bedfa4f09c80 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -132,19 +132,29 @@ static int mpol_check_policy(int mode, nodemask_t *nodes)
132 } 132 }
133 return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL; 133 return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
134} 134}
135
135/* Generate a custom zonelist for the BIND policy. */ 136/* Generate a custom zonelist for the BIND policy. */
136static struct zonelist *bind_zonelist(nodemask_t *nodes) 137static struct zonelist *bind_zonelist(nodemask_t *nodes)
137{ 138{
138 struct zonelist *zl; 139 struct zonelist *zl;
139 	int num, max, nd;
140 	int num, max, nd, k;
140 141
141 max = 1 + MAX_NR_ZONES * nodes_weight(*nodes); 142 max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
142 	zl = kmalloc(sizeof(void *) * max, GFP_KERNEL);
143 	zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
143 if (!zl) 144 if (!zl)
144 return NULL; 145 return NULL;
145 num = 0; 146 num = 0;
146 	for_each_node_mask(nd, *nodes)
147 		zl->zones[num++] = &NODE_DATA(nd)->node_zones[policy_zone];
147 	/* First put in the highest zones from all nodes, then all the next
148 	   lower zones etc. Avoid empty zones because the memory allocator
149 doesn't like them. If you implement node hot removal you
150 have to fix that. */
151 for (k = policy_zone; k >= 0; k--) {
152 for_each_node_mask(nd, *nodes) {
153 struct zone *z = &NODE_DATA(nd)->node_zones[k];
154 if (z->present_pages > 0)
155 zl->zones[num++] = z;
156 }
157 }
148 zl->zones[num] = NULL; 158 zl->zones[num] = NULL;
149 return zl; 159 return zl;
150} 160}
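
The rewritten bind_zonelist() above walks zone types from policy_zone downward and, within each type, visits every allowed node, skipping zones that have no present pages. The userspace sketch below reproduces that ordering with a made-up node/zone table; MAX_NODES, MAX_ZONES and the page counts are illustrative assumptions, not data from this patch.

/*
 * Sketch of the zonelist construction order: highest zones from all
 * allowed nodes first, then the next lower zones, skipping empty zones.
 */
#include <stdio.h>

#define MAX_NODES 4
#define MAX_ZONES 3   /* 0 = DMA, 1 = NORMAL, 2 = HIGHMEM (illustrative) */

/* present_pages per node and zone; 0 means "empty zone, don't use it" */
static const long present_pages[MAX_NODES][MAX_ZONES] = {
    { 4096, 262144, 0      },   /* node 0: no highmem */
    { 0,    131072, 524288 },   /* node 1: no DMA zone */
    { 0,    0,      0      },   /* node 2: memoryless node */
    { 4096, 65536,  0      },
};

struct zone_ref { int node, zone; };

static int build_zonelist(const int *nodes, int nr_nodes, int policy_zone,
                          struct zone_ref *out)
{
    int num = 0;

    for (int k = policy_zone; k >= 0; k--)        /* highest zones first */
        for (int i = 0; i < nr_nodes; i++) {
            int nd = nodes[i];

            if (present_pages[nd][k] > 0)         /* avoid empty zones */
                out[num++] = (struct zone_ref){ nd, k };
        }
    return num;
}

int main(void)
{
    int allowed[] = { 0, 1, 2 };
    struct zone_ref zl[MAX_NODES * MAX_ZONES];
    int n = build_zonelist(allowed, 3, MAX_ZONES - 1, zl);

    for (int i = 0; i < n; i++)
        printf("zonelist[%d] = node %d zone %d\n", i, zl[i].node, zl[i].zone);
    return 0;
}
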
@@ -798,6 +808,8 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
798 nodes_clear(*nodes); 808 nodes_clear(*nodes);
799 if (maxnode == 0 || !nmask) 809 if (maxnode == 0 || !nmask)
800 return 0; 810 return 0;
811 if (maxnode > PAGE_SIZE)
812 return -EINVAL;
801 813
802 nlongs = BITS_TO_LONGS(maxnode); 814 nlongs = BITS_TO_LONGS(maxnode);
803 if ((maxnode % BITS_PER_LONG) == 0) 815 if ((maxnode % BITS_PER_LONG) == 0)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 62c122528587..208812b25597 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1541,29 +1541,29 @@ static int __initdata node_load[MAX_NUMNODES];
1541 */ 1541 */
1542static int __init find_next_best_node(int node, nodemask_t *used_node_mask) 1542static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
1543{ 1543{
1544 	int i, n, val;
1544 	int n, val;
1545 int min_val = INT_MAX; 1545 int min_val = INT_MAX;
1546 int best_node = -1; 1546 int best_node = -1;
1547 1547
1548 	for_each_online_node(i) {
1549 		cpumask_t tmp;
1548 	/* Use the local node if we haven't already */
1549 	if (!node_isset(node, *used_node_mask)) {
1550 node_set(node, *used_node_mask);
1551 return node;
1552 }
1550 1553
1551 		/* Start from local node */
1552 		n = (node+i) % num_online_nodes();
1554 	for_each_online_node(n) {
1555 		cpumask_t tmp;
1553 1556
1554 /* Don't want a node to appear more than once */ 1557 /* Don't want a node to appear more than once */
1555 if (node_isset(n, *used_node_mask)) 1558 if (node_isset(n, *used_node_mask))
1556 continue; 1559 continue;
1557 1560
1558 /* Use the local node if we haven't already */
1559 if (!node_isset(node, *used_node_mask)) {
1560 best_node = node;
1561 break;
1562 }
1563
1564 /* Use the distance array to find the distance */ 1561 /* Use the distance array to find the distance */
1565 val = node_distance(node, n); 1562 val = node_distance(node, n);
1566 1563
1564 /* Penalize nodes under us ("prefer the next node") */
1565 val += (n < node);
1566
1567 /* Give preference to headless and unused nodes */ 1567 /* Give preference to headless and unused nodes */
1568 tmp = node_to_cpumask(n); 1568 tmp = node_to_cpumask(n);
1569 if (!cpus_empty(tmp)) 1569 if (!cpus_empty(tmp))
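
find_next_best_node() above now returns the local node first if it is still unused, then scans the remaining online nodes by distance, adding a small penalty to nodes numbered below the local one so fallback lists tend to run "forward". The sketch below shows that selection order with an invented 4-node distance table; the headless and lightly-loaded node weighting from the real function is deliberately left out.

/*
 * Sketch of the fallback-node selection order: local node first if unused,
 * then the closest unused node, preferring higher-numbered nodes on ties.
 * The distance table is invented for illustration.
 */
#include <stdio.h>
#include <limits.h>
#include <stdbool.h>

#define NR_NODES 4

static const int node_distance[NR_NODES][NR_NODES] = {
    { 10, 20, 20, 30 },
    { 20, 10, 30, 20 },
    { 20, 30, 10, 20 },
    { 30, 20, 20, 10 },
};

static int find_next_best_node(int node, bool used[NR_NODES])
{
    int best_node = -1, min_val = INT_MAX;

    /* Use the local node if we haven't already */
    if (!used[node]) {
        used[node] = true;
        return node;
    }

    for (int n = 0; n < NR_NODES; n++) {
        if (used[n])                    /* don't pick a node twice */
            continue;

        int val = node_distance[node][n];
        val += (n < node);              /* penalize nodes "behind" us */

        if (val < min_val) {
            min_val = val;
            best_node = n;
        }
    }
    if (best_node >= 0)
        used[best_node] = true;
    return best_node;
}

int main(void)
{
    bool used[NR_NODES] = { false };
    int n, node = 1;

    while ((n = find_next_best_node(node, used)) >= 0)
        printf("next best node for %d: %d\n", node, n);
    return 0;
}
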
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index abe23923e4e7..9981dcd68f11 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -830,7 +830,8 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
830 skb->h.raw = skb->nh.raw; 830 skb->h.raw = skb->nh.raw;
831 skb->nh.raw = skb_push(skb, gre_hlen); 831 skb->nh.raw = skb_push(skb, gre_hlen);
832 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 832 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
833 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE|IPSKB_XFRM_TRANSFORMED);
833 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
834 IPSKB_REROUTED);
834 dst_release(skb->dst); 835 dst_release(skb->dst);
835 skb->dst = &rt->u.dst; 836 skb->dst = &rt->u.dst;
836 837
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 3324fbfe528a..57d290d89ec2 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -207,8 +207,10 @@ static inline int ip_finish_output(struct sk_buff *skb)
207{ 207{
208#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) 208#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
209 /* Policy lookup after SNAT yielded a new policy */ 209 /* Policy lookup after SNAT yielded a new policy */
210 	if (skb->dst->xfrm != NULL)
211 		return xfrm4_output_finish(skb);
210 	if (skb->dst->xfrm != NULL) {
211 		IPCB(skb)->flags |= IPSKB_REROUTED;
212 return dst_output(skb);
213 }
212#endif 214#endif
213 if (skb->len > dst_mtu(skb->dst) && 215 if (skb->len > dst_mtu(skb->dst) &&
214 !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size)) 216 !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
@@ -271,8 +273,9 @@ int ip_mc_output(struct sk_buff *skb)
271 newskb->dev, ip_dev_loopback_xmit); 273 newskb->dev, ip_dev_loopback_xmit);
272 } 274 }
273 275
274 	return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
275 		       ip_finish_output);
276 	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
277 			    ip_finish_output,
278 !(IPCB(skb)->flags & IPSKB_REROUTED));
276} 279}
277 280
278int ip_output(struct sk_buff *skb) 281int ip_output(struct sk_buff *skb)
@@ -284,8 +287,9 @@ int ip_output(struct sk_buff *skb)
284 skb->dev = dev; 287 skb->dev = dev;
285 skb->protocol = htons(ETH_P_IP); 288 skb->protocol = htons(ETH_P_IP);
286 289
287 	return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
288 		       ip_finish_output);
290 	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
291 			    ip_finish_output,
292 !(IPCB(skb)->flags & IPSKB_REROUTED));
289} 293}
290 294
291int ip_queue_xmit(struct sk_buff *skb, int ipfragok) 295int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
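
The NF_HOOK_COND() calls above run the POST_ROUTING chain only when the condition holds; packets that already traversed it and were merely rerouted by an xfrm policy carry IPSKB_REROUTED and go straight to the output function, which avoids passing through SNAT twice. The sketch below models that conditional-hook idea with stand-in types; it is not the netfilter API, and the hook chain shown is hypothetical.

/*
 * Sketch of the NF_HOOK_COND idea: run the hook chain only if the
 * condition holds, otherwise call the continuation directly.
 */
#include <stdio.h>
#include <stdbool.h>

#define FLAG_REROUTED 0x1

struct pkt {
    const char *name;
    unsigned int flags;
};

static int output_finish(struct pkt *p)
{
    printf("%s: transmitted\n", p->name);
    return 0;
}

/* stand-in for the POST_ROUTING hook chain (e.g. SNAT, mangle) */
static bool post_routing_hooks(struct pkt *p)
{
    printf("%s: POST_ROUTING hooks run\n", p->name);
    return true;   /* accept */
}

/* analogue of NF_HOOK_COND: hooks only if cond, else go straight on */
static int hook_cond(struct pkt *p, bool cond, int (*okfn)(struct pkt *))
{
    if (cond && !post_routing_hooks(p))
        return -1;   /* dropped by a hook */
    return okfn(p);
}

int main(void)
{
    struct pkt fresh    = { "fresh skb", 0 };
    struct pkt rerouted = { "rerouted skb", FLAG_REROUTED };

    hook_cond(&fresh, !(fresh.flags & FLAG_REROUTED), output_finish);
    hook_cond(&rerouted, !(rerouted.flags & FLAG_REROUTED), output_finish);
    return 0;
}
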
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index e5cbe72c6b80..03d13742a4b8 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -622,7 +622,8 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
622 skb->h.raw = skb->nh.raw; 622 skb->h.raw = skb->nh.raw;
623 skb->nh.raw = skb_push(skb, sizeof(struct iphdr)); 623 skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
624 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 624 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
625 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE|IPSKB_XFRM_TRANSFORMED);
625 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
626 IPSKB_REROUTED);
626 dst_release(skb->dst); 627 dst_release(skb->dst);
627 skb->dst = &rt->u.dst; 628 skb->dst = &rt->u.dst;
628 629
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 167619f638c6..6c8624a54933 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -529,15 +529,10 @@ static int init_or_cleanup(int init)
529 goto cleanup_localinops; 529 goto cleanup_localinops;
530 } 530 }
531#endif 531#endif
532
533 /* For use by REJECT target */
534 ip_ct_attach = __nf_conntrack_attach;
535
536 return ret; 532 return ret;
537 533
538 cleanup: 534 cleanup:
539 synchronize_net(); 535 synchronize_net();
540 ip_ct_attach = NULL;
541#ifdef CONFIG_SYSCTL 536#ifdef CONFIG_SYSCTL
542 unregister_sysctl_table(nf_ct_ipv4_sysctl_header); 537 unregister_sysctl_table(nf_ct_ipv4_sysctl_header);
543 cleanup_localinops: 538 cleanup_localinops:
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index d4df0ddd424b..32ad229b4fed 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -152,10 +152,16 @@ error_nolock:
152 goto out_exit; 152 goto out_exit;
153} 153}
154 154
155int xfrm4_output_finish(struct sk_buff *skb)
155static int xfrm4_output_finish(struct sk_buff *skb)
156{ 156{
157 int err; 157 int err;
158 158
159#ifdef CONFIG_NETFILTER
160 if (!skb->dst->xfrm) {
161 IPCB(skb)->flags |= IPSKB_REROUTED;
162 return dst_output(skb);
163 }
164#endif
159 while (likely((err = xfrm4_output_one(skb)) == 0)) { 165 while (likely((err = xfrm4_output_one(skb)) == 0)) {
160 nf_reset(skb); 166 nf_reset(skb);
161 167
@@ -178,6 +184,7 @@ int xfrm4_output_finish(struct sk_buff *skb)
178 184
179int xfrm4_output(struct sk_buff *skb) 185int xfrm4_output(struct sk_buff *skb)
180{ 186{
181 	return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev,
182 		       xfrm4_output_finish);
187 	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev,
188 			    xfrm4_output_finish,
189 !(IPCB(skb)->flags & IPSKB_REROUTED));
183} 190}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index fcf883183cef..21eb725e885f 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -42,6 +42,7 @@
42#include <linux/net.h> 42#include <linux/net.h>
43#include <linux/skbuff.h> 43#include <linux/skbuff.h>
44#include <linux/init.h> 44#include <linux/init.h>
45#include <linux/netfilter.h>
45 46
46#ifdef CONFIG_SYSCTL 47#ifdef CONFIG_SYSCTL
47#include <linux/sysctl.h> 48#include <linux/sysctl.h>
@@ -255,6 +256,7 @@ out:
255struct icmpv6_msg { 256struct icmpv6_msg {
256 struct sk_buff *skb; 257 struct sk_buff *skb;
257 int offset; 258 int offset;
259 uint8_t type;
258}; 260};
259 261
260static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) 262static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
@@ -266,6 +268,8 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st
266 csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset, 268 csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
267 to, len, csum); 269 to, len, csum);
268 skb->csum = csum_block_add(skb->csum, csum, odd); 270 skb->csum = csum_block_add(skb->csum, csum, odd);
271 if (!(msg->type & ICMPV6_INFOMSG_MASK))
272 nf_ct_attach(skb, org_skb);
269 return 0; 273 return 0;
270} 274}
271 275
@@ -403,6 +407,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
403 407
404 msg.skb = skb; 408 msg.skb = skb;
405 msg.offset = skb->nh.raw - skb->data; 409 msg.offset = skb->nh.raw - skb->data;
410 msg.type = type;
406 411
407 len = skb->len - msg.offset; 412 len = skb->len - msg.offset;
408 len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr)); 413 len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr));
@@ -500,6 +505,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
500 505
501 msg.skb = skb; 506 msg.skb = skb;
502 msg.offset = 0; 507 msg.offset = 0;
508 msg.type = ICMPV6_ECHO_REPLY;
503 509
504 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), 510 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
505 sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl, 511 sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
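
The msg.type field added above lets icmpv6_getfrag() attach the original packet's conntrack entry only to error messages; informational types such as echo replies have the high bit set (ICMPV6_INFOMSG_MASK) and are skipped. A minimal model of that test, using the standard ICMPv6 type numbering:

/* Sketch: attach conntrack only for ICMPv6 error messages (type < 128). */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define ICMPV6_INFOMSG_MASK  0x80
#define ICMPV6_DEST_UNREACH  1
#define ICMPV6_TIME_EXCEED   3
#define ICMPV6_ECHO_REQUEST  128
#define ICMPV6_ECHO_REPLY    129

static bool should_attach_conntrack(uint8_t type)
{
    return !(type & ICMPV6_INFOMSG_MASK);
}

int main(void)
{
    uint8_t types[] = { ICMPV6_DEST_UNREACH, ICMPV6_TIME_EXCEED,
                        ICMPV6_ECHO_REQUEST, ICMPV6_ECHO_REPLY };

    for (size_t i = 0; i < sizeof(types); i++)
        printf("type %3u: attach conntrack = %d\n",
               types[i], should_attach_conntrack(types[i]));
    return 0;
}
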
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index c745717b4ce2..0e6d1d4bbd5c 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -160,6 +160,8 @@ static void send_reset(struct sk_buff *oldskb)
160 csum_partial((char *)tcph, 160 csum_partial((char *)tcph,
161 sizeof(struct tcphdr), 0)); 161 sizeof(struct tcphdr), 0));
162 162
163 nf_ct_attach(nskb, oldskb);
164
163 NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, nskb, NULL, nskb->dst->dev, 165 NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
164 dst_output); 166 dst_output);
165} 167}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 0e550127fa7e..a8e5544da93e 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -126,7 +126,7 @@ config NETFILTER_XT_TARGET_CONNMARK
126 tristate '"CONNMARK" target support' 126 tristate '"CONNMARK" target support'
127 depends on NETFILTER_XTABLES 127 depends on NETFILTER_XTABLES
128 depends on IP_NF_MANGLE || IP6_NF_MANGLE 128 depends on IP_NF_MANGLE || IP6_NF_MANGLE
129 	depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4)
129 	depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK)
130 help 130 help
131 This option adds a `CONNMARK' target, which allows one to manipulate 131 This option adds a `CONNMARK' target, which allows one to manipulate
132 the connection mark value. Similar to the MARK target, but 132 the connection mark value. Similar to the MARK target, but
@@ -187,7 +187,7 @@ config NETFILTER_XT_MATCH_COMMENT
187config NETFILTER_XT_MATCH_CONNBYTES 187config NETFILTER_XT_MATCH_CONNBYTES
188 tristate '"connbytes" per-connection counter match support' 188 tristate '"connbytes" per-connection counter match support'
189 depends on NETFILTER_XTABLES 189 depends on NETFILTER_XTABLES
190 	depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || NF_CT_ACCT
190 	depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || (NF_CT_ACCT && NF_CONNTRACK)
191 help 191 help
192 This option adds a `connbytes' match, which allows you to match the 192 This option adds a `connbytes' match, which allows you to match the
193 number of bytes and/or packets for each direction within a connection. 193 number of bytes and/or packets for each direction within a connection.
@@ -198,7 +198,7 @@ config NETFILTER_XT_MATCH_CONNBYTES
198config NETFILTER_XT_MATCH_CONNMARK 198config NETFILTER_XT_MATCH_CONNMARK
199 tristate '"connmark" connection mark match support' 199 tristate '"connmark" connection mark match support'
200 depends on NETFILTER_XTABLES 200 depends on NETFILTER_XTABLES
201 	depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || NF_CONNTRACK_MARK
201 	depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK)
202 help 202 help
203 This option adds a `connmark' match, which allows you to match the 203 This option adds a `connmark' match, which allows you to match the
204 connection mark value previously set for the session by `CONNMARK'. 204 connection mark value previously set for the session by `CONNMARK'.
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0ce337a1d974..d622ddf08bb0 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1556,6 +1556,8 @@ void nf_conntrack_cleanup(void)
1556{ 1556{
1557 int i; 1557 int i;
1558 1558
1559 ip_ct_attach = NULL;
1560
1559 /* This makes sure all current packets have passed through 1561 /* This makes sure all current packets have passed through
1560 netfilter framework. Roll on, two-stage module 1562 netfilter framework. Roll on, two-stage module
1561 delete... */ 1563 delete... */
@@ -1715,6 +1717,9 @@ int __init nf_conntrack_init(void)
1715 nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto; 1717 nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto;
1716 write_unlock_bh(&nf_conntrack_lock); 1718 write_unlock_bh(&nf_conntrack_lock);
1717 1719
1720 /* For use by REJECT target */
1721 ip_ct_attach = __nf_conntrack_attach;
1722
1718 /* Set up fake conntrack: 1723 /* Set up fake conntrack:
1719 - to never be deleted, not in any hashes */ 1724 - to never be deleted, not in any hashes */
1720 atomic_set(&nf_conntrack_untracked.ct_general.use, 1); 1725 atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index df99138c3b3b..6492ed66fb3c 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -864,7 +864,9 @@ static int csum6(const struct sk_buff *skb, unsigned int dataoff)
864{ 864{
865 return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, 865 return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
866 skb->len - dataoff, IPPROTO_TCP, 866 skb->len - dataoff, IPPROTO_TCP,
867 				  skb->ip_summed == CHECKSUM_HW ? skb->csum
867 				  skb->ip_summed == CHECKSUM_HW
868 ? csum_sub(skb->csum,
869 skb_checksum(skb, 0, dataoff, 0))
868 : skb_checksum(skb, dataoff, skb->len - dataoff, 870 : skb_checksum(skb, dataoff, skb->len - dataoff,
869 0)); 871 0));
870} 872}
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 4264dd079a16..831d206344e0 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -161,7 +161,9 @@ static int csum6(const struct sk_buff *skb, unsigned int dataoff)
161{ 161{
162 return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, 162 return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
163 skb->len - dataoff, IPPROTO_UDP, 163 skb->len - dataoff, IPPROTO_UDP,
164 				  skb->ip_summed == CHECKSUM_HW ? skb->csum
164 				  skb->ip_summed == CHECKSUM_HW
165 ? csum_sub(skb->csum,
166 skb_checksum(skb, 0, dataoff, 0))
165 : skb_checksum(skb, dataoff, skb->len - dataoff, 167 : skb_checksum(skb, dataoff, skb->len - dataoff,
166 0)); 168 0));
167} 169}
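
The CHECKSUM_HW branches above can no longer use skb->csum as-is for IPv6: the hardware sum covers the packet from skb->data, so the checksum of the bytes before dataoff has to be subtracted with csum_sub() before validating the TCP/UDP payload. The standalone sketch below shows why one's-complement subtraction of the skipped prefix recovers the payload sum; it models the Internet checksum directly rather than using the kernel's csum_* helpers, and assumes dataoff is even (the fixed IPv6 header is 40 bytes).

/*
 * Sketch: sum(whole packet) minus sum(skipped headers), in one's-complement
 * arithmetic, equals sum(payload).  Standalone model, not kernel helpers.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* 16-bit one's-complement sum of a byte range (even offsets assumed) */
static uint16_t csum_range(const uint8_t *buf, size_t off, size_t len)
{
    uint32_t sum = 0;

    for (size_t i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)(buf[off + i] << 8 | buf[off + i + 1]);
    if (len & 1)
        sum += (uint32_t)buf[off + len - 1] << 8;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);   /* fold carries */
    return (uint16_t)sum;
}

/* one's-complement subtraction: a - b == a + ~b */
static uint16_t csum_sub(uint16_t a, uint16_t b)
{
    uint32_t sum = a + (uint16_t)~b;

    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

int main(void)
{
    uint8_t pkt[64];
    size_t dataoff = 40;   /* e.g. the fixed IPv6 header */

    for (size_t i = 0; i < sizeof(pkt); i++)
        pkt[i] = (uint8_t)(i * 7 + 3);

    uint16_t whole   = csum_range(pkt, 0, sizeof(pkt));            /* what hardware gives us */
    uint16_t skipped = csum_range(pkt, 0, dataoff);                 /* headers we must ignore */
    uint16_t payload = csum_range(pkt, dataoff, sizeof(pkt) - dataoff);

    printf("whole=0x%04x skipped=0x%04x payload=0x%04x whole-skipped=0x%04x\n",
           whole, skipped, payload, csum_sub(whole, skipped));
    return 0;
}
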