-rw-r--r-- Documentation/spinlocks.txt | 184
-rw-r--r-- arch/arm/Kconfig | 5
-rw-r--r-- arch/arm/Kconfig.debug | 8
-rw-r--r-- arch/arm/configs/zeus_defconfig | 2032
-rw-r--r-- arch/arm/kernel/Makefile | 1
-rw-r--r-- arch/arm/kernel/early_printk.c | 57
-rw-r--r-- arch/arm/kernel/smp_twd.c | 1
-rw-r--r-- arch/arm/mach-clps711x/include/mach/memory.h | 2
-rw-r--r-- arch/arm/mach-footbridge/common.c | 22
-rw-r--r-- arch/arm/mach-footbridge/include/mach/memory.h | 15
-rw-r--r-- arch/arm/mach-integrator/include/mach/memory.h | 3
-rw-r--r-- arch/arm/mach-ixp2000/include/mach/memory.h | 12
-rw-r--r-- arch/arm/mach-ixp23xx/include/mach/memory.h | 19
-rw-r--r-- arch/arm/mach-lh7a40x/clocks.c | 8
-rw-r--r-- arch/arm/mach-pxa/Kconfig | 14
-rw-r--r-- arch/arm/mach-pxa/Makefile | 1
-rw-r--r-- arch/arm/mach-pxa/em-x270.c | 11
-rw-r--r-- arch/arm/mach-pxa/include/mach/arcom-pcmcia.h | 11
-rw-r--r-- arch/arm/mach-pxa/include/mach/viper.h | 2
-rw-r--r-- arch/arm/mach-pxa/include/mach/zeus.h | 82
-rw-r--r-- arch/arm/mach-pxa/viper.c | 20
-rw-r--r-- arch/arm/mach-pxa/zeus.c | 820
-rw-r--r-- arch/arm/mach-realview/Kconfig | 2
-rw-r--r-- arch/arm/mach-s3c24a0/include/mach/memory.h | 2
-rw-r--r-- arch/arm/mach-sa1100/Kconfig | 13
-rw-r--r-- arch/arm/mach-sa1100/generic.c | 12
-rw-r--r-- arch/arm/vfp/vfpmodule.c | 83
-rw-r--r-- arch/ia64/Kconfig | 3
-rw-r--r-- arch/ia64/include/asm/meminit.h | 2
-rw-r--r-- arch/ia64/include/asm/pgtable.h | 3
-rw-r--r-- arch/ia64/include/asm/processor.h | 6
-rw-r--r-- arch/ia64/kernel/acpi.c | 33
-rw-r--r-- arch/ia64/kernel/head.S | 4
-rw-r--r-- arch/ia64/kernel/ia64_ksyms.c | 2
-rw-r--r-- arch/ia64/kernel/mca_asm.S | 2
-rw-r--r-- arch/ia64/kernel/relocate_kernel.S | 2
-rw-r--r-- arch/ia64/kernel/setup.c | 27
-rw-r--r-- arch/ia64/kernel/vmlinux.lds.S | 11
-rw-r--r-- arch/ia64/mm/contig.c | 99
-rw-r--r-- arch/ia64/mm/discontig.c | 129
-rw-r--r-- arch/ia64/mm/init.c | 4
-rw-r--r-- arch/ia64/sn/kernel/sn2/sn2_smp.c | 8
-rw-r--r-- arch/ia64/xen/irq_xen.c | 131
-rw-r--r-- arch/ia64/xen/time.c | 22
-rw-r--r-- arch/m68k/include/asm/pgtable_mm.h | 4
-rw-r--r-- arch/m68k/sun3/mmu_emu.c | 8
-rw-r--r-- arch/mn10300/kernel/kprobes.c | 61
-rw-r--r-- arch/powerpc/include/asm/smp.h | 2
-rw-r--r-- arch/powerpc/kernel/perf_callchain.c | 4
-rw-r--r-- arch/powerpc/kernel/setup-common.c | 4
-rw-r--r-- arch/powerpc/kernel/smp.c | 2
-rw-r--r-- arch/powerpc/platforms/cell/interrupt.c | 14
-rw-r--r-- arch/powerpc/platforms/pseries/dtl.c | 4
-rw-r--r-- arch/sparc/kernel/nmi.c | 8
-rw-r--r-- arch/x86/include/asm/percpu.h | 104
-rw-r--r-- arch/x86/kernel/apic/nmi.c | 8
-rw-r--r-- arch/x86/kernel/cpu/common.c | 8
-rw-r--r-- arch/x86/kernel/cpu/cpu_debug.c | 30
-rw-r--r-- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 28
-rw-r--r-- arch/x86/kernel/cpu/intel_cacheinfo.c | 54
-rw-r--r-- arch/x86/kernel/ds.c | 4
-rw-r--r-- arch/x86/kvm/svm.c | 64
-rw-r--r-- arch/x86/xen/smp.c | 41
-rw-r--r-- arch/x86/xen/time.c | 24
-rw-r--r-- crypto/cryptd.c | 2
-rw-r--r-- drivers/base/cpu.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq.c | 16
-rw-r--r-- drivers/cpufreq/freq_table.c | 12
-rw-r--r-- drivers/crypto/padlock-aes.c | 12
-rw-r--r-- drivers/dma/dmaengine.c | 36
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_irq.c | 3
-rw-r--r-- drivers/lguest/x86/core.c | 6
-rw-r--r-- drivers/net/chelsio/sge.c | 5
-rw-r--r-- drivers/net/loopback.c | 2
-rw-r--r-- drivers/net/veth.c | 7
-rw-r--r-- drivers/oprofile/cpu_buffer.c | 19
-rw-r--r-- drivers/oprofile/cpu_buffer.h | 4
-rw-r--r-- drivers/oprofile/oprofile_stats.c | 4
-rw-r--r-- drivers/pcmcia/Kconfig | 2
-rw-r--r-- drivers/pcmcia/Makefile | 2
-rw-r--r-- drivers/pcmcia/pxa2xx_base.c | 19
-rw-r--r-- drivers/pcmcia/pxa2xx_base.h | 3
-rw-r--r-- drivers/pcmcia/pxa2xx_palmtc.c | 2
-rw-r--r-- drivers/pcmcia/pxa2xx_stargate2.c | 2
-rw-r--r-- drivers/pcmcia/pxa2xx_viper.c | 119
-rw-r--r-- drivers/s390/net/netiucv.c | 8
-rw-r--r-- fs/ext4/mballoc.c | 2
-rw-r--r-- fs/nfs/callback.c | 13
-rw-r--r-- fs/nfs/callback.h | 16
-rw-r--r-- fs/nfs/callback_proc.c | 64
-rw-r--r-- fs/nfs/callback_xdr.c | 34
-rw-r--r-- fs/nfs/client.c | 14
-rw-r--r-- fs/nfs/delegation.c | 77
-rw-r--r-- fs/nfs/delegation.h | 7
-rw-r--r-- fs/nfs/dir.c | 67
-rw-r--r-- fs/nfs/dns_resolve.c | 4
-rw-r--r-- fs/nfs/internal.h | 54
-rw-r--r-- fs/nfs/iostat.h | 24
-rw-r--r-- fs/nfs/nfs4_fs.h | 12
-rw-r--r-- fs/nfs/nfs4proc.c | 458
-rw-r--r-- fs/nfs/nfs4state.c | 225
-rw-r--r-- fs/nfs/nfs4xdr.c | 135
-rw-r--r-- fs/nfs/read.c | 12
-rw-r--r-- fs/nfs/super.c | 104
-rw-r--r-- fs/nfs/unlink.c | 2
-rw-r--r-- fs/nfs/write.c | 8
-rw-r--r-- fs/xfs/xfs_mount.c | 12
-rw-r--r-- include/asm-generic/percpu.h | 5
-rw-r--r-- include/linux/nfs4.h | 3
-rw-r--r-- include/linux/nfs_fs_sb.h | 1
-rw-r--r-- include/linux/nfs_xdr.h | 13
-rw-r--r-- include/linux/percpu-defs.h | 1
-rw-r--r-- include/linux/percpu.h | 434
-rw-r--r-- include/linux/sunrpc/sched.h | 2
-rw-r--r-- include/linux/vmstat.h | 10
-rw-r--r-- include/net/neighbour.h | 7
-rw-r--r-- include/net/netfilter/nf_conntrack.h | 4
-rw-r--r-- include/net/snmp.h | 50
-rw-r--r-- kernel/lockdep.c | 11
-rw-r--r-- kernel/module.c | 150
-rw-r--r-- kernel/rcutorture.c | 8
-rw-r--r-- kernel/sched.c | 8
-rw-r--r-- kernel/softirq.c | 4
-rw-r--r-- kernel/softlockup.c | 54
-rw-r--r-- kernel/time/timer_stats.c | 11
-rw-r--r-- kernel/trace/trace.c | 12
-rw-r--r-- kernel/trace/trace.h | 2
-rw-r--r-- kernel/trace/trace_functions_graph.c | 4
-rw-r--r-- kernel/trace/trace_hw_branches.c | 51
-rw-r--r-- mm/Makefile | 4
-rw-r--r-- mm/allocpercpu.c | 177
-rw-r--r-- mm/percpu.c | 24
-rw-r--r-- mm/slab.c | 18
-rw-r--r-- mm/vmalloc.c | 4
-rw-r--r-- mm/vmstat.c | 7
-rw-r--r-- net/sunrpc/addr.c | 10
-rw-r--r-- net/sunrpc/auth.c | 39
-rw-r--r-- net/sunrpc/auth_gss/auth_gss.c | 6
-rw-r--r-- net/sunrpc/clnt.c | 54
-rw-r--r-- net/sunrpc/rpcb_clnt.c | 104
-rw-r--r-- net/sunrpc/sunrpc_syms.c | 3
-rw-r--r-- net/sunrpc/xprt.c | 4
-rw-r--r-- net/sunrpc/xprtsock.c | 2
143 files changed, 5659 insertions, 1679 deletions
diff --git a/Documentation/spinlocks.txt b/Documentation/spinlocks.txt
index 619699dde593..178c831b907d 100644
--- a/Documentation/spinlocks.txt
+++ b/Documentation/spinlocks.txt
@@ -1,73 +1,8 @@
-SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
-are hence deprecated.
+Lesson 1: Spin locks
 
-Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
-__SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate for static
-initialization.
-
-Most of the time, you can simply turn:
-
-	static spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
-
-into:
-
-	static DEFINE_SPINLOCK(xxx_lock);
-
-Static structure member variables go from:
-
-	struct foo bar {
-		.lock = SPIN_LOCK_UNLOCKED;
-	};
-
-to:
-
-	struct foo bar {
-		.lock = __SPIN_LOCK_UNLOCKED(bar.lock);
-	};
-
-Declaration of static rw_locks undergo a similar transformation.
-
-Dynamic initialization, when necessary, may be performed as
-demonstrated below.
-
-   spinlock_t xxx_lock;
-   rwlock_t xxx_rw_lock;
-
-   static int __init xxx_init(void)
-   {
-	spin_lock_init(&xxx_lock);
-	rwlock_init(&xxx_rw_lock);
-	...
-   }
-
-   module_init(xxx_init);
-
-The following discussion is still valid, however, with the dynamic
-initialization of spinlocks or with DEFINE_SPINLOCK, etc., used
-instead of SPIN_LOCK_UNLOCKED.
-
------------------------
-
-On Fri, 2 Jan 1998, Doug Ledford wrote:
->
-> I'm working on making the aic7xxx driver more SMP friendly (as well as
-> importing the latest FreeBSD sequencer code to have 7895 support) and wanted
-> to get some info from you. The goal here is to make the various routines
-> SMP safe as well as UP safe during interrupts and other manipulating
-> routines. So far, I've added a spin_lock variable to things like my queue
-> structs. Now, from what I recall, there are some spin lock functions I can
-> use to lock these spin locks from other use as opposed to a (nasty)
-> save_flags(); cli(); stuff; restore_flags(); construct. Where do I find
-> these routines and go about making use of them? Do they only lock on a
-> per-processor basis or can they also lock say an interrupt routine from
-> mucking with a queue if the queue routine was manipulating it when the
-> interrupt occurred, or should I still use a cli(); based construct on that
-> one?
-
-See <asm/spinlock.h>. The basic version is:
-
-   spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
+The most basic primitive for locking is spinlock.
 
+static DEFINE_SPINLOCK(xxx_lock);
 
 	unsigned long flags;
 
@@ -75,13 +10,11 @@ See <asm/spinlock.h>. The basic version is:
 	... critical section here ..
 	spin_unlock_irqrestore(&xxx_lock, flags);
 
-and the above is always safe. It will disable interrupts _locally_, but the
+The above is always safe. It will disable interrupts _locally_, but the
 spinlock itself will guarantee the global lock, so it will guarantee that
 there is only one thread-of-control within the region(s) protected by that
-lock.
-
-Note that it works well even under UP - the above sequence under UP
-essentially is just the same as doing a
+lock. This works well even under UP. The above sequence under UP
+essentially is just the same as doing
 
 	unsigned long flags;
 
@@ -91,15 +24,13 @@ essentially is just the same as doing a
 
 so the code does _not_ need to worry about UP vs SMP issues: the spinlocks
 work correctly under both (and spinlocks are actually more efficient on
-architectures that allow doing the "save_flags + cli" in one go because I
-don't export that interface normally).
+architectures that allow doing the "save_flags + cli" in one operation).
+
+   NOTE! Implications of spin_locks for memory are further described in:
 
-NOTE NOTE NOTE! The reason the spinlock is so much faster than a global
-interrupt lock under SMP is exactly because it disables interrupts only on
-the local CPU. The spin-lock is safe only when you _also_ use the lock
-itself to do locking across CPU's, which implies that EVERYTHING that
-touches a shared variable has to agree about the spinlock they want to
-use.
+     Documentation/memory-barriers.txt
+       (5) LOCK operations.
+       (6) UNLOCK operations.
 
 The above is usually pretty simple (you usually need and want only one
 spinlock for most things - using more than one spinlock can make things a
@@ -120,20 +51,24 @@ and another sequence that does
 then they are NOT mutually exclusive, and the critical regions can happen
 at the same time on two different CPU's. That's fine per se, but the
 critical regions had better be critical for different things (ie they
 can't stomp on each other).
 
 The above is a problem mainly if you end up mixing code - for example the
 routines in ll_rw_block() tend to use cli/sti to protect the atomicity of
 their actions, and if a driver uses spinlocks instead then you should
-think about issues like the above..
+think about issues like the above.
 
 This is really the only really hard part about spinlocks: once you start
 using spinlocks they tend to expand to areas you might not have noticed
 before, because you have to make sure the spinlocks correctly protect the
 shared data structures _everywhere_ they are used. The spinlocks are most
-easily added to places that are completely independent of other code (ie
-internal driver data structures that nobody else ever touches, for
-example).
+easily added to places that are completely independent of other code (for
+example, internal driver data structures that nobody else ever touches).
+
+   NOTE! The spin-lock is safe only when you _also_ use the lock itself
+   to do locking across CPU's, which implies that EVERYTHING that
+   touches a shared variable has to agree about the spinlock they want
+   to use.
 
 ----
 
@@ -141,13 +76,17 @@ Lesson 2: reader-writer spinlocks.
 
 If your data accesses have a very natural pattern where you usually tend
 to mostly read from the shared variables, the reader-writer locks
-(rw_lock) versions of the spinlocks are often nicer. They allow multiple
+(rw_lock) versions of the spinlocks are sometimes useful. They allow multiple
 readers to be in the same critical region at once, but if somebody wants
-to change the variables it has to get an exclusive write lock. The
-routines look the same as above:
+to change the variables it has to get an exclusive write lock.
 
-   rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
+   NOTE! reader-writer locks require more atomic memory operations than
+   simple spinlocks. Unless the reader critical section is long, you
+   are better off just using spinlocks.
 
+The routines look the same as above:
+
+   rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
 
 	unsigned long flags;
 
@@ -159,18 +98,21 @@ routines look the same as above:
 	.. read and write exclusive access to the info ...
 	write_unlock_irqrestore(&xxx_lock, flags);
 
-The above kind of lock is useful for complex data structures like linked
-lists etc, especially when you know that most of the work is to just
-traverse the list searching for entries without changing the list itself,
-for example. Then you can use the read lock for that kind of list
-traversal, which allows many concurrent readers. Anything that _changes_
-the list will have to get the write lock.
+The above kind of lock may be useful for complex data structures like
+linked lists, especially searching for entries without changing the list
+itself. The read lock allows many concurrent readers. Anything that
+_changes_ the list will have to get the write lock.
+
+   NOTE! RCU is better for list traversal, but requires careful
+   attention to design detail (see Documentation/RCU/listRCU.txt).
 
-Note: you cannot "upgrade" a read-lock to a write-lock, so if you at _any_
+Also, you cannot "upgrade" a read-lock to a write-lock, so if you at _any_
 time need to do any changes (even if you don't do it every time), you have
-to get the write-lock at the very beginning. I could fairly easily add a
-primitive to create a "upgradeable" read-lock, but it hasn't been an issue
-yet. Tell me if you'd want one.
+to get the write-lock at the very beginning.
+
+   NOTE! We are working hard to remove reader-writer spinlocks in most
+   cases, so please don't add a new one without consensus. (Instead, see
+   Documentation/RCU/rcu.txt for complete information.)
 
 ----
 
@@ -233,4 +175,46 @@ indeed), while write-locks need to protect themselves against interrupts.
 
 		Linus
 
+----
+
+Reference information:
+
+For dynamic initialization, use spin_lock_init() or rwlock_init() as
+appropriate:
+
+   spinlock_t xxx_lock;
+   rwlock_t xxx_rw_lock;
+
+   static int __init xxx_init(void)
+   {
+	spin_lock_init(&xxx_lock);
+	rwlock_init(&xxx_rw_lock);
+	...
+   }
+
+   module_init(xxx_init);
+
+For static initialization, use DEFINE_SPINLOCK() / DEFINE_RWLOCK() or
+__SPIN_LOCK_UNLOCKED() / __RW_LOCK_UNLOCKED() as appropriate.
+
+SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED are deprecated. These interfere
+with lockdep state tracking.
+
+Most of the time, you can simply turn:
+	static spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
+into:
+	static DEFINE_SPINLOCK(xxx_lock);
+
+Static structure member variables go from:
+
+	struct foo bar {
+		.lock = SPIN_LOCK_UNLOCKED;
+	};
+
+to:
 
+	struct foo bar {
+		.lock = __SPIN_LOCK_UNLOCKED(bar.lock);
+	};
+
+Declaration of static rw_locks undergo a similar transformation.
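
As a concrete illustration of the interfaces the rewritten document
describes, here is a minimal, self-contained sketch. It is not part of
this patch, and the xxx_* names are hypothetical; it simply exercises
DEFINE_SPINLOCK(), DEFINE_RWLOCK(), and the irqsave/irqrestore variants
the way the text above recommends:

	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/types.h>

	/* Hypothetical driver state; names are illustrative only. */
	static DEFINE_SPINLOCK(xxx_lock);	/* static initialization */
	static DEFINE_RWLOCK(xxx_rw_lock);
	static LIST_HEAD(xxx_list);
	static int xxx_counter;

	/* Exclusive access; safe against other CPUs and local interrupts. */
	static void xxx_update(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&xxx_lock, flags);
		xxx_counter++;			/* ... critical section ... */
		spin_unlock_irqrestore(&xxx_lock, flags);
	}

	/* Read-mostly traversal: many readers may hold the lock at once. */
	static bool xxx_present(struct list_head *entry)
	{
		struct list_head *pos;
		bool found = false;

		read_lock(&xxx_rw_lock);
		list_for_each(pos, &xxx_list)
			if (pos == entry)
				found = true;
		read_unlock(&xxx_rw_lock);

		return found;
	}

	/* Anything that changes the list must take the write lock instead. */
	static void xxx_add(struct list_head *entry)
	{
		unsigned long flags;

		write_lock_irqsave(&xxx_rw_lock, flags);
		list_add(entry, &xxx_list);
		write_unlock_irqrestore(&xxx_rw_lock, flags);
	}

Note that, as the document now stresses, every path touching xxx_list
agrees on xxx_rw_lock, and a read lock is never "upgraded" to a write
lock.
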
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cf8a99f19dc4..233a222752c0 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -603,6 +603,7 @@ config ARCH_SA1100
 	select ARCH_SPARSEMEM_ENABLE
 	select ARCH_MTD_XIP
 	select ARCH_HAS_CPUFREQ
+	select CPU_FREQ
 	select GENERIC_GPIO
 	select GENERIC_TIME
 	select GENERIC_CLOCKEVENTS
@@ -1359,13 +1360,9 @@ source "drivers/cpufreq/Kconfig"
 
 config CPU_FREQ_SA1100
 	bool
-	depends on CPU_FREQ && (SA1100_H3100 || SA1100_H3600 || SA1100_LART || SA1100_PLEB || SA1100_BADGE4 || SA1100_HACKKIT)
-	default y
 
 config CPU_FREQ_SA1110
 	bool
-	depends on CPU_FREQ && (SA1100_ASSABET || SA1100_CERF || SA1100_PT_SYSTEM3)
-	default y
 
 config CPU_FREQ_INTEGRATOR
 	tristate "CPUfreq driver for ARM Integrator CPUs"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index ff54c23d085e..5cb9326df7a7 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -71,6 +71,14 @@ config DEBUG_LL
 	  in the kernel. This is helpful if you are debugging code that
 	  executes before the console is initialized.
 
+config EARLY_PRINTK
+	bool "Early printk"
+	depends on DEBUG_LL
+	help
+	  Say Y here if you want to have an early console using the
+	  kernel low-level debugging functions. Add earlyprintk to your
+	  kernel parameters to enable this console.
+
 config DEBUG_ICEDCC
 	bool "Kernel low-level debugging via EmbeddedICE DCC channel"
 	depends on DEBUG_LL
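
The diffstat above also adds arch/arm/kernel/early_printk.c (57 lines),
whose body is not shown in this excerpt. A minimal sketch of how an
EARLY_PRINTK console is typically built on top of the DEBUG_LL printch()
routine follows; the details here (names, flags) are assumptions for
illustration, not the patch's actual contents:

	#include <linux/console.h>
	#include <linux/init.h>

	extern void printch(int ch);	/* DEBUG_LL low-level character output */

	static void early_console_write(struct console *con, const char *s,
					unsigned int n)
	{
		while (n-- > 0) {
			if (*s == '\n')
				printch('\r');	/* CRLF for raw serial terminals */
			printch(*s++);
		}
	}

	static struct console early_console = {
		.name	= "earlycon",
		.write	= early_console_write,
		.flags	= CON_PRINTBUFFER | CON_BOOT,
		.index	= -1,
	};

	/* Passing "earlyprintk" on the command line registers the console. */
	static int __init setup_early_printk(char *buf)
	{
		register_console(&early_console);
		return 0;
	}
	early_param("earlyprintk", setup_early_printk);

Because such a console would be marked CON_BOOT, the kernel unregisters
it automatically once a real console comes up.
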
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
new file mode 100644
index 000000000000..823b11e7091a
--- /dev/null
+++ b/arch/arm/configs/zeus_defconfig
@@ -0,0 +1,2032 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32
+# Tue Dec 8 20:27:05 2009
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_MTD_XIP=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+# CONFIG_TREE_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
+CONFIG_TINY_RCU=y
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=13
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+# CONFIG_BLK_DEV_INITRD is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_DEFAULT_DEADLINE=y
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="deadline"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+CONFIG_ARCH_PXA=y
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_U8500 is not set
+
+#
+# Intel PXA2xx/PXA3xx Implementations
+#
+
+#
+# Intel/Marvell Dev Platforms (sorted by hardware release time)
+#
+# CONFIG_ARCH_LUBBOCK is not set
+# CONFIG_MACH_MAINSTONE is not set
+# CONFIG_MACH_ZYLONITE300 is not set
+# CONFIG_MACH_ZYLONITE320 is not set
+# CONFIG_MACH_LITTLETON is not set
+# CONFIG_MACH_TAVOREVB is not set
+# CONFIG_MACH_SAAR is not set
+
+#
+# Third Party Dev Platforms (sorted by vendor name)
+#
+# CONFIG_ARCH_PXA_IDP is not set
+# CONFIG_ARCH_VIPER is not set
+CONFIG_MACH_ARCOM_ZEUS=y
+# CONFIG_MACH_BALLOON3 is not set
+# CONFIG_MACH_CSB726 is not set
+# CONFIG_MACH_ARMCORE is not set
+# CONFIG_MACH_EM_X270 is not set
+# CONFIG_MACH_EXEDA is not set
+# CONFIG_MACH_CM_X300 is not set
+# CONFIG_ARCH_GUMSTIX is not set
+# CONFIG_MACH_INTELMOTE2 is not set
+# CONFIG_MACH_STARGATE2 is not set
+# CONFIG_MACH_XCEP is not set
+# CONFIG_TRIZEPS_PXA is not set
+CONFIG_ARCOM_PCMCIA=y
+# CONFIG_MACH_LOGICPD_PXA270 is not set
+# CONFIG_MACH_PCM027 is not set
+# CONFIG_MACH_COLIBRI is not set
+# CONFIG_MACH_COLIBRI300 is not set
+# CONFIG_MACH_COLIBRI320 is not set
+
+#
+# End-user Products (sorted by vendor name)
+#
+# CONFIG_MACH_H4700 is not set
+# CONFIG_MACH_H5000 is not set
+# CONFIG_MACH_HIMALAYA is not set
+# CONFIG_MACH_MAGICIAN is not set
+# CONFIG_MACH_MIOA701 is not set
+# CONFIG_PXA_EZX is not set
+# CONFIG_MACH_MP900C is not set
+# CONFIG_ARCH_PXA_PALM is not set
+# CONFIG_PXA_SHARPSL is not set
+# CONFIG_ARCH_PXA_ESERIES is not set
+CONFIG_PXA27x=y
+CONFIG_PXA_SSP=y
+CONFIG_PXA_HAVE_BOARD_IRQS=y
+CONFIG_PXA_HAVE_ISA_IRQS=y
+CONFIG_PLAT_PXA=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_XSCALE=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5T=y
+CONFIG_CPU_PABRT_LEGACY=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_DCACHE_DISABLE is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_IWMMXT=y
+CONFIG_XSCALE_PMU=y
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+CONFIG_ISA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+CONFIG_PCCARD=m
+CONFIG_PCMCIA=m
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_PCMCIA_IOCTL=y
+
+#
+# PC-card bridges
+#
+# CONFIG_I82365 is not set
+# CONFIG_TCIC is not set
+CONFIG_PCMCIA_SOC_COMMON=m
+CONFIG_PCMCIA_PXA2XX=m
+# CONFIG_PCMCIA_DEBUG is not set
+CONFIG_PCMCIA_PROBE=y
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=31:02 rootfstype=jffs2 ro console=ttyS0,115200"
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+CONFIG_CPU_FREQ_STAT=y
+# CONFIG_CPU_FREQ_STAT_DETAILS is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_APM_EMULATION=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+# CONFIG_BT_SCO is not set
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+# CONFIG_BT_BNEP_MC_FILTER is not set
+# CONFIG_BT_BNEP_PROTO_FILTER is not set
+# CONFIG_BT_HIDP is not set
+
+#
+# Bluetooth device drivers
+#
+# CONFIG_BT_HCIBTUSB is not set
+# CONFIG_BT_HCIBTSDIO is not set
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+# CONFIG_BT_HCIUART_LL is not set
+# CONFIG_BT_HCIBCM203X is not set
+# CONFIG_BT_HCIBPA10X is not set
+# CONFIG_BT_HCIBFUSB is not set
+# CONFIG_BT_HCIDTL1 is not set
+# CONFIG_BT_HCIBT3C is not set
+# CONFIG_BT_HCIBLUECARD is not set
+# CONFIG_BT_HCIBTUART is not set
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_BT_MRVL is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_SPY=y
+CONFIG_WEXT_PRIV=y
+CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=m
+# CONFIG_LIB80211_DEBUG is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_MINSTREL=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_LEDS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
+CONFIG_MTD_REDBOOT_PARTS_READONLY=y
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=m
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_NOSWAP=y
+# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
+# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+# CONFIG_MTD_CFI_I2 is not set
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_OTP is not set
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_RAM=y
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_XIP is not set
+
+#
+# Mapping drivers for chip access
+#
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+CONFIG_MTD_PXA2XX=y
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_IMPA7 is not set
+# CONFIG_MTD_GPIO_ADDR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+# CONFIG_PNP is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_IWMC3200TOP is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=m
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+CONFIG_ATA=m
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
+# CONFIG_SATA_PMP is not set
+CONFIG_ATA_SFF=y
+# CONFIG_SATA_MV is not set
+# CONFIG_PATA_LEGACY is not set
+CONFIG_PATA_PCMCIA=m
+# CONFIG_PATA_QDI is not set
+# CONFIG_PATA_WINBOND_VLB is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_ARCNET is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_SMC91X is not set
+CONFIG_DM9000=y
+CONFIG_DM9000_DEBUGLEVEL=4
+# CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_NET_VENDOR_RACAL is not set
+# CONFIG_DNET is not set
+# CONFIG_AT1700 is not set
+# CONFIG_DEPCA is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_NET_PCI is not set
+# CONFIG_B44 is not set
+# CONFIG_CS89x0 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_TR is not set
+CONFIG_WLAN=y
+# CONFIG_PCMCIA_RAYCS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_ATMEL is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_AIRO_CS is not set
+# CONFIG_PCMCIA_WL3501 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_ATH_COMMON is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IWM is not set
+# CONFIG_LIBERTAS is not set
+CONFIG_HERMES=m
+CONFIG_HERMES_CACHE_FW_ON_INIT=y
+CONFIG_PCMCIA_HERMES=m
+# CONFIG_PCMCIA_SPECTRUM is not set
+# CONFIG_P54_COMMON is not set
+CONFIG_RT2X00=m
+# CONFIG_RT2500USB is not set
+CONFIG_RT73USB=m
+# CONFIG_RT2800USB is not set
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+# CONFIG_RT2X00_DEBUG is not set
+# CONFIG_WL12XX is not set
+# CONFIG_ZD1211RW is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_NET_PCMCIA=y
+# CONFIG_PCMCIA_3C589 is not set
+# CONFIG_PCMCIA_3C574 is not set
+# CONFIG_PCMCIA_FMVJ18X is not set
+# CONFIG_PCMCIA_PCNET is not set
+# CONFIG_PCMCIA_NMCLAN is not set
+# CONFIG_PCMCIA_SMC91C92 is not set
+# CONFIG_PCMCIA_XIRC2PS is not set
+# CONFIG_PCMCIA_AXNET is not set
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_ASYNC=m
+# CONFIG_PPP_SYNC_TTY is not set
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+# CONFIG_PPP_MPPE is not set
+# CONFIG_PPPOE is not set
+# CONFIG_PPPOL2TP is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+CONFIG_TOUCHSCREEN_FUJITSU=m
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+CONFIG_TOUCHSCREEN_ELO=m
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+CONFIG_TOUCHSCREEN_MTOUCH=m
+CONFIG_TOUCHSCREEN_INEXIO=m
+# CONFIG_TOUCHSCREEN_MK712 is not set
+CONFIG_TOUCHSCREEN_HTCPEN=m
+CONFIG_TOUCHSCREEN_PENMOUNT=m
+CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
+CONFIG_TOUCHSCREEN_TOUCHWIN=m
+# CONFIG_TOUCHSCREEN_WM97XX is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+CONFIG_TOUCHSCREEN_TOUCHIT213=m
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_CS is not set
+CONFIG_SERIAL_8250_NR_UARTS=7
+CONFIG_SERIAL_8250_RUNTIME_UARTS=7
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_PXA is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=m
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# PCMCIA character devices
+#
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_IPWIRELESS is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_HELPER_AUTO is not set
+
+#
+# I2C Algorithms
+#
+CONFIG_I2C_ALGOBIT=y
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_PXA=y
+# CONFIG_I2C_PXA_SLAVE is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_ELEKTOR is not set
+# CONFIG_I2C_PCA_ISA is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_PXA2XX=y
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+CONFIG_GPIO_PCA953X=y
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+CONFIG_SENSORS_LM75=m
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_SA1100_WATCHDOG is not set
+
+#
+# ISA-based Watchdog Cards
+#
+# CONFIG_PCWATCHDOG is not set
+# CONFIG_MIXCOMWD is not set
+# CONFIG_WDT is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=m
+CONFIG_FB_CFB_COPYAREA=m
+CONFIG_FB_CFB_IMAGEBLIT=m
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_PXA=m
+# CONFIG_FB_PXA_OVERLAY is not set
+# CONFIG_FB_PXA_SMARTPANEL is not set
+CONFIG_FB_PXA_PARAMETERS=y
+# CONFIG_FB_MBX is not set
+# CONFIG_FB_W100 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=m
+# CONFIG_LCD_LMS283GF05 is not set
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+# CONFIG_LCD_PLATFORM is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=m
+CONFIG_BACKLIGHT_GENERIC=m
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=m
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+CONFIG_SOUND=m
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_JACK=y
+# CONFIG_SND_SEQUENCER is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_VMASTER=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_AC97_CODEC=m
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_AC97_POWER_SAVE is not set
+CONFIG_SND_ARM=y
+CONFIG_SND_PXA2XX_PCM=m
+CONFIG_SND_PXA2XX_LIB=m
+CONFIG_SND_PXA2XX_LIB_AC97=y
+CONFIG_SND_PXA2XX_AC97=m
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB=y
+# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_PCMCIA is not set
+CONFIG_SND_SOC=m
+CONFIG_SND_PXA2XX_SOC=m
+CONFIG_SND_SOC_I2C_AND_SPI=m
+# CONFIG_SND_SOC_ALL_CODECS is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=m
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=m
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_SUSPEND=y
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+CONFIG_USB_OHCI_HCD=m
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+# CONFIG_USB_EZUSB is not set
+CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP210X is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+CONFIG_USB_SERIAL_MCT_U232=m
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+# CONFIG_USB_SERIAL_PL2303 is not set
1443# CONFIG_USB_SERIAL_OTI6858 is not set
1444# CONFIG_USB_SERIAL_QUALCOMM is not set
1445# CONFIG_USB_SERIAL_SPCP8X5 is not set
1446# CONFIG_USB_SERIAL_HP4X is not set
1447# CONFIG_USB_SERIAL_SAFE is not set
1448# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
1449# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
1450# CONFIG_USB_SERIAL_SYMBOL is not set
1451# CONFIG_USB_SERIAL_TI is not set
1452# CONFIG_USB_SERIAL_CYBERJACK is not set
1453# CONFIG_USB_SERIAL_XIRCOM is not set
1454# CONFIG_USB_SERIAL_OPTION is not set
1455# CONFIG_USB_SERIAL_OMNINET is not set
1456# CONFIG_USB_SERIAL_OPTICON is not set
1457# CONFIG_USB_SERIAL_DEBUG is not set
1458
1459#
1460# USB Miscellaneous drivers
1461#
1462# CONFIG_USB_EMI62 is not set
1463# CONFIG_USB_EMI26 is not set
1464# CONFIG_USB_ADUTUX is not set
1465# CONFIG_USB_SEVSEG is not set
1466# CONFIG_USB_RIO500 is not set
1467# CONFIG_USB_LEGOTOWER is not set
1468# CONFIG_USB_LCD is not set
1469# CONFIG_USB_BERRY_CHARGE is not set
1470# CONFIG_USB_LED is not set
1471# CONFIG_USB_CYPRESS_CY7C63 is not set
1472# CONFIG_USB_CYTHERM is not set
1473# CONFIG_USB_IDMOUSE is not set
1474# CONFIG_USB_FTDI_ELAN is not set
1475# CONFIG_USB_APPLEDISPLAY is not set
1476# CONFIG_USB_LD is not set
1477# CONFIG_USB_TRANCEVIBRATOR is not set
1478# CONFIG_USB_IOWARRIOR is not set
1479# CONFIG_USB_TEST is not set
1480# CONFIG_USB_ISIGHTFW is not set
1481# CONFIG_USB_VST is not set
1482CONFIG_USB_GADGET=m
1483# CONFIG_USB_GADGET_DEBUG is not set
1484# CONFIG_USB_GADGET_DEBUG_FILES is not set
1485CONFIG_USB_GADGET_VBUS_DRAW=2
1486CONFIG_USB_GADGET_SELECTED=y
1487# CONFIG_USB_GADGET_AT91 is not set
1488# CONFIG_USB_GADGET_ATMEL_USBA is not set
1489# CONFIG_USB_GADGET_FSL_USB2 is not set
1490# CONFIG_USB_GADGET_LH7A40X is not set
1491# CONFIG_USB_GADGET_OMAP is not set
1492# CONFIG_USB_GADGET_PXA25X is not set
1493# CONFIG_USB_GADGET_R8A66597 is not set
1494CONFIG_USB_GADGET_PXA27X=y
1495CONFIG_USB_PXA27X=m
1496# CONFIG_USB_GADGET_S3C_HSOTG is not set
1497# CONFIG_USB_GADGET_IMX is not set
1498# CONFIG_USB_GADGET_S3C2410 is not set
1499# CONFIG_USB_GADGET_M66592 is not set
1500# CONFIG_USB_GADGET_AMD5536UDC is not set
1501# CONFIG_USB_GADGET_FSL_QE is not set
1502# CONFIG_USB_GADGET_CI13XXX is not set
1503# CONFIG_USB_GADGET_NET2280 is not set
1504# CONFIG_USB_GADGET_GOKU is not set
1505# CONFIG_USB_GADGET_LANGWELL is not set
1506# CONFIG_USB_GADGET_DUMMY_HCD is not set
1507# CONFIG_USB_GADGET_DUALSPEED is not set
1508# CONFIG_USB_ZERO is not set
1509# CONFIG_USB_AUDIO is not set
1510CONFIG_USB_ETH=m
1511CONFIG_USB_ETH_RNDIS=y
1512# CONFIG_USB_ETH_EEM is not set
1513CONFIG_USB_GADGETFS=m
1514CONFIG_USB_FILE_STORAGE=m
1515# CONFIG_USB_FILE_STORAGE_TEST is not set
1516CONFIG_USB_G_SERIAL=m
1517# CONFIG_USB_MIDI_GADGET is not set
1518CONFIG_USB_G_PRINTER=m
1519# CONFIG_USB_CDC_COMPOSITE is not set
1520
1521#
1522# OTG and related infrastructure
1523#
1524CONFIG_USB_OTG_UTILS=y
1525# CONFIG_USB_GPIO_VBUS is not set
1526# CONFIG_NOP_USB_XCEIV is not set
1527CONFIG_MMC=y
1528# CONFIG_MMC_DEBUG is not set
1529# CONFIG_MMC_UNSAFE_RESUME is not set
1530
1531#
1532# MMC/SD/SDIO Card Drivers
1533#
1534CONFIG_MMC_BLOCK=y
1535# CONFIG_MMC_BLOCK_BOUNCE is not set
1536# CONFIG_SDIO_UART is not set
1537# CONFIG_MMC_TEST is not set
1538
1539#
1540# MMC/SD/SDIO Host Controller Drivers
1541#
1542CONFIG_MMC_PXA=y
1543# CONFIG_MMC_SDHCI is not set
1544# CONFIG_MMC_AT91 is not set
1545# CONFIG_MMC_ATMELMCI is not set
1546# CONFIG_MMC_SPI is not set
1547# CONFIG_MEMSTICK is not set
1548CONFIG_NEW_LEDS=y
1549CONFIG_LEDS_CLASS=m
1550
1551#
1552# LED drivers
1553#
1554# CONFIG_LEDS_PCA9532 is not set
1555CONFIG_LEDS_GPIO=m
1556CONFIG_LEDS_GPIO_PLATFORM=y
1557# CONFIG_LEDS_LP3944 is not set
1558# CONFIG_LEDS_PCA955X is not set
1559# CONFIG_LEDS_DAC124S085 is not set
1560# CONFIG_LEDS_BD2802 is not set
1561
1562#
1563# LED Triggers
1564#
1565CONFIG_LEDS_TRIGGERS=y
1566CONFIG_LEDS_TRIGGER_TIMER=m
1567CONFIG_LEDS_TRIGGER_HEARTBEAT=m
1568CONFIG_LEDS_TRIGGER_BACKLIGHT=m
1569CONFIG_LEDS_TRIGGER_GPIO=m
1570CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
1571
1572#
1573# iptables trigger is under Netfilter config (LED target)
1574#
1575# CONFIG_ACCESSIBILITY is not set
1576CONFIG_RTC_LIB=y
1577CONFIG_RTC_CLASS=m
1578
1579#
1580# RTC interfaces
1581#
1582CONFIG_RTC_INTF_SYSFS=y
1583CONFIG_RTC_INTF_PROC=y
1584CONFIG_RTC_INTF_DEV=y
1585# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
1586# CONFIG_RTC_DRV_TEST is not set
1587
1588#
1589# I2C RTC drivers
1590#
1591# CONFIG_RTC_DRV_DS1307 is not set
1592# CONFIG_RTC_DRV_DS1374 is not set
1593# CONFIG_RTC_DRV_DS1672 is not set
1594# CONFIG_RTC_DRV_MAX6900 is not set
1595# CONFIG_RTC_DRV_RS5C372 is not set
1596CONFIG_RTC_DRV_ISL1208=m
1597# CONFIG_RTC_DRV_X1205 is not set
1598# CONFIG_RTC_DRV_PCF8563 is not set
1599# CONFIG_RTC_DRV_PCF8583 is not set
1600# CONFIG_RTC_DRV_M41T80 is not set
1601# CONFIG_RTC_DRV_S35390A is not set
1602# CONFIG_RTC_DRV_FM3130 is not set
1603# CONFIG_RTC_DRV_RX8581 is not set
1604# CONFIG_RTC_DRV_RX8025 is not set
1605
1606#
1607# SPI RTC drivers
1608#
1609# CONFIG_RTC_DRV_M41T94 is not set
1610# CONFIG_RTC_DRV_DS1305 is not set
1611# CONFIG_RTC_DRV_DS1390 is not set
1612# CONFIG_RTC_DRV_MAX6902 is not set
1613# CONFIG_RTC_DRV_R9701 is not set
1614# CONFIG_RTC_DRV_RS5C348 is not set
1615# CONFIG_RTC_DRV_DS3234 is not set
1616# CONFIG_RTC_DRV_PCF2123 is not set
1617
1618#
1619# Platform RTC drivers
1620#
1621# CONFIG_RTC_DRV_CMOS is not set
1622# CONFIG_RTC_DRV_DS1286 is not set
1623# CONFIG_RTC_DRV_DS1511 is not set
1624# CONFIG_RTC_DRV_DS1553 is not set
1625# CONFIG_RTC_DRV_DS1742 is not set
1626# CONFIG_RTC_DRV_STK17TA8 is not set
1627# CONFIG_RTC_DRV_M48T86 is not set
1628# CONFIG_RTC_DRV_M48T35 is not set
1629# CONFIG_RTC_DRV_M48T59 is not set
1630# CONFIG_RTC_DRV_MSM6242 is not set
1631# CONFIG_RTC_DRV_BQ4802 is not set
1632# CONFIG_RTC_DRV_RP5C01 is not set
1633# CONFIG_RTC_DRV_V3020 is not set
1634
1635#
1636# on-CPU RTC drivers
1637#
1638# CONFIG_RTC_DRV_SA1100 is not set
1639CONFIG_RTC_DRV_PXA=m
1640# CONFIG_DMADEVICES is not set
1641# CONFIG_AUXDISPLAY is not set
1642# CONFIG_UIO is not set
1643
1644#
1645# TI VLYNQ
1646#
1647# CONFIG_STAGING is not set
1648
1649#
1650# File systems
1651#
1652CONFIG_EXT2_FS=y
1653# CONFIG_EXT2_FS_XATTR is not set
1654# CONFIG_EXT2_FS_XIP is not set
1655CONFIG_EXT3_FS=y
1656# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
1657# CONFIG_EXT3_FS_XATTR is not set
1658# CONFIG_EXT4_FS is not set
1659CONFIG_JBD=y
1660# CONFIG_REISERFS_FS is not set
1661# CONFIG_JFS_FS is not set
1662# CONFIG_FS_POSIX_ACL is not set
1663# CONFIG_XFS_FS is not set
1664# CONFIG_GFS2_FS is not set
1665# CONFIG_OCFS2_FS is not set
1666# CONFIG_BTRFS_FS is not set
1667# CONFIG_NILFS2_FS is not set
1668CONFIG_FILE_LOCKING=y
1669CONFIG_FSNOTIFY=y
1670# CONFIG_DNOTIFY is not set
1671CONFIG_INOTIFY=y
1672CONFIG_INOTIFY_USER=y
1673# CONFIG_QUOTA is not set
1674# CONFIG_AUTOFS_FS is not set
1675# CONFIG_AUTOFS4_FS is not set
1676# CONFIG_FUSE_FS is not set
1677
1678#
1679# Caches
1680#
1681# CONFIG_FSCACHE is not set
1682
1683#
1684# CD-ROM/DVD Filesystems
1685#
1686# CONFIG_ISO9660_FS is not set
1687# CONFIG_UDF_FS is not set
1688
1689#
1690# DOS/FAT/NT Filesystems
1691#
1692CONFIG_FAT_FS=m
1693# CONFIG_MSDOS_FS is not set
1694CONFIG_VFAT_FS=m
1695CONFIG_FAT_DEFAULT_CODEPAGE=437
1696CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1697# CONFIG_NTFS_FS is not set
1698
1699#
1700# Pseudo filesystems
1701#
1702CONFIG_PROC_FS=y
1703CONFIG_PROC_SYSCTL=y
1704CONFIG_PROC_PAGE_MONITOR=y
1705CONFIG_SYSFS=y
1706CONFIG_TMPFS=y
1707# CONFIG_TMPFS_POSIX_ACL is not set
1708# CONFIG_HUGETLB_PAGE is not set
1709# CONFIG_CONFIGFS_FS is not set
1710CONFIG_MISC_FILESYSTEMS=y
1711# CONFIG_ADFS_FS is not set
1712# CONFIG_AFFS_FS is not set
1713# CONFIG_HFS_FS is not set
1714# CONFIG_HFSPLUS_FS is not set
1715# CONFIG_BEFS_FS is not set
1716# CONFIG_BFS_FS is not set
1717# CONFIG_EFS_FS is not set
1718CONFIG_JFFS2_FS=y
1719CONFIG_JFFS2_FS_DEBUG=0
1720CONFIG_JFFS2_FS_WRITEBUFFER=y
1721# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
1722# CONFIG_JFFS2_SUMMARY is not set
1723# CONFIG_JFFS2_FS_XATTR is not set
1724# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
1725CONFIG_JFFS2_ZLIB=y
1726# CONFIG_JFFS2_LZO is not set
1727CONFIG_JFFS2_RTIME=y
1728# CONFIG_JFFS2_RUBIN is not set
1729# CONFIG_CRAMFS is not set
1730# CONFIG_SQUASHFS is not set
1731# CONFIG_VXFS_FS is not set
1732# CONFIG_MINIX_FS is not set
1733# CONFIG_OMFS_FS is not set
1734# CONFIG_HPFS_FS is not set
1735# CONFIG_QNX4FS_FS is not set
1736# CONFIG_ROMFS_FS is not set
1737# CONFIG_SYSV_FS is not set
1738# CONFIG_UFS_FS is not set
1739CONFIG_NETWORK_FILESYSTEMS=y
1740CONFIG_NFS_FS=y
1741CONFIG_NFS_V3=y
1742# CONFIG_NFS_V3_ACL is not set
1743# CONFIG_NFS_V4 is not set
1744CONFIG_ROOT_NFS=y
1745CONFIG_NFSD=m
1746CONFIG_NFSD_V3=y
1747# CONFIG_NFSD_V3_ACL is not set
1748# CONFIG_NFSD_V4 is not set
1749CONFIG_LOCKD=y
1750CONFIG_LOCKD_V4=y
1751CONFIG_EXPORTFS=m
1752CONFIG_NFS_COMMON=y
1753CONFIG_SUNRPC=y
1754# CONFIG_RPCSEC_GSS_KRB5 is not set
1755# CONFIG_RPCSEC_GSS_SPKM3 is not set
1756# CONFIG_SMB_FS is not set
1757# CONFIG_CIFS is not set
1758# CONFIG_NCP_FS is not set
1759# CONFIG_CODA_FS is not set
1760# CONFIG_AFS_FS is not set
1761
1762#
1763# Partition Types
1764#
1765CONFIG_PARTITION_ADVANCED=y
1766# CONFIG_ACORN_PARTITION is not set
1767# CONFIG_OSF_PARTITION is not set
1768# CONFIG_AMIGA_PARTITION is not set
1769# CONFIG_ATARI_PARTITION is not set
1770# CONFIG_MAC_PARTITION is not set
1771CONFIG_MSDOS_PARTITION=y
1772# CONFIG_BSD_DISKLABEL is not set
1773# CONFIG_MINIX_SUBPARTITION is not set
1774# CONFIG_SOLARIS_X86_PARTITION is not set
1775# CONFIG_UNIXWARE_DISKLABEL is not set
1776# CONFIG_LDM_PARTITION is not set
1777# CONFIG_SGI_PARTITION is not set
1778# CONFIG_ULTRIX_PARTITION is not set
1779# CONFIG_SUN_PARTITION is not set
1780# CONFIG_KARMA_PARTITION is not set
1781# CONFIG_EFI_PARTITION is not set
1782# CONFIG_SYSV68_PARTITION is not set
1783CONFIG_NLS=m
1784CONFIG_NLS_DEFAULT="iso8859-1"
1785CONFIG_NLS_CODEPAGE_437=m
1786# CONFIG_NLS_CODEPAGE_737 is not set
1787# CONFIG_NLS_CODEPAGE_775 is not set
1788CONFIG_NLS_CODEPAGE_850=m
1789# CONFIG_NLS_CODEPAGE_852 is not set
1790# CONFIG_NLS_CODEPAGE_855 is not set
1791# CONFIG_NLS_CODEPAGE_857 is not set
1792# CONFIG_NLS_CODEPAGE_860 is not set
1793# CONFIG_NLS_CODEPAGE_861 is not set
1794# CONFIG_NLS_CODEPAGE_862 is not set
1795# CONFIG_NLS_CODEPAGE_863 is not set
1796# CONFIG_NLS_CODEPAGE_864 is not set
1797# CONFIG_NLS_CODEPAGE_865 is not set
1798# CONFIG_NLS_CODEPAGE_866 is not set
1799# CONFIG_NLS_CODEPAGE_869 is not set
1800# CONFIG_NLS_CODEPAGE_936 is not set
1801# CONFIG_NLS_CODEPAGE_950 is not set
1802# CONFIG_NLS_CODEPAGE_932 is not set
1803# CONFIG_NLS_CODEPAGE_949 is not set
1804# CONFIG_NLS_CODEPAGE_874 is not set
1805# CONFIG_NLS_ISO8859_8 is not set
1806# CONFIG_NLS_CODEPAGE_1250 is not set
1807# CONFIG_NLS_CODEPAGE_1251 is not set
1808# CONFIG_NLS_ASCII is not set
1809CONFIG_NLS_ISO8859_1=m
1810# CONFIG_NLS_ISO8859_2 is not set
1811# CONFIG_NLS_ISO8859_3 is not set
1812# CONFIG_NLS_ISO8859_4 is not set
1813# CONFIG_NLS_ISO8859_5 is not set
1814# CONFIG_NLS_ISO8859_6 is not set
1815# CONFIG_NLS_ISO8859_7 is not set
1816# CONFIG_NLS_ISO8859_9 is not set
1817# CONFIG_NLS_ISO8859_13 is not set
1818# CONFIG_NLS_ISO8859_14 is not set
1819CONFIG_NLS_ISO8859_15=m
1820# CONFIG_NLS_KOI8_R is not set
1821# CONFIG_NLS_KOI8_U is not set
1822CONFIG_NLS_UTF8=m
1823# CONFIG_DLM is not set
1824
1825#
1826# Kernel hacking
1827#
1828# CONFIG_PRINTK_TIME is not set
1829CONFIG_ENABLE_WARN_DEPRECATED=y
1830CONFIG_ENABLE_MUST_CHECK=y
1831CONFIG_FRAME_WARN=1024
1832CONFIG_MAGIC_SYSRQ=y
1833# CONFIG_STRIP_ASM_SYMS is not set
1834# CONFIG_UNUSED_SYMBOLS is not set
1835# CONFIG_DEBUG_FS is not set
1836# CONFIG_HEADERS_CHECK is not set
1837CONFIG_DEBUG_KERNEL=y
1838# CONFIG_DEBUG_SHIRQ is not set
1839CONFIG_DETECT_SOFTLOCKUP=y
1840# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1841CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1842CONFIG_DETECT_HUNG_TASK=y
1843# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1844CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1845CONFIG_SCHED_DEBUG=y
1846# CONFIG_SCHEDSTATS is not set
1847# CONFIG_TIMER_STATS is not set
1848# CONFIG_DEBUG_OBJECTS is not set
1849# CONFIG_SLUB_DEBUG_ON is not set
1850# CONFIG_SLUB_STATS is not set
1851# CONFIG_DEBUG_KMEMLEAK is not set
1852# CONFIG_DEBUG_RT_MUTEXES is not set
1853# CONFIG_RT_MUTEX_TESTER is not set
1854# CONFIG_DEBUG_SPINLOCK is not set
1855CONFIG_DEBUG_MUTEXES=y
1856# CONFIG_DEBUG_LOCK_ALLOC is not set
1857# CONFIG_PROVE_LOCKING is not set
1858# CONFIG_LOCK_STAT is not set
1859# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1860# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1861# CONFIG_DEBUG_KOBJECT is not set
1862CONFIG_DEBUG_BUGVERBOSE=y
1863# CONFIG_DEBUG_INFO is not set
1864# CONFIG_DEBUG_VM is not set
1865# CONFIG_DEBUG_WRITECOUNT is not set
1866CONFIG_DEBUG_MEMORY_INIT=y
1867# CONFIG_DEBUG_LIST is not set
1868# CONFIG_DEBUG_SG is not set
1869# CONFIG_DEBUG_NOTIFIERS is not set
1870# CONFIG_DEBUG_CREDENTIALS is not set
1871# CONFIG_BOOT_PRINTK_DELAY is not set
1872# CONFIG_RCU_TORTURE_TEST is not set
1873# CONFIG_BACKTRACE_SELF_TEST is not set
1874# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1875# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1876# CONFIG_FAULT_INJECTION is not set
1877# CONFIG_LATENCYTOP is not set
1878CONFIG_SYSCTL_SYSCALL_CHECK=y
1879# CONFIG_PAGE_POISONING is not set
1880CONFIG_HAVE_FUNCTION_TRACER=y
1881CONFIG_TRACING_SUPPORT=y
1882CONFIG_FTRACE=y
1883# CONFIG_FUNCTION_TRACER is not set
1884# CONFIG_IRQSOFF_TRACER is not set
1885# CONFIG_SCHED_TRACER is not set
1886# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1887# CONFIG_BOOT_TRACER is not set
1888CONFIG_BRANCH_PROFILE_NONE=y
1889# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1890# CONFIG_PROFILE_ALL_BRANCHES is not set
1891# CONFIG_STACK_TRACER is not set
1892# CONFIG_KMEMTRACE is not set
1893# CONFIG_WORKQUEUE_TRACER is not set
1894# CONFIG_BLK_DEV_IO_TRACE is not set
1895# CONFIG_SAMPLES is not set
1896CONFIG_HAVE_ARCH_KGDB=y
1897# CONFIG_KGDB is not set
1898CONFIG_ARM_UNWIND=y
1899# CONFIG_DEBUG_USER is not set
1900CONFIG_DEBUG_ERRORS=y
1901# CONFIG_DEBUG_STACK_USAGE is not set
1902# CONFIG_DEBUG_LL is not set
1903# CONFIG_OC_ETM is not set
1904
1905#
1906# Security options
1907#
1908# CONFIG_KEYS is not set
1909# CONFIG_SECURITY is not set
1910# CONFIG_SECURITYFS is not set
1911# CONFIG_DEFAULT_SECURITY_SELINUX is not set
1912# CONFIG_DEFAULT_SECURITY_SMACK is not set
1913# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
1914CONFIG_DEFAULT_SECURITY_DAC=y
1915CONFIG_DEFAULT_SECURITY=""
1916CONFIG_CRYPTO=y
1917
1918#
1919# Crypto core or helper
1920#
1921CONFIG_CRYPTO_ALGAPI=m
1922CONFIG_CRYPTO_ALGAPI2=m
1923CONFIG_CRYPTO_AEAD2=m
1924CONFIG_CRYPTO_BLKCIPHER=m
1925CONFIG_CRYPTO_BLKCIPHER2=m
1926CONFIG_CRYPTO_HASH=m
1927CONFIG_CRYPTO_HASH2=m
1928CONFIG_CRYPTO_RNG2=m
1929CONFIG_CRYPTO_PCOMP=m
1930CONFIG_CRYPTO_MANAGER=m
1931CONFIG_CRYPTO_MANAGER2=m
1932# CONFIG_CRYPTO_GF128MUL is not set
1933# CONFIG_CRYPTO_NULL is not set
1934CONFIG_CRYPTO_WORKQUEUE=m
1935# CONFIG_CRYPTO_CRYPTD is not set
1936# CONFIG_CRYPTO_AUTHENC is not set
1937# CONFIG_CRYPTO_TEST is not set
1938
1939#
1940# Authenticated Encryption with Associated Data
1941#
1942# CONFIG_CRYPTO_CCM is not set
1943# CONFIG_CRYPTO_GCM is not set
1944# CONFIG_CRYPTO_SEQIV is not set
1945
1946#
1947# Block modes
1948#
1949# CONFIG_CRYPTO_CBC is not set
1950# CONFIG_CRYPTO_CTR is not set
1951# CONFIG_CRYPTO_CTS is not set
1952CONFIG_CRYPTO_ECB=m
1953# CONFIG_CRYPTO_LRW is not set
1954# CONFIG_CRYPTO_PCBC is not set
1955# CONFIG_CRYPTO_XTS is not set
1956
1957#
1958# Hash modes
1959#
1960# CONFIG_CRYPTO_HMAC is not set
1961# CONFIG_CRYPTO_XCBC is not set
1962# CONFIG_CRYPTO_VMAC is not set
1963
1964#
1965# Digest
1966#
1967# CONFIG_CRYPTO_CRC32C is not set
1968# CONFIG_CRYPTO_GHASH is not set
1969# CONFIG_CRYPTO_MD4 is not set
1970# CONFIG_CRYPTO_MD5 is not set
1971CONFIG_CRYPTO_MICHAEL_MIC=m
1972# CONFIG_CRYPTO_RMD128 is not set
1973# CONFIG_CRYPTO_RMD160 is not set
1974# CONFIG_CRYPTO_RMD256 is not set
1975# CONFIG_CRYPTO_RMD320 is not set
1976# CONFIG_CRYPTO_SHA1 is not set
1977# CONFIG_CRYPTO_SHA256 is not set
1978# CONFIG_CRYPTO_SHA512 is not set
1979# CONFIG_CRYPTO_TGR192 is not set
1980# CONFIG_CRYPTO_WP512 is not set
1981
1982#
1983# Ciphers
1984#
1985CONFIG_CRYPTO_AES=m
1986# CONFIG_CRYPTO_ANUBIS is not set
1987CONFIG_CRYPTO_ARC4=m
1988# CONFIG_CRYPTO_BLOWFISH is not set
1989# CONFIG_CRYPTO_CAMELLIA is not set
1990# CONFIG_CRYPTO_CAST5 is not set
1991# CONFIG_CRYPTO_CAST6 is not set
1992# CONFIG_CRYPTO_DES is not set
1993# CONFIG_CRYPTO_FCRYPT is not set
1994# CONFIG_CRYPTO_KHAZAD is not set
1995# CONFIG_CRYPTO_SALSA20 is not set
1996# CONFIG_CRYPTO_SEED is not set
1997# CONFIG_CRYPTO_SERPENT is not set
1998# CONFIG_CRYPTO_TEA is not set
1999# CONFIG_CRYPTO_TWOFISH is not set
2000
2001#
2002# Compression
2003#
2004# CONFIG_CRYPTO_DEFLATE is not set
2005# CONFIG_CRYPTO_ZLIB is not set
2006# CONFIG_CRYPTO_LZO is not set
2007
2008#
2009# Random Number Generation
2010#
2011# CONFIG_CRYPTO_ANSI_CPRNG is not set
2012CONFIG_CRYPTO_HW=y
2013# CONFIG_BINARY_PRINTF is not set
2014
2015#
2016# Library routines
2017#
2018CONFIG_BITREVERSE=y
2019CONFIG_GENERIC_FIND_LAST_BIT=y
2020CONFIG_CRC_CCITT=m
2021CONFIG_CRC16=m
2022CONFIG_CRC_T10DIF=m
2023CONFIG_CRC_ITU_T=m
2024CONFIG_CRC32=y
2025# CONFIG_CRC7 is not set
2026# CONFIG_LIBCRC32C is not set
2027CONFIG_ZLIB_INFLATE=y
2028CONFIG_ZLIB_DEFLATE=y
2029CONFIG_HAS_IOMEM=y
2030CONFIG_HAS_IOPORT=y
2031CONFIG_HAS_DMA=y
2032CONFIG_NLATTR=y
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index e7ccf7e697ce..dd00f747e2ad 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -54,5 +54,6 @@ endif
54 54
55head-y := head$(MMUEXT).o 55head-y := head$(MMUEXT).o
56obj-$(CONFIG_DEBUG_LL) += debug.o 56obj-$(CONFIG_DEBUG_LL) += debug.o
57obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
57 58
58extra-y := $(head-y) init_task.o vmlinux.lds 59extra-y := $(head-y) init_task.o vmlinux.lds
diff --git a/arch/arm/kernel/early_printk.c b/arch/arm/kernel/early_printk.c
new file mode 100644
index 000000000000..85aa2b292692
--- /dev/null
+++ b/arch/arm/kernel/early_printk.c
@@ -0,0 +1,57 @@
1/*
2 * linux/arch/arm/kernel/early_printk.c
3 *
4 * Copyright (C) 2009 Sascha Hauer <s.hauer@pengutronix.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/console.h>
13#include <linux/init.h>
14
15extern void printch(int);
16
17static void early_write(const char *s, unsigned n)
18{
19 while (n-- > 0) {
20 if (*s == '\n')
21 printch('\r');
22 printch(*s);
23 s++;
24 }
25}
26
27static void early_console_write(struct console *con, const char *s, unsigned n)
28{
29 early_write(s, n);
30}
31
32static struct console early_console = {
33 .name = "earlycon",
34 .write = early_console_write,
35 .flags = CON_PRINTBUFFER | CON_BOOT,
36 .index = -1,
37};
38
39asmlinkage void early_printk(const char *fmt, ...)
40{
41 char buf[512];
42 int n;
43 va_list ap;
44
45 va_start(ap, fmt);
46 n = vscnprintf(buf, sizeof(buf), fmt, ap);
47 early_write(buf, n);
48 va_end(ap);
49}
50
51static int __init setup_early_printk(char *buf)
52{
53 register_console(&early_console);
54 return 0;
55}
56
57early_param("earlyprintk", setup_early_printk);
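The console above does no output buffering of its own: early_printk() formats into a stack buffer via vscnprintf() and pushes every character straight out through the platform's low-level printch() routine, so the board must provide a working DEBUG_LL output. As a rough usage sketch (the hook name below is hypothetical; the rest is standard kernel API), early board code could trace progress before a real console registers, provided CONFIG_EARLY_PRINTK is enabled and "earlyprintk" is on the kernel command line:

#include <linux/kernel.h>
#include <linux/init.h>

/* hypothetical tracing hook; early_printk() bypasses the printk
 * log buffer and emits through printch() immediately */
static void __init trace_early_boot(void)
{
	early_printk("early: reached machine setup\n");
}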
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index a73a34dccf2a..ea02a7b1c244 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -160,6 +160,7 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
160 160
161 /* Make sure our local interrupt controller has this enabled */ 161 /* Make sure our local interrupt controller has this enabled */
162 local_irq_save(flags); 162 local_irq_save(flags);
163 irq_to_desc(clk->irq)->status |= IRQ_NOPROBE;
163 get_irq_chip(clk->irq)->unmask(clk->irq); 164 get_irq_chip(clk->irq)->unmask(clk->irq);
164 local_irq_restore(flags); 165 local_irq_restore(flags);
165 166
diff --git a/arch/arm/mach-clps711x/include/mach/memory.h b/arch/arm/mach-clps711x/include/mach/memory.h
index e522b20bcbc2..f70d52be48a2 100644
--- a/arch/arm/mach-clps711x/include/mach/memory.h
+++ b/arch/arm/mach-clps711x/include/mach/memory.h
@@ -30,6 +30,8 @@
30 30
31#define __virt_to_bus(x) ((x) - PAGE_OFFSET) 31#define __virt_to_bus(x) ((x) - PAGE_OFFSET)
32#define __bus_to_virt(x) ((x) + PAGE_OFFSET) 32#define __bus_to_virt(x) ((x) + PAGE_OFFSET)
33#define __pfn_to_bus(x) (__pfn_to_phys(x) - PHYS_OFFSET)
34#define __bus_to_pfn(x) __phys_to_pfn((x) + PHYS_OFFSET)
33 35
34#endif 36#endif
35 37
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index b97f529e58e8..41febc796b1c 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -201,6 +201,11 @@ void __init footbridge_map_io(void)
201 201
202#ifdef CONFIG_FOOTBRIDGE_ADDIN 202#ifdef CONFIG_FOOTBRIDGE_ADDIN
203 203
204static inline unsigned long fb_bus_sdram_offset(void)
205{
206 return *CSR_PCISDRAMBASE & 0xfffffff0;
207}
208
204/* 209/*
205 * These two functions convert virtual addresses to PCI addresses and PCI 210 * These two functions convert virtual addresses to PCI addresses and PCI
206 * addresses to virtual addresses. Note that it is only legal to use these 211 * addresses to virtual addresses. Note that it is only legal to use these
@@ -210,14 +215,13 @@ unsigned long __virt_to_bus(unsigned long res)
210{ 215{
211 WARN_ON(res < PAGE_OFFSET || res >= (unsigned long)high_memory); 216 WARN_ON(res < PAGE_OFFSET || res >= (unsigned long)high_memory);
212 217
213 return (res - PAGE_OFFSET) + (*CSR_PCISDRAMBASE & 0xfffffff0); 218 return res + (fb_bus_sdram_offset() - PAGE_OFFSET);
214} 219}
215EXPORT_SYMBOL(__virt_to_bus); 220EXPORT_SYMBOL(__virt_to_bus);
216 221
217unsigned long __bus_to_virt(unsigned long res) 222unsigned long __bus_to_virt(unsigned long res)
218{ 223{
219 res -= (*CSR_PCISDRAMBASE & 0xfffffff0); 224 res = res - (fb_bus_sdram_offset() - PAGE_OFFSET);
220 res += PAGE_OFFSET;
221 225
222 WARN_ON(res < PAGE_OFFSET || res >= (unsigned long)high_memory); 226 WARN_ON(res < PAGE_OFFSET || res >= (unsigned long)high_memory);
223 227
@@ -225,4 +229,16 @@ unsigned long __bus_to_virt(unsigned long res)
225} 229}
226EXPORT_SYMBOL(__bus_to_virt); 230EXPORT_SYMBOL(__bus_to_virt);
227 231
232unsigned long __pfn_to_bus(unsigned long pfn)
233{
234	return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET);
235}
236EXPORT_SYMBOL(__pfn_to_bus);
237
238unsigned long __bus_to_pfn(unsigned long bus)
239{
240 return __phys_to_pfn(bus - (fb_bus_sdram_offset() - PHYS_OFFSET));
241}
242EXPORT_SYMBOL(__bus_to_pfn);
243
228#endif 244#endif
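All four exported helpers apply one constant offset, merely expressed against different bases: bus = virt + (SDRAM bus base - PAGE_OFFSET) in virtual terms, or phys + (SDRAM bus base - PHYS_OFFSET) in physical terms. A minimal userspace self-check of that identity, with illustrative values standing in for PAGE_OFFSET, PHYS_OFFSET and fb_bus_sdram_offset(); the same arithmetic underlies the integrator, ixp2000 and ixp23xx variants below:

#include <assert.h>

int main(void)
{
	const unsigned long page_offset = 0xc0000000UL;	/* assumed */
	const unsigned long phys_offset = 0x00000000UL;	/* assumed */
	const unsigned long bus_base    = 0xe0000000UL;	/* fb_bus_sdram_offset() stand-in */

	unsigned long virt = page_offset + 0x1000;
	unsigned long bus  = virt + (bus_base - page_offset);	/* __virt_to_bus */
	unsigned long pfn  = (virt - page_offset + phys_offset) >> 12; /* 4k pages */

	assert(bus - (bus_base - page_offset) == virt);		/* __bus_to_virt */
	assert((pfn << 12) + (bus_base - phys_offset) == bus);	/* __pfn_to_bus */
	return 0;
}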
diff --git a/arch/arm/mach-footbridge/include/mach/memory.h b/arch/arm/mach-footbridge/include/mach/memory.h
index cb16e59d87b6..8d64f4574087 100644
--- a/arch/arm/mach-footbridge/include/mach/memory.h
+++ b/arch/arm/mach-footbridge/include/mach/memory.h
@@ -29,6 +29,8 @@
29#ifndef __ASSEMBLY__ 29#ifndef __ASSEMBLY__
30extern unsigned long __virt_to_bus(unsigned long); 30extern unsigned long __virt_to_bus(unsigned long);
31extern unsigned long __bus_to_virt(unsigned long); 31extern unsigned long __bus_to_virt(unsigned long);
32extern unsigned long __pfn_to_bus(unsigned long);
33extern unsigned long __bus_to_pfn(unsigned long);
32#endif 34#endif
33#define __virt_to_bus __virt_to_bus 35#define __virt_to_bus __virt_to_bus
34#define __bus_to_virt __bus_to_virt 36#define __bus_to_virt __bus_to_virt
@@ -36,14 +38,15 @@ extern unsigned long __bus_to_virt(unsigned long);
36#elif defined(CONFIG_FOOTBRIDGE_HOST) 38#elif defined(CONFIG_FOOTBRIDGE_HOST)
37 39
38/* 40/*
39 * The footbridge is programmed to expose the system RAM at the corresponding 41 * The footbridge is programmed to expose the system RAM at 0xe0000000.
40 * address. So, if PAGE_OFFSET is 0xc0000000, RAM appears at 0xe0000000. 42 * The requirement is that the RAM isn't placed at bus address 0, which
41 * If 0x80000000, then its exposed at 0xa0000000 on the bus. etc.
42 * The only requirement is that the RAM isn't placed at bus address 0 which
43 * would clash with VGA cards. 43 * would clash with VGA cards.
44 */ 44 */
45#define __virt_to_bus(x) ((x) - 0xe0000000) 45#define BUS_OFFSET 0xe0000000
46#define __bus_to_virt(x) ((x) + 0xe0000000) 46#define __virt_to_bus(x) ((x) + (BUS_OFFSET - PAGE_OFFSET))
47#define __bus_to_virt(x) ((x) - (BUS_OFFSET - PAGE_OFFSET))
48#define __pfn_to_bus(x) (__pfn_to_phys(x) + (BUS_OFFSET - PHYS_OFFSET))
49#define __bus_to_pfn(x) __phys_to_pfn((x) - (BUS_OFFSET - PHYS_OFFSET))
47 50
48#else 51#else
49 52
diff --git a/arch/arm/mach-integrator/include/mach/memory.h b/arch/arm/mach-integrator/include/mach/memory.h
index 4891828454f5..991f24d2c115 100644
--- a/arch/arm/mach-integrator/include/mach/memory.h
+++ b/arch/arm/mach-integrator/include/mach/memory.h
@@ -28,6 +28,7 @@
28#define BUS_OFFSET UL(0x80000000) 28#define BUS_OFFSET UL(0x80000000)
29#define __virt_to_bus(x) ((x) - PAGE_OFFSET + BUS_OFFSET) 29#define __virt_to_bus(x) ((x) - PAGE_OFFSET + BUS_OFFSET)
30#define __bus_to_virt(x) ((x) - BUS_OFFSET + PAGE_OFFSET) 30#define __bus_to_virt(x) ((x) - BUS_OFFSET + PAGE_OFFSET)
31#define __pfn_to_bus(x) (((x) << PAGE_SHIFT) + BUS_OFFSET) 31#define __pfn_to_bus(x) (__pfn_to_phys(x) + (BUS_OFFSET - PHYS_OFFSET))
32#define __bus_to_pfn(x) __phys_to_pfn((x) - (BUS_OFFSET - PHYS_OFFSET))
32 33
33#endif 34#endif
diff --git a/arch/arm/mach-ixp2000/include/mach/memory.h b/arch/arm/mach-ixp2000/include/mach/memory.h
index aee7eb8a71b2..98e3471be15b 100644
--- a/arch/arm/mach-ixp2000/include/mach/memory.h
+++ b/arch/arm/mach-ixp2000/include/mach/memory.h
@@ -17,11 +17,15 @@
17 17
18#include <mach/ixp2000-regs.h> 18#include <mach/ixp2000-regs.h>
19 19
20#define __virt_to_bus(v) \ 20#define IXP2000_PCI_SDRAM_OFFSET (*IXP2000_PCI_SDRAM_BAR & 0xfffffff0)
21 (((__virt_to_phys(v) - 0x0) + (*IXP2000_PCI_SDRAM_BAR & 0xfffffff0)))
22 21
23#define __bus_to_virt(b) \ 22#define __phys_to_bus(x) ((x) + (IXP2000_PCI_SDRAM_OFFSET - PHYS_OFFSET))
24 __phys_to_virt((((b - (*IXP2000_PCI_SDRAM_BAR & 0xfffffff0)) + 0x0))) 23#define __bus_to_phys(x) ((x) - (IXP2000_PCI_SDRAM_OFFSET - PHYS_OFFSET))
24
25#define __virt_to_bus(v) __phys_to_bus(__virt_to_phys(v))
26#define __bus_to_virt(b) __phys_to_virt(__bus_to_phys(b))
27#define __pfn_to_bus(p) __phys_to_bus(__pfn_to_phys(p))
28#define __bus_to_pfn(b) __phys_to_pfn(__bus_to_phys(b))
25 29
26#endif 30#endif
27 31
diff --git a/arch/arm/mach-ixp23xx/include/mach/memory.h b/arch/arm/mach-ixp23xx/include/mach/memory.h
index fdd138706c70..94a3a86cfeb8 100644
--- a/arch/arm/mach-ixp23xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp23xx/include/mach/memory.h
@@ -19,16 +19,15 @@
19 */ 19 */
20#define PHYS_OFFSET (0x00000000) 20#define PHYS_OFFSET (0x00000000)
21 21
22#define __virt_to_bus(v) \ 22#define IXP23XX_PCI_SDRAM_OFFSET (*((volatile int *)IXP23XX_PCI_SDRAM_BAR) & 0xfffffff0)
23 ({ unsigned int ret; \ 23
24 ret = ((__virt_to_phys(v) - 0x00000000) + \ 24#define __phys_to_bus(x) ((x) + (IXP23XX_PCI_SDRAM_OFFSET - PHYS_OFFSET))
25 (*((volatile int *)IXP23XX_PCI_SDRAM_BAR) & 0xfffffff0)); \ 25#define __bus_to_phys(x) ((x) - (IXP23XX_PCI_SDRAM_OFFSET - PHYS_OFFSET))
26 ret; }) 26
27 27#define __virt_to_bus(v) __phys_to_bus(__virt_to_phys(v))
28#define __bus_to_virt(b) \ 28#define __bus_to_virt(b) __phys_to_virt(__bus_to_phys(b))
29 ({ unsigned int data; \ 29#define __pfn_to_bus(p) __phys_to_bus(__pfn_to_phys(p))
30 data = *((volatile int *)IXP23XX_PCI_SDRAM_BAR); \ 30#define __bus_to_pfn(b) __phys_to_pfn(__bus_to_phys(b))
31 __phys_to_virt((((b - (data & 0xfffffff0)) + 0x00000000))); })
32 31
33#define arch_is_coherent() 1 32#define arch_is_coherent() 1
34 33
diff --git a/arch/arm/mach-lh7a40x/clocks.c b/arch/arm/mach-lh7a40x/clocks.c
index 6182f5410b4d..fcaf876f19b6 100644
--- a/arch/arm/mach-lh7a40x/clocks.c
+++ b/arch/arm/mach-lh7a40x/clocks.c
@@ -7,8 +7,6 @@
7 * version 2 as published by the Free Software Foundation. 7 * version 2 as published by the Free Software Foundation.
8 * 8 *
9 */ 9 */
10
11#include <linux/cpufreq.h>
12#include <mach/hardware.h> 10#include <mach/hardware.h>
13#include <mach/clocks.h> 11#include <mach/clocks.h>
14#include <linux/err.h> 12#include <linux/err.h>
@@ -31,12 +29,6 @@ struct clk {
31#define HCLKDIV(c) (((c) >> 0) & 0x02) 29#define HCLKDIV(c) (((c) >> 0) & 0x02)
32#define PCLKDIV(c) (((c) >> 16) & 0x03) 30#define PCLKDIV(c) (((c) >> 16) & 0x03)
33 31
34unsigned int cpufreq_get (unsigned int cpu) /* in kHz */
35{
36 return fclkfreq_get ()/1000;
37}
38EXPORT_SYMBOL(cpufreq_get);
39
40unsigned int fclkfreq_get (void) 32unsigned int fclkfreq_get (void)
41{ 33{
42 unsigned int clkset = CSC_CLKSET; 34 unsigned int clkset = CSC_CLKSET;
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index d89c6adbe8bc..e6d8e10ae5d1 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -63,6 +63,15 @@ config ARCH_VIPER
63 select HAVE_PWM 63 select HAVE_PWM
64 select PXA_HAVE_BOARD_IRQS 64 select PXA_HAVE_BOARD_IRQS
65 select PXA_HAVE_ISA_IRQS 65 select PXA_HAVE_ISA_IRQS
66 select ARCOM_PCMCIA
67
68config MACH_ARCOM_ZEUS
69 bool "Arcom/Eurotech ZEUS SBC"
70 select PXA27x
71 select ISA
72 select PXA_HAVE_BOARD_IRQS
73 select PXA_HAVE_ISA_IRQS
74 select ARCOM_PCMCIA
66 75
67config MACH_BALLOON3 76config MACH_BALLOON3
68 bool "Balloon 3 board" 77 bool "Balloon 3 board"
@@ -179,6 +188,11 @@ config MACH_TRIZEPS_ANY
179 188
180endchoice 189endchoice
181 190
191config ARCOM_PCMCIA
192 bool
193 help
194 Generic option for Arcom Viper/Zeus PCMCIA
195
182config TRIZEPS_PCMCIA 196config TRIZEPS_PCMCIA
183 bool 197 bool
184 help 198 help
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index b5d29e60a341..f64afda7e6f6 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_MACH_SAAR) += saar.o
38# 3rd Party Dev Platforms 38# 3rd Party Dev Platforms
39obj-$(CONFIG_ARCH_PXA_IDP) += idp.o 39obj-$(CONFIG_ARCH_PXA_IDP) += idp.o
40obj-$(CONFIG_ARCH_VIPER) += viper.o 40obj-$(CONFIG_ARCH_VIPER) += viper.o
41obj-$(CONFIG_MACH_ARCOM_ZEUS) += zeus.o
41obj-$(CONFIG_MACH_BALLOON3) += balloon3.o 42obj-$(CONFIG_MACH_BALLOON3) += balloon3.o
42obj-$(CONFIG_MACH_CSB726) += csb726.o 43obj-$(CONFIG_MACH_CSB726) += csb726.o
43obj-$(CONFIG_CSB726_CSB701) += csb701.o 44obj-$(CONFIG_CSB726_CSB701) += csb701.o
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 1c0de808b54d..c8a01bc85fde 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -497,16 +497,15 @@ static int em_x270_usb_hub_init(void)
497 goto err_free_vbus_gpio; 497 goto err_free_vbus_gpio;
498 498
499 /* USB Hub power-on and reset */ 499 /* USB Hub power-on and reset */
500 gpio_direction_output(usb_hub_reset, 0); 500 gpio_direction_output(usb_hub_reset, 1);
501 gpio_direction_output(GPIO9_USB_VBUS_EN, 0);
501 regulator_enable(em_x270_usb_ldo); 502 regulator_enable(em_x270_usb_ldo);
502 gpio_set_value(usb_hub_reset, 1);
503 gpio_set_value(usb_hub_reset, 0); 503 gpio_set_value(usb_hub_reset, 0);
504 gpio_set_value(usb_hub_reset, 1);
504 regulator_disable(em_x270_usb_ldo); 505 regulator_disable(em_x270_usb_ldo);
505 regulator_enable(em_x270_usb_ldo); 506 regulator_enable(em_x270_usb_ldo);
506 gpio_set_value(usb_hub_reset, 1); 507 gpio_set_value(usb_hub_reset, 0);
507 508 gpio_set_value(GPIO9_USB_VBUS_EN, 1);
508 /* enable VBUS */
509 gpio_direction_output(GPIO9_USB_VBUS_EN, 1);
510 509
511 return 0; 510 return 0;
512 511
diff --git a/arch/arm/mach-pxa/include/mach/arcom-pcmcia.h b/arch/arm/mach-pxa/include/mach/arcom-pcmcia.h
new file mode 100644
index 000000000000..d428be4db44c
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/arcom-pcmcia.h
@@ -0,0 +1,11 @@
1#ifndef __ARCOM_PCMCIA_H
2#define __ARCOM_PCMCIA_H
3
4struct arcom_pcmcia_pdata {
5 int cd_gpio;
6 int rdy_gpio;
7 int pwr_gpio;
8 void (*reset)(int state);
9};
10
11#endif
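A hedged sketch of how a socket driver might consume this platform data; the probe function below is illustrative rather than taken from the drivers/pcmcia code, but the platform_data plumbing is the standard mechanism:

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <mach/arcom-pcmcia.h>

static int __devinit example_pcmcia_probe(struct platform_device *pdev)
{
	struct arcom_pcmcia_pdata *pdata = pdev->dev.platform_data;

	if (!pdata)
		return -ENODEV;

	/* pulse the board-specific CF reset line via the callback */
	pdata->reset(1);
	pdata->reset(0);

	/* cd_gpio, rdy_gpio and pwr_gpio would be claimed with
	 * gpio_request() and wired into socket status handling here */
	return 0;
}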
diff --git a/arch/arm/mach-pxa/include/mach/viper.h b/arch/arm/mach-pxa/include/mach/viper.h
index 10988c270ca3..5f5fbf1f6489 100644
--- a/arch/arm/mach-pxa/include/mach/viper.h
+++ b/arch/arm/mach-pxa/include/mach/viper.h
@@ -85,8 +85,6 @@
85/* Interrupt and Configuration Register (VIPER_ICR) */ 85/* Interrupt and Configuration Register (VIPER_ICR) */
86/* This is a write only register. Only CF_RST is used under Linux */ 86/* This is a write only register. Only CF_RST is used under Linux */
87 87
88extern void viper_cf_rst(int state);
89
90#define VIPER_ICR_RETRIG (1 << 0) 88#define VIPER_ICR_RETRIG (1 << 0)
91#define VIPER_ICR_AUTO_CLR (1 << 1) 89#define VIPER_ICR_AUTO_CLR (1 << 1)
92#define VIPER_ICR_R_DIS (1 << 2) 90#define VIPER_ICR_R_DIS (1 << 2)
diff --git a/arch/arm/mach-pxa/include/mach/zeus.h b/arch/arm/mach-pxa/include/mach/zeus.h
new file mode 100644
index 000000000000..c387046d2f28
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/zeus.h
@@ -0,0 +1,82 @@
1/*
2 * arch/arm/mach-pxa/include/mach/zeus.h
3 *
4 * Author: David Vrabel
5 * Created: Sept 28, 2005
6 * Copyright: Arcom Control Systems Ltd.
7 *
8 * Maintained by: Marc Zyngier <maz@misterjones.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef _MACH_ZEUS_H
16#define _MACH_ZEUS_H
17
18/* Physical addresses */
19#define ZEUS_FLASH_PHYS PXA_CS0_PHYS
20#define ZEUS_ETH0_PHYS PXA_CS1_PHYS
21#define ZEUS_ETH1_PHYS PXA_CS2_PHYS
22#define ZEUS_CPLD_PHYS (PXA_CS4_PHYS+0x2000000)
23#define ZEUS_SRAM_PHYS PXA_CS5_PHYS
24#define ZEUS_PC104IO_PHYS (0x30000000)
25
26#define ZEUS_CPLD_VERSION_PHYS (ZEUS_CPLD_PHYS + 0x00000000)
27#define ZEUS_CPLD_ISA_IRQ_PHYS (ZEUS_CPLD_PHYS + 0x00800000)
28#define ZEUS_CPLD_CONTROL_PHYS (ZEUS_CPLD_PHYS + 0x01000000)
29#define ZEUS_CPLD_EXTWDOG_PHYS (ZEUS_CPLD_PHYS + 0x01800000)
30
31/* GPIOs */
32#define ZEUS_AC97_GPIO 0
33#define ZEUS_WAKEUP_GPIO 1
34#define ZEUS_UARTA_GPIO 9
35#define ZEUS_UARTB_GPIO 10
36#define ZEUS_UARTC_GPIO 12
37#define ZEUS_UARTD_GPIO 11
38#define ZEUS_ETH0_GPIO 14
39#define ZEUS_ISA_GPIO 17
40#define ZEUS_BKLEN_GPIO 19
41#define ZEUS_USB2_PWREN_GPIO 22
42#define ZEUS_PTT_GPIO 27
43#define ZEUS_CF_CD_GPIO 35
44#define ZEUS_MMC_WP_GPIO 52
45#define ZEUS_MMC_CD_GPIO 53
46#define ZEUS_EXTGPIO_GPIO 91
47#define ZEUS_CF_PWEN_GPIO 97
48#define ZEUS_CF_RDY_GPIO 99
49#define ZEUS_LCD_EN_GPIO 101
50#define ZEUS_ETH1_GPIO 113
51#define ZEUS_CAN_GPIO 116
52
53#define ZEUS_EXT0_GPIO_BASE 128
54#define ZEUS_EXT1_GPIO_BASE 160
55#define ZEUS_USER_GPIO_BASE 192
56
57#define ZEUS_EXT0_GPIO(x) (ZEUS_EXT0_GPIO_BASE + (x))
58#define ZEUS_EXT1_GPIO(x) (ZEUS_EXT1_GPIO_BASE + (x))
59#define ZEUS_USER_GPIO(x) (ZEUS_USER_GPIO_BASE + (x))
60
61/*
62 * CPLD registers:
63 * Only 4 registers, but spread over a 32MB address space.
64 * Be gentle, and remap that over 32kB...
65 */
66
67#define ZEUS_CPLD (0xf0000000)
68#define ZEUS_CPLD_VERSION (ZEUS_CPLD + 0x0000)
69#define ZEUS_CPLD_ISA_IRQ (ZEUS_CPLD + 0x1000)
70#define ZEUS_CPLD_CONTROL (ZEUS_CPLD + 0x2000)
71#define ZEUS_CPLD_EXTWDOG (ZEUS_CPLD + 0x3000)
72
73/* CPLD register bits */
74#define ZEUS_CPLD_CONTROL_CF_RST 0x01
75
76#define ZEUS_PC104IO (0xf1000000)
77
78#define ZEUS_SRAM_SIZE (256 * 1024)
79
80#endif
81
82
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index cf0d71b7797e..5352b4e5a7dd 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -47,6 +47,7 @@
47#include <mach/pxafb.h> 47#include <mach/pxafb.h>
48#include <plat/i2c.h> 48#include <plat/i2c.h>
49#include <mach/regs-uart.h> 49#include <mach/regs-uart.h>
50#include <mach/arcom-pcmcia.h>
50#include <mach/viper.h> 51#include <mach/viper.h>
51 52
52#include <asm/setup.h> 53#include <asm/setup.h>
@@ -76,14 +77,28 @@ static void viper_icr_clear_bit(unsigned int bit)
76} 77}
77 78
78/* This function is used from the pcmcia module to reset the CF */ 79/* This function is used from the pcmcia module to reset the CF */
79void viper_cf_rst(int state) 80static void viper_cf_reset(int state)
80{ 81{
81 if (state) 82 if (state)
82 viper_icr_set_bit(VIPER_ICR_CF_RST); 83 viper_icr_set_bit(VIPER_ICR_CF_RST);
83 else 84 else
84 viper_icr_clear_bit(VIPER_ICR_CF_RST); 85 viper_icr_clear_bit(VIPER_ICR_CF_RST);
85} 86}
86EXPORT_SYMBOL(viper_cf_rst); 87
88static struct arcom_pcmcia_pdata viper_pcmcia_info = {
89 .cd_gpio = VIPER_CF_CD_GPIO,
90 .rdy_gpio = VIPER_CF_RDY_GPIO,
91 .pwr_gpio = VIPER_CF_POWER_GPIO,
92 .reset = viper_cf_reset,
93};
94
95static struct platform_device viper_pcmcia_device = {
96 .name = "viper-pcmcia",
97 .id = -1,
98 .dev = {
99 .platform_data = &viper_pcmcia_info,
100 },
101};
87 102
88/* 103/*
89 * The CPLD version register was not present on VIPER boards prior to 104 * The CPLD version register was not present on VIPER boards prior to
@@ -685,6 +700,7 @@ static struct platform_device *viper_devs[] __initdata = {
685 &viper_mtd_devices[0], 700 &viper_mtd_devices[0],
686 &viper_mtd_devices[1], 701 &viper_mtd_devices[1],
687 &viper_backlight_device, 702 &viper_backlight_device,
703 &viper_pcmcia_device,
688}; 704};
689 705
690static mfp_cfg_t viper_pin_config[] __initdata = { 706static mfp_cfg_t viper_pin_config[] __initdata = {
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
new file mode 100644
index 000000000000..5b986a8bd9e6
--- /dev/null
+++ b/arch/arm/mach-pxa/zeus.c
@@ -0,0 +1,820 @@
1/*
2 * Support for the Arcom ZEUS.
3 *
4 * Copyright (C) 2006 Arcom Control Systems Ltd.
5 *
6 * Loosely based on Arcom's 2.6.16.28.
7 * Maintained by Marc Zyngier <maz@misterjones.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/cpufreq.h>
15#include <linux/interrupt.h>
16#include <linux/irq.h>
17#include <linux/pm.h>
18#include <linux/gpio.h>
19#include <linux/serial_8250.h>
20#include <linux/dm9000.h>
21#include <linux/mmc/host.h>
22#include <linux/spi/spi.h>
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/partitions.h>
25#include <linux/mtd/physmap.h>
26#include <linux/i2c.h>
27#include <linux/i2c/pca953x.h>
28
29#include <asm/mach-types.h>
30#include <asm/mach/arch.h>
31#include <asm/mach/map.h>
32
33#include <plat/i2c.h>
34
35#include <mach/pxa2xx-regs.h>
36#include <mach/regs-uart.h>
37#include <mach/ohci.h>
38#include <mach/mmc.h>
39#include <mach/pxa27x-udc.h>
40#include <mach/udc.h>
41#include <mach/pxafb.h>
42#include <mach/pxa2xx_spi.h>
43#include <mach/mfp-pxa27x.h>
44#include <mach/pm.h>
45#include <mach/audio.h>
46#include <mach/arcom-pcmcia.h>
47#include <mach/zeus.h>
48
49#include "generic.h"
50
51/*
52 * Interrupt handling
53 */
54
55static unsigned long zeus_irq_enabled_mask;
56static const int zeus_isa_irqs[] = { 3, 4, 5, 6, 7, 10, 11, 12, };
57static const int zeus_isa_irq_map[] = {
58 0, /* ISA irq #0, invalid */
59 0, /* ISA irq #1, invalid */
60 0, /* ISA irq #2, invalid */
61 1 << 0, /* ISA irq #3 */
62 1 << 1, /* ISA irq #4 */
63 1 << 2, /* ISA irq #5 */
64 1 << 3, /* ISA irq #6 */
65 1 << 4, /* ISA irq #7 */
66 0, /* ISA irq #8, invalid */
67 0, /* ISA irq #9, invalid */
68 1 << 5, /* ISA irq #10 */
69 1 << 6, /* ISA irq #11 */
70 1 << 7, /* ISA irq #12 */
71};
72
73static inline int zeus_irq_to_bitmask(unsigned int irq)
74{
75 return zeus_isa_irq_map[irq - PXA_ISA_IRQ(0)];
76}
77
78static inline int zeus_bit_to_irq(int bit)
79{
80 return zeus_isa_irqs[bit] + PXA_ISA_IRQ(0);
81}
82
83static void zeus_ack_irq(unsigned int irq)
84{
85 __raw_writew(zeus_irq_to_bitmask(irq), ZEUS_CPLD_ISA_IRQ);
86}
87
88static void zeus_mask_irq(unsigned int irq)
89{
90 zeus_irq_enabled_mask &= ~(zeus_irq_to_bitmask(irq));
91}
92
93static void zeus_unmask_irq(unsigned int irq)
94{
95 zeus_irq_enabled_mask |= zeus_irq_to_bitmask(irq);
96}
97
98static inline unsigned long zeus_irq_pending(void)
99{
100 return __raw_readw(ZEUS_CPLD_ISA_IRQ) & zeus_irq_enabled_mask;
101}
102
103static void zeus_irq_handler(unsigned int irq, struct irq_desc *desc)
104{
105 unsigned long pending;
106
107 pending = zeus_irq_pending();
108 do {
109 /* we're in a chained irq handler,
110 * so ack the interrupt by hand */
111 desc->chip->ack(gpio_to_irq(ZEUS_ISA_GPIO));
112
113 if (likely(pending)) {
114 irq = zeus_bit_to_irq(__ffs(pending));
115 generic_handle_irq(irq);
116 }
117 pending = zeus_irq_pending();
118 } while (pending);
119}
120
121static struct irq_chip zeus_irq_chip = {
122 .name = "ISA",
123 .ack = zeus_ack_irq,
124 .mask = zeus_mask_irq,
125 .unmask = zeus_unmask_irq,
126};
127
128static void __init zeus_init_irq(void)
129{
130 int level;
131 int isa_irq;
132
133 pxa27x_init_irq();
134
135 /* Peripheral IRQs. It would be nice to move those inside driver
136 configuration, but it is not supported at the moment. */
137 set_irq_type(gpio_to_irq(ZEUS_AC97_GPIO), IRQ_TYPE_EDGE_RISING);
138 set_irq_type(gpio_to_irq(ZEUS_WAKEUP_GPIO), IRQ_TYPE_EDGE_RISING);
139 set_irq_type(gpio_to_irq(ZEUS_PTT_GPIO), IRQ_TYPE_EDGE_RISING);
140 set_irq_type(gpio_to_irq(ZEUS_EXTGPIO_GPIO), IRQ_TYPE_EDGE_FALLING);
141 set_irq_type(gpio_to_irq(ZEUS_CAN_GPIO), IRQ_TYPE_EDGE_FALLING);
142
143 /* Setup ISA IRQs */
144 for (level = 0; level < ARRAY_SIZE(zeus_isa_irqs); level++) {
145 isa_irq = zeus_bit_to_irq(level);
146 set_irq_chip(isa_irq, &zeus_irq_chip);
147 set_irq_handler(isa_irq, handle_edge_irq);
148 set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE);
149 }
150
151 set_irq_type(gpio_to_irq(ZEUS_ISA_GPIO), IRQ_TYPE_EDGE_RISING);
152 set_irq_chained_handler(gpio_to_irq(ZEUS_ISA_GPIO), zeus_irq_handler);
153}
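One subtlety worth spelling out: zeus_irq_handler() services pending CPLD bits lowest-first, because __ffs() picks the least significant set bit, and it re-reads the status register after each dispatch. A userspace sketch of just the ordering, with POSIX ffs() standing in for __ffs() and an assumed pending mask:

#include <assert.h>
#include <strings.h>	/* ffs() */

static const int isa_irqs[] = { 3, 4, 5, 6, 7, 10, 11, 12 };

int main(void)
{
	unsigned int pending = (1u << 2) | (1u << 5);	/* ISA irq 5 and 10 raised */
	int order[2], n = 0;

	while (pending) {
		int bit = ffs(pending) - 1;	/* lowest set bit first */
		order[n++] = isa_irqs[bit];
		pending &= ~(1u << bit);	/* the real loop re-reads the CPLD instead */
	}
	assert(order[0] == 5 && order[1] == 10);
	return 0;
}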
154
155
156/*
157 * Platform devices
158 */
159
160/* Flash */
161static struct resource zeus_mtd_resources[] = {
162 [0] = { /* NOR Flash (up to 64MB) */
163 .start = ZEUS_FLASH_PHYS,
164 .end = ZEUS_FLASH_PHYS + SZ_64M - 1,
165 .flags = IORESOURCE_MEM,
166 },
167 [1] = { /* SRAM */
168 .start = ZEUS_SRAM_PHYS,
169 .end = ZEUS_SRAM_PHYS + SZ_512K - 1,
170 .flags = IORESOURCE_MEM,
171 },
172};
173
174static struct physmap_flash_data zeus_flash_data[] = {
175 [0] = {
176 .width = 2,
177 .parts = NULL,
178 .nr_parts = 0,
179 },
180};
181
182static struct platform_device zeus_mtd_devices[] = {
183 [0] = {
184 .name = "physmap-flash",
185 .id = 0,
186 .dev = {
187 .platform_data = &zeus_flash_data[0],
188 },
189 .resource = &zeus_mtd_resources[0],
190 .num_resources = 1,
191 },
192};
193
194/* Serial */
195static struct resource zeus_serial_resources[] = {
196 {
197 .start = 0x10000000,
198 .end = 0x1000000f,
199 .flags = IORESOURCE_MEM,
200 },
201 {
202 .start = 0x10800000,
203 .end = 0x1080000f,
204 .flags = IORESOURCE_MEM,
205 },
206 {
207 .start = 0x11000000,
208 .end = 0x1100000f,
209 .flags = IORESOURCE_MEM,
210 },
211 {
212 .start = 0x40100000,
213 .end = 0x4010001f,
214 .flags = IORESOURCE_MEM,
215 },
216 {
217 .start = 0x40200000,
218 .end = 0x4020001f,
219 .flags = IORESOURCE_MEM,
220 },
221 {
222 .start = 0x40700000,
223 .end = 0x4070001f,
224 .flags = IORESOURCE_MEM,
225 },
226};
227
228static struct plat_serial8250_port serial_platform_data[] = {
229 /* External UARTs */
230 /* FIXME: Shared IRQs on COM1-COM4 will not work properly on v1i1 hardware. */
231 { /* COM1 */
232 .mapbase = 0x10000000,
233 .irq = gpio_to_irq(ZEUS_UARTA_GPIO),
234 .irqflags = IRQF_TRIGGER_RISING,
235 .uartclk = 14745600,
236 .regshift = 1,
237 .flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
238 .iotype = UPIO_MEM,
239 },
240 { /* COM2 */
241 .mapbase = 0x10800000,
242 .irq = gpio_to_irq(ZEUS_UARTB_GPIO),
243 .irqflags = IRQF_TRIGGER_RISING,
244 .uartclk = 14745600,
245 .regshift = 1,
246 .flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
247 .iotype = UPIO_MEM,
248 },
249 { /* COM3 */
250 .mapbase = 0x11000000,
251 .irq = gpio_to_irq(ZEUS_UARTC_GPIO),
252 .irqflags = IRQF_TRIGGER_RISING,
253 .uartclk = 14745600,
254 .regshift = 1,
255 .flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
256 .iotype = UPIO_MEM,
257 },
258 { /* COM4 */
259 .mapbase = 0x11800000,
260 .irq = gpio_to_irq(ZEUS_UARTD_GPIO),
261 .irqflags = IRQF_TRIGGER_RISING,
262 .uartclk = 14745600,
263 .regshift = 1,
264 .flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
265 .iotype = UPIO_MEM,
266 },
267 /* Internal UARTs */
268 { /* FFUART */
269 .membase = (void *)&FFUART,
270 .mapbase = __PREG(FFUART),
271 .irq = IRQ_FFUART,
272 .uartclk = 921600 * 16,
273 .regshift = 2,
274 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
275 .iotype = UPIO_MEM,
276 },
277 { /* BTUART */
278 .membase = (void *)&BTUART,
279 .mapbase = __PREG(BTUART),
280 .irq = IRQ_BTUART,
281 .uartclk = 921600 * 16,
282 .regshift = 2,
283 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
284 .iotype = UPIO_MEM,
285 },
286 { /* STUART */
287 .membase = (void *)&STUART,
288 .mapbase = __PREG(STUART),
289 .irq = IRQ_STUART,
290 .uartclk = 921600 * 16,
291 .regshift = 2,
292 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
293 .iotype = UPIO_MEM,
294 },
295 { },
296};
297
298static struct platform_device zeus_serial_device = {
299 .name = "serial8250",
300 .id = PLAT8250_DEV_PLATFORM,
301 .dev = {
302 .platform_data = serial_platform_data,
303 },
304 .num_resources = ARRAY_SIZE(zeus_serial_resources),
305 .resource = zeus_serial_resources,
306};
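A note on the .uartclk values above: the external quad UART runs from a 14.7456 MHz clock precisely because it divides exactly for the standard baud rates under the usual 16x oversampling, and the internal PXA UARTs use the same rate (921600 * 16). A quick arithmetic check:

#include <assert.h>

int main(void)
{
	const unsigned long uartclk = 14745600UL;	/* = 921600 * 16 */

	/* 16550-style divisor = clk / (16 * baud); no remainder, no baud error */
	assert(uartclk / (16 * 115200) == 8 && uartclk % (16 * 115200) == 0);
	assert(uartclk / (16 * 9600) == 96 && uartclk % (16 * 9600) == 0);
	return 0;
}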
307
308/* Ethernet */
309static struct resource zeus_dm9k0_resource[] = {
310 [0] = {
311 .start = ZEUS_ETH0_PHYS,
312 .end = ZEUS_ETH0_PHYS + 1,
313 .flags = IORESOURCE_MEM
314 },
315 [1] = {
316 .start = ZEUS_ETH0_PHYS + 2,
317 .end = ZEUS_ETH0_PHYS + 3,
318 .flags = IORESOURCE_MEM
319 },
320 [2] = {
321 .start = gpio_to_irq(ZEUS_ETH0_GPIO),
322 .end = gpio_to_irq(ZEUS_ETH0_GPIO),
323 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
324 },
325};
326
327static struct resource zeus_dm9k1_resource[] = {
328 [0] = {
329 .start = ZEUS_ETH1_PHYS,
330 .end = ZEUS_ETH1_PHYS + 1,
331 .flags = IORESOURCE_MEM
332 },
333 [1] = {
334 .start = ZEUS_ETH1_PHYS + 2,
335 .end = ZEUS_ETH1_PHYS + 3,
336 .flags = IORESOURCE_MEM,
337 },
338 [2] = {
339 .start = gpio_to_irq(ZEUS_ETH1_GPIO),
340 .end = gpio_to_irq(ZEUS_ETH1_GPIO),
341 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
342 },
343};
344
345static struct dm9000_plat_data zeus_dm9k_platdata = {
346 .flags = DM9000_PLATF_16BITONLY,
347};
348
349static struct platform_device zeus_dm9k0_device = {
350 .name = "dm9000",
351 .id = 0,
352 .num_resources = ARRAY_SIZE(zeus_dm9k0_resource),
353 .resource = zeus_dm9k0_resource,
354 .dev = {
355 .platform_data = &zeus_dm9k_platdata,
356 }
357};
358
359static struct platform_device zeus_dm9k1_device = {
360 .name = "dm9000",
361 .id = 1,
362 .num_resources = ARRAY_SIZE(zeus_dm9k1_resource),
363 .resource = zeus_dm9k1_resource,
364 .dev = {
365 .platform_data = &zeus_dm9k_platdata,
366 }
367};
368
369/* External SRAM */
370static struct resource zeus_sram_resource = {
371 .start = ZEUS_SRAM_PHYS,
372 .end = ZEUS_SRAM_PHYS + ZEUS_SRAM_SIZE * 2 - 1,
373 .flags = IORESOURCE_MEM,
374};
375
376static struct platform_device zeus_sram_device = {
377 .name = "pxa2xx-8bit-sram",
378 .id = 0,
379 .num_resources = 1,
380 .resource = &zeus_sram_resource,
381};
382
383/* SPI interface on SSP3 */
384static struct pxa2xx_spi_master pxa2xx_spi_ssp3_master_info = {
385 .num_chipselect = 1,
386 .enable_dma = 1,
387};
388
389static struct platform_device pxa2xx_spi_ssp3_device = {
390 .name = "pxa2xx-spi",
391 .id = 3,
392 .dev = {
393 .platform_data = &pxa2xx_spi_ssp3_master_info,
394 },
395};
396
397/* Leds */
398static struct gpio_led zeus_leds[] = {
399 [0] = {
400 .name = "zeus:yellow:1",
401 .default_trigger = "heartbeat",
402 .gpio = ZEUS_EXT0_GPIO(3),
403 .active_low = 1,
404 },
405 [1] = {
406 .name = "zeus:yellow:2",
407 .default_trigger = "default-on",
408 .gpio = ZEUS_EXT0_GPIO(4),
409 .active_low = 1,
410 },
411 [2] = {
412 .name = "zeus:yellow:3",
413 .default_trigger = "default-on",
414 .gpio = ZEUS_EXT0_GPIO(5),
415 .active_low = 1,
416 },
417};
418
419static struct gpio_led_platform_data zeus_leds_info = {
420 .leds = zeus_leds,
421 .num_leds = ARRAY_SIZE(zeus_leds),
422};
423
424static struct platform_device zeus_leds_device = {
425 .name = "leds-gpio",
426 .id = -1,
427 .dev = {
428 .platform_data = &zeus_leds_info,
429 },
430};
431
432static void zeus_cf_reset(int state)
433{
434 u16 cpld_state = __raw_readw(ZEUS_CPLD_CONTROL);
435
436 if (state)
437 cpld_state |= ZEUS_CPLD_CONTROL_CF_RST;
438 else
439 cpld_state &= ~ZEUS_CPLD_CONTROL_CF_RST;
440
441 __raw_writew(cpld_state, ZEUS_CPLD_CONTROL);
442}
443
444static struct arcom_pcmcia_pdata zeus_pcmcia_info = {
445 .cd_gpio = ZEUS_CF_CD_GPIO,
446 .rdy_gpio = ZEUS_CF_RDY_GPIO,
447 .pwr_gpio = ZEUS_CF_PWEN_GPIO,
448 .reset = zeus_cf_reset,
449};
450
451static struct platform_device zeus_pcmcia_device = {
452 .name = "zeus-pcmcia",
453 .id = -1,
454 .dev = {
455 .platform_data = &zeus_pcmcia_info,
456 },
457};
458
459static struct platform_device *zeus_devices[] __initdata = {
460 &zeus_serial_device,
461 &zeus_mtd_devices[0],
462 &zeus_dm9k0_device,
463 &zeus_dm9k1_device,
464 &zeus_sram_device,
465 &pxa2xx_spi_ssp3_device,
466 &zeus_leds_device,
467 &zeus_pcmcia_device,
468};
469
470/* AC'97 */
471static pxa2xx_audio_ops_t zeus_ac97_info = {
472 .reset_gpio = 95,
473};
474
475
476/*
477 * USB host
478 */
479
480static int zeus_ohci_init(struct device *dev)
481{
482 int err;
483
484 /* Switch on port 2. */
485 if ((err = gpio_request(ZEUS_USB2_PWREN_GPIO, "USB2_PWREN"))) {
486 dev_err(dev, "Can't request USB2_PWREN\n");
487 return err;
488 }
489
490 if ((err = gpio_direction_output(ZEUS_USB2_PWREN_GPIO, 1))) {
491 gpio_free(ZEUS_USB2_PWREN_GPIO);
492 dev_err(dev, "Can't enable USB2_PWREN\n");
493 return err;
494 }
495
496 /* Port 2 is shared between host and client interface. */
497 UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE;
498
499 return 0;
500}
501
502static void zeus_ohci_exit(struct device *dev)
503{
504 /* Power-off port 2 */
505 gpio_direction_output(ZEUS_USB2_PWREN_GPIO, 0);
506 gpio_free(ZEUS_USB2_PWREN_GPIO);
507}
508
509static struct pxaohci_platform_data zeus_ohci_platform_data = {
510 .port_mode = PMM_NPS_MODE,
511 .flags = ENABLE_PORT_ALL | POWER_CONTROL_LOW | POWER_SENSE_LOW,
512 .init = zeus_ohci_init,
513 .exit = zeus_ohci_exit,
514};
515
516/*
517 * Flat Panel
518 */
519
520static void zeus_lcd_power(int on, struct fb_var_screeninfo *si)
521{
522 gpio_set_value(ZEUS_LCD_EN_GPIO, on);
523}
524
525static void zeus_backlight_power(int on)
526{
527 gpio_set_value(ZEUS_BKLEN_GPIO, on);
528}
529
530static int zeus_setup_fb_gpios(void)
531{
532 int err;
533
534 if ((err = gpio_request(ZEUS_LCD_EN_GPIO, "LCD_EN")))
535 goto out_err;
536
537 if ((err = gpio_direction_output(ZEUS_LCD_EN_GPIO, 0)))
538 goto out_err_lcd;
539
540 if ((err = gpio_request(ZEUS_BKLEN_GPIO, "BKLEN")))
541 goto out_err_lcd;
542
543 if ((err = gpio_direction_output(ZEUS_BKLEN_GPIO, 0)))
544 goto out_err_bkl;
545
546 return 0;
547
548out_err_bkl:
549 gpio_free(ZEUS_BKLEN_GPIO);
550out_err_lcd:
551 gpio_free(ZEUS_LCD_EN_GPIO);
552out_err:
553 return err;
554}
555
556static struct pxafb_mode_info zeus_fb_mode_info[] = {
557 {
558 .pixclock = 39722,
559
560 .xres = 640,
561 .yres = 480,
562
563 .bpp = 16,
564
565 .hsync_len = 63,
566 .left_margin = 16,
567 .right_margin = 81,
568
569 .vsync_len = 2,
570 .upper_margin = 12,
571 .lower_margin = 31,
572
573 .sync = 0,
574 },
575};
576
577static struct pxafb_mach_info zeus_fb_info = {
578 .modes = zeus_fb_mode_info,
579 .num_modes = 1,
580 .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
581 .pxafb_lcd_power = zeus_lcd_power,
582 .pxafb_backlight_power = zeus_backlight_power,
583};
584
585/*
586 * MMC/SD Device
587 *
588 * The card detect interrupt isn't debounced, so we delay it by 250ms
589 * to give the card a chance to fully insert/eject.
590 */
591
592static struct pxamci_platform_data zeus_mci_platform_data = {
593 .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
594 .detect_delay = HZ/4,
595 .gpio_card_detect = ZEUS_MMC_CD_GPIO,
596 .gpio_card_ro = ZEUS_MMC_WP_GPIO,
597 .gpio_card_ro_invert = 1,
598 .gpio_power = -1
599};
600
601/*
602 * USB Device Controller
603 */
604static void zeus_udc_command(int cmd)
605{
606 switch (cmd) {
607 case PXA2XX_UDC_CMD_DISCONNECT:
608 pr_info("zeus: disconnecting USB client\n");
609 UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE;
610 break;
611
612 case PXA2XX_UDC_CMD_CONNECT:
613 pr_info("zeus: connecting USB client\n");
614 UP2OCR = UP2OCR_HXOE | UP2OCR_DPPUE;
615 break;
616 }
617}
618
619static struct pxa2xx_udc_mach_info zeus_udc_info = {
620 .udc_command = zeus_udc_command,
621};
622
static void zeus_power_off(void)
{
	local_irq_disable();
	pxa27x_cpu_suspend(PWRMODE_DEEPSLEEP);
}

int zeus_get_pcb_info(struct i2c_client *client, unsigned gpio,
		      unsigned ngpio, void *context)
{
	int i;
	u8 pcb_info = 0;

	for (i = 0; i < 8; i++) {
		int pcb_bit = gpio + i + 8;

		if (gpio_request(pcb_bit, "pcb info")) {
			dev_err(&client->dev, "Can't request pcb info %d\n", i);
			continue;
		}

		if (gpio_direction_input(pcb_bit)) {
			dev_err(&client->dev, "Can't read pcb info %d\n", i);
			gpio_free(pcb_bit);
			continue;
		}

		pcb_info |= !!gpio_get_value(pcb_bit) << i;

		gpio_free(pcb_bit);
	}

	dev_info(&client->dev, "Zeus PCB version %d issue %d\n",
		 pcb_info >> 4, pcb_info & 0xf);

	return 0;
}

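/*
 * zeus_get_pcb_info() is wired up below as a pca953x ->setup() hook, so
 * it runs once that expander's GPIOs are registered; 'gpio' is the
 * chip's first GPIO number, and 'gpio + i + 8' walks the upper eight
 * lines (the second port of the 16-bit PCA9535), which appear to carry
 * the PCB version/issue straps.
 */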
static struct pca953x_platform_data zeus_pca953x_pdata[] = {
	[0] = { .gpio_base	= ZEUS_EXT0_GPIO_BASE, },
	[1] = {
		.gpio_base	= ZEUS_EXT1_GPIO_BASE,
		.setup		= zeus_get_pcb_info,
	},
	[2] = { .gpio_base	= ZEUS_USER_GPIO_BASE, },
};

static struct i2c_board_info __initdata zeus_i2c_devices[] = {
	{
		I2C_BOARD_INFO("pca9535", 0x21),
		.platform_data	= &zeus_pca953x_pdata[0],
	},
	{
		I2C_BOARD_INFO("pca9535", 0x22),
		.platform_data	= &zeus_pca953x_pdata[1],
	},
	{
		I2C_BOARD_INFO("pca9535", 0x20),
		.platform_data	= &zeus_pca953x_pdata[2],
		.irq		= gpio_to_irq(ZEUS_EXTGPIO_GPIO),
	},
	{ I2C_BOARD_INFO("lm75a", 0x48) },
	{ I2C_BOARD_INFO("24c01", 0x50) },
	{ I2C_BOARD_INFO("isl1208", 0x6f) },
};

static mfp_cfg_t zeus_pin_config[] __initdata = {
	GPIO15_nCS_1,
	GPIO78_nCS_2,
	GPIO80_nCS_4,
	GPIO33_nCS_5,

	GPIO22_GPIO,
	GPIO32_MMC_CLK,
	GPIO92_MMC_DAT_0,
	GPIO109_MMC_DAT_1,
	GPIO110_MMC_DAT_2,
	GPIO111_MMC_DAT_3,
	GPIO112_MMC_CMD,

	GPIO88_USBH1_PWR,
	GPIO89_USBH1_PEN,
	GPIO119_USBH2_PWR,
	GPIO120_USBH2_PEN,

	GPIO86_LCD_LDD_16,
	GPIO87_LCD_LDD_17,

	GPIO102_GPIO,
	GPIO104_CIF_DD_2,
	GPIO105_CIF_DD_1,

	GPIO48_nPOE,
	GPIO49_nPWE,
	GPIO50_nPIOR,
	GPIO51_nPIOW,
	GPIO85_nPCE_1,
	GPIO54_nPCE_2,
	GPIO79_PSKTSEL,
	GPIO55_nPREG,
	GPIO56_nPWAIT,
	GPIO57_nIOIS16,
	GPIO36_GPIO,		/* CF CD */
	GPIO97_GPIO,		/* CF PWREN */
	GPIO99_GPIO,		/* CF RDY */
};

static void __init zeus_init(void)
{
	u16 dm9000_msc = 0xe279;

	system_rev = __raw_readw(ZEUS_CPLD_VERSION);
	pr_info("Zeus CPLD V%dI%d\n", (system_rev & 0xf0) >> 4, (system_rev & 0x0f));

	/* Fix timings for dm9000s (CS1/CS2) */
	MSC0 = (MSC0 & 0xffff) | (dm9000_msc << 16);
	MSC1 = (MSC1 & 0xffff0000) | dm9000_msc;
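	/*
	 * Each PXA MSC register packs static memory timings for two chip
	 * selects: the low half-word configures the even-numbered nCS,
	 * the high half-word the odd one.  Writing dm9000_msc into
	 * MSC0[31:16] and MSC1[15:0] therefore retimes nCS1 and nCS2
	 * (the two DM9000s) while the masks above preserve their
	 * neighbours' settings.
	 */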

	pm_power_off = zeus_power_off;

	pxa2xx_mfp_config(ARRAY_AND_SIZE(zeus_pin_config));

	platform_add_devices(zeus_devices, ARRAY_SIZE(zeus_devices));

	pxa_set_ohci_info(&zeus_ohci_platform_data);

	if (zeus_setup_fb_gpios())
		pr_err("Failed to setup fb gpios\n");
	else
		set_pxa_fb_info(&zeus_fb_info);

	pxa_set_mci_info(&zeus_mci_platform_data);
	pxa_set_udc_info(&zeus_udc_info);
	pxa_set_ac97_info(&zeus_ac97_info);
	pxa_set_i2c_info(NULL);
	i2c_register_board_info(0, ARRAY_AND_SIZE(zeus_i2c_devices));
}

static struct map_desc zeus_io_desc[] __initdata = {
	{
		.virtual = ZEUS_CPLD_VERSION,
		.pfn     = __phys_to_pfn(ZEUS_CPLD_VERSION_PHYS),
		.length  = 0x1000,
		.type    = MT_DEVICE,
	},
	{
		.virtual = ZEUS_CPLD_ISA_IRQ,
		.pfn     = __phys_to_pfn(ZEUS_CPLD_ISA_IRQ_PHYS),
		.length  = 0x1000,
		.type    = MT_DEVICE,
	},
	{
		.virtual = ZEUS_CPLD_CONTROL,
		.pfn     = __phys_to_pfn(ZEUS_CPLD_CONTROL_PHYS),
		.length  = 0x1000,
		.type    = MT_DEVICE,
	},
	{
		.virtual = ZEUS_CPLD_EXTWDOG,
		.pfn     = __phys_to_pfn(ZEUS_CPLD_EXTWDOG_PHYS),
		.length  = 0x1000,
		.type    = MT_DEVICE,
	},
	{
		.virtual = ZEUS_PC104IO,
		.pfn     = __phys_to_pfn(ZEUS_PC104IO_PHYS),
		.length  = 0x00800000,
		.type    = MT_DEVICE,
	},
};

static void __init zeus_map_io(void)
{
	pxa_map_io();

	iotable_init(zeus_io_desc, ARRAY_SIZE(zeus_io_desc));

	/* Clear PSPR to ensure a full restart on wake-up. */
	PMCR = PSPR = 0;

	/* Enable the internal 32.768 kHz oscillator (ignore OSCC_OOK). */
	OSCC |= OSCC_OON;

	/*
	 * Some clock cycles later (from OSCC_ON), programme PCFR (OPDE...).
	 * Float chip selects and PCMCIA.
	 */
	PCFR = PCFR_OPDE | PCFR_DC_EN | PCFR_FS | PCFR_FP;
}

MACHINE_START(ARCOM_ZEUS, "Arcom ZEUS")
	/* Maintainer: Marc Zyngier <maz@misterjones.org> */
	.phys_io	= 0x40000000,
	.io_pg_offst	= ((io_p2v(0x40000000) >> 18) & 0xfffc),
	.boot_params	= 0xa0000100,
	.map_io		= zeus_map_io,
	.init_irq	= zeus_init_irq,
	.timer		= &pxa_timer,
	.init_machine	= zeus_init,
MACHINE_END
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index c48e1f2c3349..ee5e392430e8 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -70,7 +70,7 @@ config MACH_REALVIEW_PBX
 	bool "Support RealView/PBX platform"
 	select ARM_GIC
 	select HAVE_PATA_PLATFORM
-	select ARCH_SPARSEMEM_ENABLE if CPU_V7 && !HIGH_PHYS_OFFSET
+	select ARCH_SPARSEMEM_ENABLE if CPU_V7 && !REALVIEW_HIGH_PHYS_OFFSET
 	select ZONE_DMA if SPARSEMEM
 	help
 	  Include support for the ARM(R) RealView PBX platform.
diff --git a/arch/arm/mach-s3c24a0/include/mach/memory.h b/arch/arm/mach-s3c24a0/include/mach/memory.h
index 585211ca0187..7d74fd5c8d66 100644
--- a/arch/arm/mach-s3c24a0/include/mach/memory.h
+++ b/arch/arm/mach-s3c24a0/include/mach/memory.h
@@ -15,5 +15,7 @@
 
 #define __virt_to_bus(x)	__virt_to_phys(x)
 #define __bus_to_virt(x)	__phys_to_virt(x)
+#define __pfn_to_bus(x)		__pfn_to_phys(x)
+#define __bus_to_pfn(x)		__phys_to_pfn(x)
 
 #endif
diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig
index 03a7f3857c5e..b17d52f7cc48 100644
--- a/arch/arm/mach-sa1100/Kconfig
+++ b/arch/arm/mach-sa1100/Kconfig
@@ -4,6 +4,7 @@ menu "SA11x0 Implementations"
 
 config SA1100_ASSABET
 	bool "Assabet"
+	select CPU_FREQ_SA1110
 	help
 	  Say Y here if you are using the Intel(R) StrongARM(R) SA-1110
 	  Microprocessor Development Board (also known as the Assabet).
@@ -19,6 +20,7 @@ config ASSABET_NEPONSET
 
 config SA1100_CERF
 	bool "CerfBoard"
+	select CPU_FREQ_SA1110
 	help
 	  The Intrinsyc CerfBoard is based on the StrongARM 1110 (Discontinued).
 	  More information is available at:
@@ -45,6 +47,7 @@ endchoice
 
 config SA1100_COLLIE
 	bool "Sharp Zaurus SL5500"
+	# FIXME: select CPU_FREQ_SA11x0
 	select SHARP_LOCOMO
 	select SHARP_SCOOP
 	select SHARP_PARAM
@@ -54,6 +57,7 @@ config SA1100_COLLIE
 config SA1100_H3100
 	bool "Compaq iPAQ H3100"
 	select HTC_EGPIO
+	select CPU_FREQ_SA1100
 	help
 	  Say Y here if you intend to run this kernel on the Compaq iPAQ
 	  H3100 handheld computer.  Information about this machine and the
@@ -64,6 +68,7 @@ config SA1100_H3100
 config SA1100_H3600
 	bool "Compaq iPAQ H3600/H3700"
 	select HTC_EGPIO
+	select CPU_FREQ_SA1100
 	help
 	  Say Y here if you intend to run this kernel on the Compaq iPAQ
 	  H3600 handheld computer.  Information about this machine and the
@@ -74,6 +79,7 @@ config SA1100_H3600
 config SA1100_BADGE4
 	bool "HP Labs BadgePAD 4"
 	select SA1111
+	select CPU_FREQ_SA1100
 	help
 	  Say Y here if you want to build a kernel for the HP Laboratories
 	  BadgePAD 4.
@@ -81,6 +87,7 @@ config SA1100_BADGE4
 config SA1100_JORNADA720
 	bool "HP Jornada 720"
 	select SA1111
+	# FIXME: select CPU_FREQ_SA11x0
 	help
 	  Say Y here if you want to build a kernel for the HP Jornada 720
 	  handheld computer.  See <http://www.hp.com/jornada/products/720>
@@ -98,12 +105,14 @@ config SA1100_JORNADA720_SSP
 
 config SA1100_HACKKIT
 	bool "HackKit Core CPU Board"
+	select CPU_FREQ_SA1100
 	help
 	  Say Y here to support the HackKit Core CPU Board
 	  <http://hackkit.eletztrick.de>;
 
 config SA1100_LART
 	bool "LART"
+	select CPU_FREQ_SA1100
 	help
 	  Say Y here if you are using the Linux Advanced Radio Terminal
 	  (also known as the LART). See <http://www.lartmaker.nl/> for
@@ -111,6 +120,7 @@ config SA1100_LART
 
 config SA1100_PLEB
 	bool "PLEB"
+	select CPU_FREQ_SA1100
 	help
 	  Say Y here if you are using version 1 of the Portable Linux
 	  Embedded Board (also known as PLEB).
@@ -119,6 +129,7 @@ config SA1100_PLEB
 
 config SA1100_SHANNON
 	bool "Shannon"
+	select CPU_FREQ_SA1110
 	help
 	  The Shannon (also known as a Tuxscreen, and also as a IS2630) was a
 	  limited edition webphone produced by Philips. The Shannon is a SA1100
@@ -127,6 +138,7 @@ config SA1100_SHANNON
 
 config SA1100_SIMPAD
 	bool "Simpad"
+	select CPU_FREQ_SA1110
 	help
 	  The SIEMENS webpad SIMpad is based on the StrongARM 1110. There
 	  are two different versions CL4 and SL4. CL4 has 32MB RAM and 16MB
@@ -145,3 +157,4 @@ config SA1100_SSP
 endmenu
 
 endif
+
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 9faea1511c1f..3c1fcd696714 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -58,7 +58,6 @@ static const unsigned short cclk_frequency_100khz[NR_FREQS] = {
 	2802	/* 280.2 MHz */
 };
 
-#if defined(CONFIG_CPU_FREQ_SA1100) || defined(CONFIG_CPU_FREQ_SA1110)
 /* rounds up(!)  */
 unsigned int sa11x0_freq_to_ppcr(unsigned int khz)
 {
@@ -110,17 +109,6 @@ unsigned int sa11x0_getspeed(unsigned int cpu)
 	return cclk_frequency_100khz[PPCR & 0xf] * 100;
 }
 
-#else
-/*
- * We still need to provide this so building without cpufreq works.
- */
-unsigned int cpufreq_get(unsigned int cpu)
-{
-	return cclk_frequency_100khz[PPCR & 0xf] * 100;
-}
-EXPORT_SYMBOL(cpufreq_get);
-#endif
-
 /*
  * This is the SA11x0 sched_clock implementation. This has
  * a resolution of 271ns, and a maximum value of 32025597s (370 days).
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 2d7423af1197..aed05bc3c2ea 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -38,16 +38,72 @@ union vfp_state *last_VFP_context[NR_CPUS];
  */
 unsigned int VFP_arch;
 
+/*
+ * Per-thread VFP initialization.
+ */
+static void vfp_thread_flush(struct thread_info *thread)
+{
+	union vfp_state *vfp = &thread->vfpstate;
+	unsigned int cpu;
+
+	memset(vfp, 0, sizeof(union vfp_state));
+
+	vfp->hard.fpexc = FPEXC_EN;
+	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+
+	/*
+	 * Disable VFP to ensure we initialize it first.  We must ensure
+	 * that the modification of last_VFP_context[] and hardware disable
+	 * are done for the same CPU and without preemption.
+	 */
+	cpu = get_cpu();
+	if (last_VFP_context[cpu] == vfp)
+		last_VFP_context[cpu] = NULL;
+	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+	put_cpu();
+}
+
+static void vfp_thread_release(struct thread_info *thread)
+{
+	/* release case: Per-thread VFP cleanup. */
+	union vfp_state *vfp = &thread->vfpstate;
+	unsigned int cpu = thread->cpu;
+
+	if (last_VFP_context[cpu] == vfp)
+		last_VFP_context[cpu] = NULL;
+}
+
+/*
+ * When this function is called with the following 'cmd's, the following
+ * is true while this function is being run:
+ * THREAD_NOTIFY_SWITCH:
+ *  - the previously running thread will not be scheduled onto another CPU.
+ *  - the next thread to be run (v) will not be running on another CPU.
+ *  - thread->cpu is the local CPU number
+ *  - not preemptible as we're called in the middle of a thread switch
+ * THREAD_NOTIFY_FLUSH:
+ *  - the thread (v) will be running on the local CPU, so
+ *	v === current_thread_info()
+ *  - thread->cpu is the local CPU number at the time it is accessed,
+ *	but may change at any time.
+ *  - we could be preempted if tree preempt rcu is enabled, so
+ *	it is unsafe to use thread->cpu.
+ * THREAD_NOTIFY_RELEASE:
+ *  - the thread (v) will not be running on any CPU; it is a dead thread.
+ *  - thread->cpu will be the last CPU the thread ran on, which may not
+ *	be the current CPU.
+ *  - we could be preempted if tree preempt rcu is enabled.
+ */
 static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 {
 	struct thread_info *thread = v;
-	union vfp_state *vfp;
-	__u32 cpu = thread->cpu;
 
 	if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
 		u32 fpexc = fmrx(FPEXC);
 
 #ifdef CONFIG_SMP
+		unsigned int cpu = thread->cpu;
+
 		/*
 		 * On SMP, if VFP is enabled, save the old state in
 		 * case the thread migrates to a different CPU. The
@@ -74,25 +130,10 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 		return NOTIFY_DONE;
 	}
 
-	vfp = &thread->vfpstate;
-	if (cmd == THREAD_NOTIFY_FLUSH) {
-		/*
-		 * Per-thread VFP initialisation.
-		 */
-		memset(vfp, 0, sizeof(union vfp_state));
-
-		vfp->hard.fpexc = FPEXC_EN;
-		vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
-
-		/*
-		 * Disable VFP to ensure we initialise it first.
-		 */
-		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
-	}
-
-	/* flush and release case: Per-thread VFP cleanup. */
-	if (last_VFP_context[cpu] == vfp)
-		last_VFP_context[cpu] = NULL;
+	if (cmd == THREAD_NOTIFY_FLUSH)
+		vfp_thread_flush(thread);
+	else
+		vfp_thread_release(thread);
 
 	return NOTIFY_DONE;
 }
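
The vfp_thread_flush() hunk above leans on a standard kernel idiom: get_cpu() disables preemption and returns the current CPU id, so the last_VFP_context[] check and the FPEXC write cannot be separated by a migration. A minimal sketch of that idiom, with hypothetical names (example_state/example_flush are illustrations, not part of the patch):

#include <linux/smp.h>

struct example_state;
static struct example_state *example_state[NR_CPUS];

static void example_flush(struct example_state *st)
{
	unsigned int cpu = get_cpu();	/* disables preemption */

	if (example_state[cpu] == st)	/* safe: no migration until put_cpu() */
		example_state[cpu] = NULL;
	put_cpu();			/* re-enables preemption */
}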
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1ee596cd942f..2d7f56a98e0f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -87,9 +87,6 @@ config GENERIC_TIME_VSYSCALL
 	bool
 	default y
 
-config HAVE_LEGACY_PER_CPU_AREA
-	def_bool y
-
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y
 
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 688a812c017d..61c7b1750b16 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -61,7 +61,7 @@ extern int register_active_ranges(u64 start, u64 len, int nid);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define LARGE_GAP	0x40000000 /* Use virtual mem map if hole is > than this */
-  extern unsigned long vmalloc_end;
+  extern unsigned long VMALLOC_END;
   extern struct page *vmem_map;
   extern int find_largest_hole(u64 start, u64 end, void *arg);
   extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 8840a690d1e7..69bf13857a9f 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -228,8 +228,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define VMALLOC_START	(RGN_BASE(RGN_GATE) + 0x200000000UL)
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define VMALLOC_END_INIT	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
-# define VMALLOC_END		vmalloc_end
-  extern unsigned long vmalloc_end;
+extern unsigned long VMALLOC_END;
 #else
 #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
 /* SPARSEMEM_VMEMMAP uses half of vmalloc... */
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 3eaeedf1aef2..7fa90f73f6be 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -229,7 +229,7 @@ struct cpuinfo_ia64 {
 #endif
 };
 
-DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
 
 /*
  * The "local" data variable.  It refers to the per-CPU data of the currently executing
@@ -237,8 +237,8 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
  * Do not use the address of local_cpu_data, since it will be different from
  * cpu_data(smp_processor_id())!
  */
-#define local_cpu_data		(&__ia64_per_cpu_var(cpu_info))
-#define cpu_data(cpu)		(&per_cpu(cpu_info, cpu))
+#define local_cpu_data		(&__ia64_per_cpu_var(ia64_cpu_info))
+#define cpu_data(cpu)		(&per_cpu(ia64_cpu_info, cpu))
 
 extern void print_cpu_info (struct cpuinfo_ia64 *);
 
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index baec6f00f7f3..40574ae11401 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -702,11 +702,23 @@ int __init early_acpi_boot_init(void)
 		printk(KERN_ERR PREFIX
 		       "Error parsing MADT - no LAPIC entries\n");
 
+#ifdef CONFIG_SMP
+	if (available_cpus == 0) {
+		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
+		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
+		smp_boot_data.cpu_phys_id[available_cpus] =
+		    hard_smp_processor_id();
+		available_cpus = 1;	/* We've got at least one of these, no? */
+	}
+	smp_boot_data.cpu_count = available_cpus;
+#endif
+	/* Make boot-up look pretty */
+	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
+	       total_cpus);
+
 	return 0;
 }
 
-
-
 int __init acpi_boot_init(void)
 {
 
@@ -769,18 +781,8 @@ int __init acpi_boot_init(void)
 	if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
 		printk(KERN_ERR PREFIX "Can't find FADT\n");
 
+#ifdef CONFIG_ACPI_NUMA
 #ifdef CONFIG_SMP
-	if (available_cpus == 0) {
-		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
-		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
-		smp_boot_data.cpu_phys_id[available_cpus] =
-		    hard_smp_processor_id();
-		available_cpus = 1;	/* We've got at least one of these, no? */
-	}
-	smp_boot_data.cpu_count = available_cpus;
-
-	smp_build_cpu_map();
-# ifdef CONFIG_ACPI_NUMA
 	if (srat_num_cpus == 0) {
 		int cpu, i = 1;
 		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
@@ -789,14 +791,9 @@ int __init acpi_boot_init(void)
 			node_cpuid[i++].phys_id =
 			    smp_boot_data.cpu_phys_id[cpu];
 	}
-# endif
 #endif
-#ifdef CONFIG_ACPI_NUMA
 	build_cpu_to_node_map();
 #endif
-	/* Make boot-up look pretty */
-	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
-	       total_cpus);
 	return 0;
 }
 
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 696eff28a0c4..17a9fba38930 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1051,7 +1051,7 @@ END(ia64_delay_loop)
  * intermediate precision so that we can produce a full 64-bit result.
  */
 GLOBAL_ENTRY(ia64_native_sched_clock)
-	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
 	mov.m r9=ar.itc		// fetch cycle-counter  (35 cyc)
 	;;
 	ldf8 f8=[r8]
@@ -1077,7 +1077,7 @@ sched_clock = ia64_native_sched_clock
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 GLOBAL_ENTRY(cycle_to_cputime)
 	alloc r16=ar.pfs,1,0,0,0
-	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
 	;;
 	ldf8 f8=[r8]
 	;;
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 14d39e300627..461b99902bf6 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic
 #endif
 
 #include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__cpu_info);
+EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
 #endif
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 7461d2573d41..d5bdf9de36b6 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -59,7 +59,7 @@
 ia64_do_tlb_purge:
 #define O(member)	IA64_CPUINFO_##member##_OFFSET
 
-	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
+	GET_THIS_PADDR(r2, ia64_cpu_info)	// load phys addr of cpu_info into r2
 	;;
 	addl r17=O(PTCE_STRIDE),r2
 	addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index 32f6fc131fbe..c370e02f0061 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
 
 	// purge all TC entries
 #define O(member)	IA64_CPUINFO_##member##_OFFSET
-	GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
+	GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
 	;;
 	addl r17=O(PTCE_STRIDE),r2
 	addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1de86c96801d..a1ea87919777 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -74,7 +74,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
 EXPORT_SYMBOL(__per_cpu_offset);
 #endif
 
-DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
 unsigned long ia64_cycles_per_usec;
 struct ia64_boot_param *ia64_boot_param;
@@ -566,19 +566,18 @@ setup_arch (char **cmdline_p)
 	early_acpi_boot_init();
 # ifdef CONFIG_ACPI_NUMA
 	acpi_numa_init();
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
+#  ifdef CONFIG_ACPI_HOTPLUG_CPU
 	prefill_possible_map();
-#endif
+#  endif
 	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
 		32 : cpus_weight(early_cpu_possible_map)),
 		additional_cpus > 0 ? additional_cpus : 0);
 # endif
-#else
-# ifdef CONFIG_SMP
-	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
-# endif
 #endif /* CONFIG_APCI_BOOT */
 
+#ifdef CONFIG_SMP
+	smp_build_cpu_map();
+#endif
 	find_memory();
 
 	/* process SAL system table: */
@@ -856,18 +855,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
 }
 
 /*
- * In UP configuration, setup_per_cpu_areas() is defined in
- * include/linux/percpu.h
- */
-#ifdef CONFIG_SMP
-void __init
-setup_per_cpu_areas (void)
-{
-	/* start_kernel() requires this... */
-}
-#endif
-
-/*
  * Do the following calculations:
  *
  * 1. the max. cache line size.
@@ -980,7 +967,7 @@ cpu_init (void)
 	 * depends on the data returned by identify_cpu().  We break the dependency by
 	 * accessing cpu_data() through the canonical per-CPU address.
 	 */
-	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
+	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
 	identify_cpu(cpu_info);
 
 #ifdef CONFIG_MCKINLEY
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0a0c77b2c988..1295ba327f6f 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -166,6 +166,12 @@ SECTIONS
 	}
 #endif
 
+#ifdef CONFIG_SMP
+	. = ALIGN(PERCPU_PAGE_SIZE);
+	__cpu0_per_cpu = .;
+	. = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
+#endif
+
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 
@@ -198,11 +204,6 @@ SECTIONS
 	data : { } :data
 	.data : AT(ADDR(.data) - LOAD_OFFSET)
 	{
-#ifdef CONFIG_SMP
-	. = ALIGN(PERCPU_PAGE_SIZE);
-	__cpu0_per_cpu = .;
-	. = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
-#endif
 		INIT_TASK_DATA(PAGE_SIZE)
 		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
 		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2f724d2bf299..54bf54059811 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -154,38 +154,99 @@ static void *cpu_data;
 void * __cpuinit
 per_cpu_init (void)
 {
-	int cpu;
-	static int first_time=1;
+	static bool first_time = true;
+	void *cpu0_data = __cpu0_per_cpu;
+	unsigned int cpu;
+
+	if (!first_time)
+		goto skip;
+	first_time = false;
 
 	/*
-	 * get_free_pages() cannot be used before cpu_init() done.  BSP
-	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
-	 * get_zeroed_page().
+	 * get_free_pages() cannot be used before cpu_init() done.
+	 * BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
+	 * to avoid that AP calls get_zeroed_page().
 	 */
-	if (first_time) {
-		void *cpu0_data = __cpu0_per_cpu;
+	for_each_possible_cpu(cpu) {
+		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;
 
-		first_time=0;
+		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
+		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
+		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 
-		__per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
-		per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
+		/*
+		 * percpu area for cpu0 is moved from the __init area
+		 * which is setup by head.S and used till this point.
+		 * Update ar.k3.  This move ensures that the percpu
+		 * area for cpu0 is on the correct node and that its
+		 * virtual address isn't insanely far from the other
+		 * percpu areas, which is important for the congruent
+		 * percpu allocator.
+		 */
+		if (cpu == 0)
+			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
+				    (unsigned long)__per_cpu_start);
 
-		for (cpu = 1; cpu < NR_CPUS; cpu++) {
-			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
-		}
+		cpu_data += PERCPU_PAGE_SIZE;
 	}
+skip:
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
 
 static inline void
 alloc_per_cpu_data(void)
 {
-	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
+	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
 				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }
+
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init
+setup_per_cpu_areas(void)
+{
+	struct pcpu_alloc_info *ai;
+	struct pcpu_group_info *gi;
+	unsigned int cpu;
+	ssize_t static_size, reserved_size, dyn_size;
+	int rc;
+
+	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
+	if (!ai)
+		panic("failed to allocate pcpu_alloc_info");
+	gi = &ai->groups[0];
+
+	/* units are assigned consecutively to possible cpus */
+	for_each_possible_cpu(cpu)
+		gi->cpu_map[gi->nr_units++] = cpu;
+
+	/* set parameters */
+	static_size = __per_cpu_end - __per_cpu_start;
+	reserved_size = PERCPU_MODULE_RESERVE;
+	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+	if (dyn_size < 0)
+		panic("percpu area overflow static=%zd reserved=%zd\n",
+		      static_size, reserved_size);
+
+	ai->static_size		= static_size;
+	ai->reserved_size	= reserved_size;
+	ai->dyn_size		= dyn_size;
+	ai->unit_size		= PERCPU_PAGE_SIZE;
+	ai->atom_size		= PAGE_SIZE;
+	ai->alloc_size		= PERCPU_PAGE_SIZE;
+
+	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
+	if (rc)
+		panic("failed to setup percpu area (err=%d)", rc);
+
+	pcpu_free_alloc_info(ai);
+}
 #else
 #define alloc_per_cpu_data() do { } while (0)
 #endif /* CONFIG_SMP */
@@ -270,8 +331,8 @@ paging_init (void)
 
 	map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
 		sizeof(struct page));
-	vmalloc_end -= map_size;
-	vmem_map = (struct page *) vmalloc_end;
+	VMALLOC_END -= map_size;
+	vmem_map = (struct page *) VMALLOC_END;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
 
 	/*
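
Both setup_per_cpu_areas() implementations added in this series enforce the same invariant: a single PERCPU_PAGE_SIZE unit must hold the static percpu image, the module reserve, and a non-negative dynamic remainder. A standalone sketch of that arithmetic (the sizes are assumptions for illustration; ia64's PERCPU_PAGE_SIZE and the reserve depend on the configuration):

#include <stdio.h>

int main(void)
{
	long unit_size     = 64 * 1024;	/* PERCPU_PAGE_SIZE, assumed 64K */
	long static_size   = 20 * 1024;	/* __per_cpu_end - __per_cpu_start, assumed */
	long reserved_size =  8 * 1024;	/* PERCPU_MODULE_RESERVE, assumed */
	long dyn_size = unit_size - static_size - reserved_size;

	if (dyn_size < 0)
		printf("percpu area overflow\n");	/* the panic() case above */
	else
		printf("dynamic percpu space: %ld bytes\n", dyn_size);
	return 0;
}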
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d85ba98d9008..19c4b2195dce 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -143,22 +143,120 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 	int cpu;
 
 	for_each_possible_early_cpu(cpu) {
-		if (cpu == 0) {
-			void *cpu0_data = __cpu0_per_cpu;
-			__per_cpu_offset[cpu] = (char*)cpu0_data -
-						__per_cpu_start;
-		} else if (node == node_cpuid[cpu].nid) {
-			memcpy(__va(cpu_data), __phys_per_cpu_start,
-			       __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
-						__per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-		}
+		void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
+
+		if (node != node_cpuid[cpu].nid)
+			continue;
+
+		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
+		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
+					__per_cpu_start;
+
+		/*
+		 * percpu area for cpu0 is moved from the __init area
+		 * which is setup by head.S and used till this point.
+		 * Update ar.k3.  This move ensures that the percpu
+		 * area for cpu0 is on the correct node and that its
+		 * virtual address isn't insanely far from the other
+		 * percpu areas, which is important for the congruent
+		 * percpu allocator.
+		 */
+		if (cpu == 0)
+			ia64_set_kr(IA64_KR_PER_CPU_DATA,
+				    (unsigned long)cpu_data -
+				    (unsigned long)__per_cpu_start);
+
+		cpu_data += PERCPU_PAGE_SIZE;
 	}
 #endif
 	return cpu_data;
 }
 
+#ifdef CONFIG_SMP
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init setup_per_cpu_areas(void)
+{
+	struct pcpu_alloc_info *ai;
+	struct pcpu_group_info *uninitialized_var(gi);
+	unsigned int *cpu_map;
+	void *base;
+	unsigned long base_offset;
+	unsigned int cpu;
+	ssize_t static_size, reserved_size, dyn_size;
+	int node, prev_node, unit, nr_units, rc;
+
+	ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
+	if (!ai)
+		panic("failed to allocate pcpu_alloc_info");
+	cpu_map = ai->groups[0].cpu_map;
+
+	/* determine base */
+	base = (void *)ULONG_MAX;
+	for_each_possible_cpu(cpu)
+		base = min(base,
+			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
+	base_offset = (void *)__per_cpu_start - base;
+
+	/* build cpu_map, units are grouped by node */
+	unit = 0;
+	for_each_node(node)
+		for_each_possible_cpu(cpu)
+			if (node == node_cpuid[cpu].nid)
+				cpu_map[unit++] = cpu;
+	nr_units = unit;
+
+	/* set basic parameters */
+	static_size = __per_cpu_end - __per_cpu_start;
+	reserved_size = PERCPU_MODULE_RESERVE;
+	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+	if (dyn_size < 0)
+		panic("percpu area overflow static=%zd reserved=%zd\n",
+		      static_size, reserved_size);
+
+	ai->static_size		= static_size;
+	ai->reserved_size	= reserved_size;
+	ai->dyn_size		= dyn_size;
+	ai->unit_size		= PERCPU_PAGE_SIZE;
+	ai->atom_size		= PAGE_SIZE;
+	ai->alloc_size		= PERCPU_PAGE_SIZE;
+
+	/*
+	 * CPUs are put into groups according to node.  Walk cpu_map
+	 * and create new groups at node boundaries.
+	 */
+	prev_node = -1;
+	ai->nr_groups = 0;
+	for (unit = 0; unit < nr_units; unit++) {
+		cpu = cpu_map[unit];
+		node = node_cpuid[cpu].nid;
+
+		if (node == prev_node) {
+			gi->nr_units++;
+			continue;
+		}
+		prev_node = node;
+
+		gi = &ai->groups[ai->nr_groups++];
+		gi->nr_units		= 1;
+		gi->base_offset		= __per_cpu_offset[cpu] + base_offset;
+		gi->cpu_map		= &cpu_map[unit];
+	}
+
+	rc = pcpu_setup_first_chunk(ai, base);
+	if (rc)
+		panic("failed to setup percpu area (err=%d)", rc);
+
+	pcpu_free_alloc_info(ai);
+}
+#endif
+
 /**
  * fill_pernode - initialize pernode data.
  * @node: the node id.
@@ -352,7 +450,8 @@ static void __init initialize_pernode_data(void)
 	/* Set the node_data pointer for each per-cpu struct */
 	for_each_possible_early_cpu(cpu) {
 		node = node_cpuid[cpu].nid;
-		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
+		per_cpu(ia64_cpu_info, cpu).node_data =
+			mem_data[node].node_data;
 	}
 #else
 	{
@@ -360,7 +459,7 @@ static void __init initialize_pernode_data(void)
 		cpu = 0;
 		node = node_cpuid[cpu].nid;
 		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
-			((char *)&per_cpu__cpu_info - __per_cpu_start));
+			((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
 		cpu0_cpu_info->node_data = mem_data[node].node_data;
 	}
 #endif /* CONFIG_SMP */
@@ -666,9 +765,9 @@ void __init paging_init(void)
 	sparse_init();
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+	VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
 		sizeof(struct page));
-	vmem_map = (struct page *) vmalloc_end;
+	vmem_map = (struct page *) VMALLOC_END;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
 	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
 #endif
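
The NUMA variant above walks a node-sorted cpu_map and opens a new percpu group whenever the node id changes. The boundary walk in isolation (toy data; same control flow as the loop in the hunk):

#include <stdio.h>

int main(void)
{
	/* Hypothetical cpu -> node map, already sorted by node. */
	int cpu_node[] = { 0, 0, 1, 1, 1, 2 };
	int nr_units = sizeof(cpu_node) / sizeof(cpu_node[0]);
	int prev_node = -1, nr_groups = 0;

	for (int unit = 0; unit < nr_units; unit++) {
		if (cpu_node[unit] == prev_node)
			continue;		/* same node: same group */
		prev_node = cpu_node[unit];	/* node boundary: new group */
		nr_groups++;
	}
	printf("%d units in %d node groups\n", nr_units, nr_groups);
	return 0;
}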
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1857766a63c1..b9609c69343a 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -44,8 +44,8 @@ extern void ia64_tlb_init (void);
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long VMALLOC_END = VMALLOC_END_INIT;
+EXPORT_SYMBOL(VMALLOC_END);
 struct page *vmem_map;
 EXPORT_SYMBOL(vmem_map);
 #endif
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 1176506b2bae..e884ba4e031d 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -496,13 +496,13 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
 		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
 				stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
 				stat->deadlocks,
-				1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
-				1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
-				1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
+				1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+				1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+				1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
 				stat->shub_ptc_flushes_not_my_mm,
 				stat->deadlocks2,
 				stat->shub_ipi_flushes,
-				1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec);
+				1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
 	}
 	return 0;
 }
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c
index f042e192d2fe..a3fb7cf9ae1d 100644
--- a/arch/ia64/xen/irq_xen.c
+++ b/arch/ia64/xen/irq_xen.c
@@ -63,19 +63,19 @@ xen_free_irq_vector(int vector)
 }
 
 
-static DEFINE_PER_CPU(int, timer_irq) = -1;
-static DEFINE_PER_CPU(int, ipi_irq) = -1;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, cmc_irq) = -1;
-static DEFINE_PER_CPU(int, cmcp_irq) = -1;
-static DEFINE_PER_CPU(int, cpep_irq) = -1;
+static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
+static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
 #define NAME_SIZE	15
-static DEFINE_PER_CPU(char[NAME_SIZE], timer_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], resched_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
 #undef NAME_SIZE
 
 struct saved_irq {
@@ -144,64 +144,64 @@ __xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
 	if (xen_slab_ready) {
 		switch (vec) {
 		case IA64_TIMER_VECTOR:
-			snprintf(per_cpu(timer_name, cpu),
-				 sizeof(per_cpu(timer_name, cpu)),
+			snprintf(per_cpu(xen_timer_name, cpu),
+				 sizeof(per_cpu(xen_timer_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
 				action->handler, action->flags,
-				per_cpu(timer_name, cpu), action->dev_id);
-			per_cpu(timer_irq, cpu) = irq;
+				per_cpu(xen_timer_name, cpu), action->dev_id);
+			per_cpu(xen_timer_irq, cpu) = irq;
 			break;
 		case IA64_IPI_RESCHEDULE:
-			snprintf(per_cpu(resched_name, cpu),
-				 sizeof(per_cpu(resched_name, cpu)),
+			snprintf(per_cpu(xen_resched_name, cpu),
+				 sizeof(per_cpu(xen_resched_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
 				action->handler, action->flags,
-				per_cpu(resched_name, cpu), action->dev_id);
-			per_cpu(resched_irq, cpu) = irq;
+				per_cpu(xen_resched_name, cpu), action->dev_id);
+			per_cpu(xen_resched_irq, cpu) = irq;
 			break;
 		case IA64_IPI_VECTOR:
-			snprintf(per_cpu(ipi_name, cpu),
-				 sizeof(per_cpu(ipi_name, cpu)),
+			snprintf(per_cpu(xen_ipi_name, cpu),
+				 sizeof(per_cpu(xen_ipi_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
 				action->handler, action->flags,
-				per_cpu(ipi_name, cpu), action->dev_id);
-			per_cpu(ipi_irq, cpu) = irq;
+				per_cpu(xen_ipi_name, cpu), action->dev_id);
+			per_cpu(xen_ipi_irq, cpu) = irq;
 			break;
 		case IA64_CMC_VECTOR:
-			snprintf(per_cpu(cmc_name, cpu),
-				 sizeof(per_cpu(cmc_name, cpu)),
+			snprintf(per_cpu(xen_cmc_name, cpu),
+				 sizeof(per_cpu(xen_cmc_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
 						      action->handler,
 						      action->flags,
-						      per_cpu(cmc_name, cpu),
+						      per_cpu(xen_cmc_name, cpu),
 						      action->dev_id);
-			per_cpu(cmc_irq, cpu) = irq;
+			per_cpu(xen_cmc_irq, cpu) = irq;
 			break;
 		case IA64_CMCP_VECTOR:
-			snprintf(per_cpu(cmcp_name, cpu),
-				 sizeof(per_cpu(cmcp_name, cpu)),
+			snprintf(per_cpu(xen_cmcp_name, cpu),
+				 sizeof(per_cpu(xen_cmcp_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
 						     action->handler,
 						     action->flags,
-						     per_cpu(cmcp_name, cpu),
+						     per_cpu(xen_cmcp_name, cpu),
 						     action->dev_id);
-			per_cpu(cmcp_irq, cpu) = irq;
+			per_cpu(xen_cmcp_irq, cpu) = irq;
 			break;
 		case IA64_CPEP_VECTOR:
-			snprintf(per_cpu(cpep_name, cpu),
-				 sizeof(per_cpu(cpep_name, cpu)),
+			snprintf(per_cpu(xen_cpep_name, cpu),
+				 sizeof(per_cpu(xen_cpep_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
 						     action->handler,
 						     action->flags,
-						     per_cpu(cpep_name, cpu),
+						     per_cpu(xen_cpep_name, cpu),
 						     action->dev_id);
-			per_cpu(cpep_irq, cpu) = irq;
+			per_cpu(xen_cpep_irq, cpu) = irq;
 			break;
 		case IA64_CPE_VECTOR:
 		case IA64_MCA_RENDEZ_VECTOR:
@@ -275,30 +275,33 @@ unbind_evtchn_callback(struct notifier_block *nfb,
 
 	if (action == CPU_DEAD) {
 		/* Unregister evtchn. */
-		if (per_cpu(cpep_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
-			per_cpu(cpep_irq, cpu) = -1;
+		if (per_cpu(xen_cpep_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
+					       NULL);
+			per_cpu(xen_cpep_irq, cpu) = -1;
 		}
-		if (per_cpu(cmcp_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
-			per_cpu(cmcp_irq, cpu) = -1;
+		if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
+					       NULL);
+			per_cpu(xen_cmcp_irq, cpu) = -1;
 		}
-		if (per_cpu(cmc_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
-			per_cpu(cmc_irq, cpu) = -1;
+		if (per_cpu(xen_cmc_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
+			per_cpu(xen_cmc_irq, cpu) = -1;
 		}
-		if (per_cpu(ipi_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
-			per_cpu(ipi_irq, cpu) = -1;
+		if (per_cpu(xen_ipi_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
+			per_cpu(xen_ipi_irq, cpu) = -1;
 		}
-		if (per_cpu(resched_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(resched_irq, cpu),
+		if (per_cpu(xen_resched_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
 					       NULL);
-			per_cpu(resched_irq, cpu) = -1;
+			per_cpu(xen_resched_irq, cpu) = -1;
 		}
-		if (per_cpu(timer_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
-			per_cpu(timer_irq, cpu) = -1;
+		if (per_cpu(xen_timer_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
+					       NULL);
+			per_cpu(xen_timer_irq, cpu) = -1;
 		}
 	}
 	return NOTIFY_OK;
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index dbeadb9c8e20..c1c544513e8d 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -34,15 +34,15 @@
 
 #include "../kernel/fsyscall_gtod_data.h"
 
-DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
-DEFINE_PER_CPU(unsigned long, processed_stolen_time);
-DEFINE_PER_CPU(unsigned long, processed_blocked_time);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
+static DEFINE_PER_CPU(unsigned long, xen_stolen_time);
+static DEFINE_PER_CPU(unsigned long, xen_blocked_time);
 
 /* taken from i386/kernel/time-xen.c */
 static void xen_init_missing_ticks_accounting(int cpu)
 {
 	struct vcpu_register_runstate_memory_area area;
-	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
+	struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
 	int rc;
 
 	memset(runstate, 0, sizeof(*runstate));
@@ -52,8 +52,8 @@ static void xen_init_missing_ticks_accounting(int cpu)
 			  &area);
 	WARN_ON(rc && rc != -ENOSYS);
 
-	per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
-	per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+	per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
+	per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
 			+ runstate->time[RUNSTATE_offline];
 }
 
@@ -68,7 +68,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 
 	BUG_ON(preemptible());
 
-	state = &__get_cpu_var(runstate);
+	state = &__get_cpu_var(xen_runstate);
 
 	/*
 	 * The runstate info is always updated by the hypervisor on
@@ -103,12 +103,12 @@ consider_steal_time(unsigned long new_itm)
 	 * This function just checks and reject this effect.
 	 */
 	if (!time_after_eq(runstate.time[RUNSTATE_blocked],
-			   per_cpu(processed_blocked_time, cpu)))
+			   per_cpu(xen_blocked_time, cpu)))
 		blocked = 0;
 
 	if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
 			   runstate.time[RUNSTATE_offline],
-			   per_cpu(processed_stolen_time, cpu)))
+			   per_cpu(xen_stolen_time, cpu)))
 		stolen = 0;
 
 	if (!time_after(delta_itm + new_itm, ia64_get_itc()))
@@ -147,8 +147,8 @@ consider_steal_time(unsigned long new_itm)
 	} else {
 		local_cpu_data->itm_next = delta_itm + new_itm;
 	}
-	per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen;
-	per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked;
+	per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
+	per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
 	}
 	return delta_itm;
 }
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index fe60e1abaee8..aca0e28581c7 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -83,9 +83,9 @@
 #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END KMAP_START
 #else
-extern unsigned long vmalloc_end;
+extern unsigned long m68k_vmalloc_end;
 #define	VMALLOC_START	0x0f800000
-#define	VMALLOC_END vmalloc_end
+#define	VMALLOC_END m68k_vmalloc_end
 #endif /* CONFIG_SUN3 */
 
 /* zero page used for uninitialized stuff */
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index 3cd19390aae5..94f81ecfe3f8 100644
--- a/arch/m68k/sun3/mmu_emu.c
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -45,8 +45,8 @@
 ** Globals
 */
 
-unsigned long vmalloc_end;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long m68k_vmalloc_end;
+EXPORT_SYMBOL(m68k_vmalloc_end);
 
 unsigned long pmeg_vaddr[PMEGS_NUM];
 unsigned char pmeg_alloc[PMEGS_NUM];
@@ -172,8 +172,8 @@ void mmu_emu_init(unsigned long bootmem_end)
 #endif
 		// the lowest mapping here is the end of our
 		// vmalloc region
-		if(!vmalloc_end)
-			vmalloc_end = seg;
+		if (!m68k_vmalloc_end)
+			m68k_vmalloc_end = seg;
 
 		// mark the segmap alloc'd, and reserve any
 		// of the first 0xbff pages the hardware is
diff --git a/arch/mn10300/kernel/kprobes.c b/arch/mn10300/kernel/kprobes.c
index dacafab00eb2..67e6389d625a 100644
--- a/arch/mn10300/kernel/kprobes.c
+++ b/arch/mn10300/kernel/kprobes.c
@@ -31,13 +31,13 @@ const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
 #define KPROBE_HIT_ACTIVE	0x00000001
 #define KPROBE_HIT_SS		0x00000002
 
-static struct kprobe *current_kprobe;
-static unsigned long current_kprobe_orig_pc;
-static unsigned long current_kprobe_next_pc;
-static int current_kprobe_ss_flags;
+static struct kprobe *cur_kprobe;
+static unsigned long cur_kprobe_orig_pc;
+static unsigned long cur_kprobe_next_pc;
+static int cur_kprobe_ss_flags;
 static unsigned long kprobe_status;
-static kprobe_opcode_t current_kprobe_ss_buf[MAX_INSN_SIZE + 2];
-static unsigned long current_kprobe_bp_addr;
+static kprobe_opcode_t cur_kprobe_ss_buf[MAX_INSN_SIZE + 2];
+static unsigned long cur_kprobe_bp_addr;
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 
@@ -399,26 +399,25 @@ void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
 	unsigned long nextpc;
 
-	current_kprobe_orig_pc = regs->pc;
-	memcpy(current_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
-	regs->pc = (unsigned long) current_kprobe_ss_buf;
+	cur_kprobe_orig_pc = regs->pc;
+	memcpy(cur_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
+	regs->pc = (unsigned long) cur_kprobe_ss_buf;
 
-	nextpc = find_nextpc(regs, &current_kprobe_ss_flags);
-	if (current_kprobe_ss_flags & SINGLESTEP_PCREL)
-		current_kprobe_next_pc =
-			current_kprobe_orig_pc + (nextpc - regs->pc);
+	nextpc = find_nextpc(regs, &cur_kprobe_ss_flags);
+	if (cur_kprobe_ss_flags & SINGLESTEP_PCREL)
+		cur_kprobe_next_pc = cur_kprobe_orig_pc + (nextpc - regs->pc);
 	else
-		current_kprobe_next_pc = nextpc;
+		cur_kprobe_next_pc = nextpc;
 
 	/* branching instructions need special handling */
-	if (current_kprobe_ss_flags & SINGLESTEP_BRANCH)
+	if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH)
 		nextpc = singlestep_branch_setup(regs);
 
-	current_kprobe_bp_addr = nextpc;
+	cur_kprobe_bp_addr = nextpc;
 
 	*(u8 *) nextpc = BREAKPOINT_INSTRUCTION;
-	mn10300_dcache_flush_range2((unsigned) current_kprobe_ss_buf,
-				    sizeof(current_kprobe_ss_buf));
+	mn10300_dcache_flush_range2((unsigned) cur_kprobe_ss_buf,
+				    sizeof(cur_kprobe_ss_buf));
 	mn10300_icache_inv();
 }
 
@@ -440,7 +439,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
 			disarm_kprobe(p, regs);
 			ret = 1;
 		} else {
-			p = current_kprobe;
+			p = cur_kprobe;
 			if (p->break_handler && p->break_handler(p, regs))
 				goto ss_probe;
 		}
@@ -464,7 +463,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
 	}
 
 	kprobe_status = KPROBE_HIT_ACTIVE;
-	current_kprobe = p;
+	cur_kprobe = p;
 	if (p->pre_handler(p, regs)) {
 		/* handler has already set things up, so skip ss setup */
 		return 1;
@@ -491,8 +490,8 @@ no_kprobe:
 static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
 	/* we may need to fixup regs/stack after singlestepping a call insn */
-	if (current_kprobe_ss_flags & SINGLESTEP_BRANCH) {
-		regs->pc = current_kprobe_orig_pc;
+	if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH) {
+		regs->pc = cur_kprobe_orig_pc;
 		switch (p->ainsn.insn[0]) {
 		case 0xcd:	/* CALL (d16,PC) */
 			*(unsigned *) regs->sp = regs->mdr = regs->pc + 5;
@@ -523,8 +522,8 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 		}
 	}
 
-	regs->pc = current_kprobe_next_pc;
-	current_kprobe_bp_addr = 0;
+	regs->pc = cur_kprobe_next_pc;
+	cur_kprobe_bp_addr = 0;
528} 527}
529 528
530static inline int __kprobes post_kprobe_handler(struct pt_regs *regs) 529static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
@@ -532,10 +531,10 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
532 if (!kprobe_running()) 531 if (!kprobe_running())
533 return 0; 532 return 0;
534 533
535 if (current_kprobe->post_handler) 534 if (cur_kprobe->post_handler)
536 current_kprobe->post_handler(current_kprobe, regs, 0); 535 cur_kprobe->post_handler(cur_kprobe, regs, 0);
537 536
538 resume_execution(current_kprobe, regs); 537 resume_execution(cur_kprobe, regs);
539 reset_current_kprobe(); 538 reset_current_kprobe();
540 preempt_enable_no_resched(); 539 preempt_enable_no_resched();
541 return 1; 540 return 1;
@@ -545,12 +544,12 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
545static inline 544static inline
546int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) 545int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
547{ 546{
548 if (current_kprobe->fault_handler && 547 if (cur_kprobe->fault_handler &&
549 current_kprobe->fault_handler(current_kprobe, regs, trapnr)) 548 cur_kprobe->fault_handler(cur_kprobe, regs, trapnr))
550 return 1; 549 return 1;
551 550
552 if (kprobe_status & KPROBE_HIT_SS) { 551 if (kprobe_status & KPROBE_HIT_SS) {
553 resume_execution(current_kprobe, regs); 552 resume_execution(cur_kprobe, regs);
554 reset_current_kprobe(); 553 reset_current_kprobe();
555 preempt_enable_no_resched(); 554 preempt_enable_no_resched();
556 } 555 }
@@ -567,7 +566,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
567 566
568 switch (val) { 567 switch (val) {
569 case DIE_BREAKPOINT: 568 case DIE_BREAKPOINT:
570 if (current_kprobe_bp_addr != args->regs->pc) { 569 if (cur_kprobe_bp_addr != args->regs->pc) {
571 if (kprobe_handler(args->regs)) 570 if (kprobe_handler(args->regs))
572 return NOTIFY_STOP; 571 return NOTIFY_STOP;
573 } else { 572 } else {
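
Notable in the mn10300 file: line 42 of the first hunk defines a per-cpu
current_kprobe right next to file-local statics of the same base name,
which only coexisted because of the old name mangling. A toy sketch of
the redefinition the cur_ prefix avoids, assuming per-cpu names become
plain symbols, as this series prepares for:

struct kprobe { const char *symbol_name; };

/* Toy stand-in for DEFINE_PER_CPU once the mangling prefix is gone:
 * the per-cpu variable is an ordinary symbol in the ordinary C
 * namespace. */
#define DEFINE_PER_CPU(type, name) \
	__attribute__((section(".data..percpu"))) type name

/* The file-local state takes a cur_ prefix; keeping the old names
 * would redefine the per-cpu current_kprobe below. */
static struct kprobe *cur_kprobe;
static unsigned long cur_kprobe_orig_pc;

DEFINE_PER_CPU(struct kprobe *, current_kprobe);

int main(void)
{
	(void)cur_kprobe;
	(void)cur_kprobe_orig_pc;
	(void)current_kprobe;
	return 0;
}
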
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index d9ea8d39c342..1d3b270d3083 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -37,7 +37,7 @@ extern void cpu_die(void);
37extern void smp_send_debugger_break(int cpu); 37extern void smp_send_debugger_break(int cpu);
38extern void smp_message_recv(int); 38extern void smp_message_recv(int);
39 39
40DECLARE_PER_CPU(unsigned int, pvr); 40DECLARE_PER_CPU(unsigned int, cpu_pvr);
41 41
42#ifdef CONFIG_HOTPLUG_CPU 42#ifdef CONFIG_HOTPLUG_CPU
43extern void fixup_irqs(cpumask_t map); 43extern void fixup_irqs(cpumask_t map);
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index 936f04dbfc6f..a3c11cac3d71 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -487,11 +487,11 @@ static void perf_callchain_user_32(struct pt_regs *regs,
487 * Since we can't get PMU interrupts inside a PMU interrupt handler, 487 * Since we can't get PMU interrupts inside a PMU interrupt handler,
488 * we don't need separate irq and nmi entries here. 488 * we don't need separate irq and nmi entries here.
489 */ 489 */
490static DEFINE_PER_CPU(struct perf_callchain_entry, callchain); 490static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
491 491
492struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) 492struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
493{ 493{
494 struct perf_callchain_entry *entry = &__get_cpu_var(callchain); 494 struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);
495 495
496 entry->nr = 0; 496 entry->nr = 0;
497 497
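
The powerpc callchain buffer is per-cpu scratch space rather than
per-event state; as the quoted comment says, the PMU handler cannot nest
with itself on one CPU, so a single entry per CPU suffices. A toy
single-cpu model of the idiom (the __get_cpu_var stand-in is an
assumption made for illustration):

#include <stdio.h>

#define NR_CPUS 2
#define PERF_MAX_STACK_DEPTH 8

struct perf_callchain_entry {
	unsigned int nr;
	unsigned long ip[PERF_MAX_STACK_DEPTH];
};

/* Per-cpu stand-in; the toy __get_cpu_var always acts as cpu 0. */
static struct perf_callchain_entry cpu_perf_callchain[NR_CPUS];
#define __get_cpu_var(name) ((name)[0])

struct perf_callchain_entry *perf_callchain(unsigned long pc)
{
	/* Reuse this CPU's static buffer.  Safe only because the real
	 * caller runs in the non-reentrant PMU interrupt handler. */
	struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);

	entry->nr = 0;
	entry->ip[entry->nr++] = pc;
	return entry;
}

int main(void)
{
	printf("depth: %u\n", perf_callchain(0x1234)->nr);
	return 0;
}
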
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 845c72ab7357..03dd6a248198 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -157,7 +157,7 @@ extern u32 cpu_temp_both(unsigned long cpu);
157#endif /* CONFIG_TAU */ 157#endif /* CONFIG_TAU */
158 158
159#ifdef CONFIG_SMP 159#ifdef CONFIG_SMP
160DEFINE_PER_CPU(unsigned int, pvr); 160DEFINE_PER_CPU(unsigned int, cpu_pvr);
161#endif 161#endif
162 162
163static int show_cpuinfo(struct seq_file *m, void *v) 163static int show_cpuinfo(struct seq_file *m, void *v)
@@ -209,7 +209,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
209 } 209 }
210 210
211#ifdef CONFIG_SMP 211#ifdef CONFIG_SMP
212 pvr = per_cpu(pvr, cpu_id); 212 pvr = per_cpu(cpu_pvr, cpu_id);
213#else 213#else
214 pvr = mfspr(SPRN_PVR); 214 pvr = mfspr(SPRN_PVR);
215#endif 215#endif
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 97196eefef3e..a521fb8a40ee 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -235,7 +235,7 @@ struct thread_info *current_set[NR_CPUS];
235 235
236static void __devinit smp_store_cpu_info(int id) 236static void __devinit smp_store_cpu_info(int id)
237{ 237{
238 per_cpu(pvr, id) = mfspr(SPRN_PVR); 238 per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
239} 239}
240 240
241static void __init smp_create_idle(unsigned int cpu) 241static void __init smp_create_idle(unsigned int cpu)
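
The pvr rename shows the full reach of a per-cpu rename: the
DECLARE_PER_CPU in smp.h, the DEFINE_PER_CPU in setup-common.c, and every
per_cpu() access site — including show_cpuinfo(), where a local variable
named pvr reads from the per-cpu copy and would otherwise shadow it. A
compilable toy of the declare/define/use pairing; the PVR value is made
up:

#include <stdio.h>

#define NR_CPUS 2

/* Toy DECLARE/DEFINE pair: the rename must touch the DECLARE in the
 * header, the DEFINE in exactly one .c file, and each access site. */
#define DECLARE_PER_CPU(type, name) extern type name[NR_CPUS]
#define DEFINE_PER_CPU(type, name)  type name[NR_CPUS]
#define per_cpu(name, cpu)          ((name)[(cpu)])

DECLARE_PER_CPU(unsigned int, cpu_pvr);	/* header */
DEFINE_PER_CPU(unsigned int, cpu_pvr);	/* single definition */

static void smp_store_cpu_info(int id, unsigned int pvr_reg)
{
	per_cpu(cpu_pvr, id) = pvr_reg;	/* boot-time write site */
}

int main(void)
{
	smp_store_cpu_info(0, 0x004e1201u);	/* made-up PVR value */
	printf("cpu0 pvr: %#x\n", per_cpu(cpu_pvr, 0));
	return 0;
}
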
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index f9dbf76a763f..7267effc8078 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -54,7 +54,7 @@ struct iic {
54 struct device_node *node; 54 struct device_node *node;
55}; 55};
56 56
57static DEFINE_PER_CPU(struct iic, iic); 57static DEFINE_PER_CPU(struct iic, cpu_iic);
58#define IIC_NODE_COUNT 2 58#define IIC_NODE_COUNT 2
59static struct irq_host *iic_host; 59static struct irq_host *iic_host;
60 60
@@ -82,7 +82,7 @@ static void iic_unmask(unsigned int irq)
82 82
83static void iic_eoi(unsigned int irq) 83static void iic_eoi(unsigned int irq)
84{ 84{
85 struct iic *iic = &__get_cpu_var(iic); 85 struct iic *iic = &__get_cpu_var(cpu_iic);
86 out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); 86 out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
87 BUG_ON(iic->eoi_ptr < 0); 87 BUG_ON(iic->eoi_ptr < 0);
88} 88}
@@ -146,7 +146,7 @@ static unsigned int iic_get_irq(void)
146 struct iic *iic; 146 struct iic *iic;
147 unsigned int virq; 147 unsigned int virq;
148 148
149 iic = &__get_cpu_var(iic); 149 iic = &__get_cpu_var(cpu_iic);
150 *(unsigned long *) &pending = 150 *(unsigned long *) &pending =
151 in_be64((u64 __iomem *) &iic->regs->pending_destr); 151 in_be64((u64 __iomem *) &iic->regs->pending_destr);
152 if (!(pending.flags & CBE_IIC_IRQ_VALID)) 152 if (!(pending.flags & CBE_IIC_IRQ_VALID))
@@ -161,12 +161,12 @@ static unsigned int iic_get_irq(void)
161 161
162void iic_setup_cpu(void) 162void iic_setup_cpu(void)
163{ 163{
164 out_be64(&__get_cpu_var(iic).regs->prio, 0xff); 164 out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
165} 165}
166 166
167u8 iic_get_target_id(int cpu) 167u8 iic_get_target_id(int cpu)
168{ 168{
169 return per_cpu(iic, cpu).target_id; 169 return per_cpu(cpu_iic, cpu).target_id;
170} 170}
171 171
172EXPORT_SYMBOL_GPL(iic_get_target_id); 172EXPORT_SYMBOL_GPL(iic_get_target_id);
@@ -181,7 +181,7 @@ static inline int iic_ipi_to_irq(int ipi)
181 181
182void iic_cause_IPI(int cpu, int mesg) 182void iic_cause_IPI(int cpu, int mesg)
183{ 183{
184 out_be64(&per_cpu(iic, cpu).regs->generate, (0xf - mesg) << 4); 184 out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - mesg) << 4);
185} 185}
186 186
187struct irq_host *iic_get_irq_host(int node) 187struct irq_host *iic_get_irq_host(int node)
@@ -348,7 +348,7 @@ static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
348 /* XXX FIXME: should locate the linux CPU number from the HW cpu 348 /* XXX FIXME: should locate the linux CPU number from the HW cpu
349 * number properly. We are lucky for now 349 * number properly. We are lucky for now
350 */ 350 */
351 struct iic *iic = &per_cpu(iic, hw_cpu); 351 struct iic *iic = &per_cpu(cpu_iic, hw_cpu);
352 352
353 iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs)); 353 iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
354 BUG_ON(iic->regs == NULL); 354 BUG_ON(iic->regs == NULL);
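
In the Cell interrupt controller the per-cpu variable gains a cpu_ prefix
because several functions declare a local struct iic *iic and initialize
it from __get_cpu_var(iic); with unmangled per-cpu names the macro
argument would resolve to the very local being declared. A toy model of
the disambiguation:

#include <stdio.h>

struct iic { int target_id; };

#define NR_CPUS 2

/* Per-cpu stand-ins; the toy __get_cpu_var always acts as cpu 0. */
static struct iic cpu_iic[NR_CPUS];
#define __get_cpu_var(name) ((name)[0])
#define per_cpu(name, cpu)  ((name)[(cpu)])

static int iic_get_target_id(int cpu)
{
	/* The cpu_ prefix keeps local and per-cpu names disjoint, so
	 * this initialization cannot self-reference the local "iic". */
	struct iic *iic = &__get_cpu_var(cpu_iic);

	(void)iic;
	return per_cpu(cpu_iic, cpu).target_id;
}

int main(void)
{
	cpu_iic[1].target_id = 7;
	printf("target: %d\n", iic_get_target_id(1));
	return 0;
}
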
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 937a544a236d..c5f3116b6ca5 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -54,7 +54,7 @@ struct dtl {
54 int buf_entries; 54 int buf_entries;
55 u64 last_idx; 55 u64 last_idx;
56}; 56};
57static DEFINE_PER_CPU(struct dtl, dtl); 57static DEFINE_PER_CPU(struct dtl, cpu_dtl);
58 58
59/* 59/*
60 * Dispatch trace log event mask: 60 * Dispatch trace log event mask:
@@ -261,7 +261,7 @@ static int dtl_init(void)
261 261
262 /* set up the per-cpu log structures */ 262 /* set up the per-cpu log structures */
263 for_each_possible_cpu(i) { 263 for_each_possible_cpu(i) {
264 struct dtl *dtl = &per_cpu(dtl, i); 264 struct dtl *dtl = &per_cpu(cpu_dtl, i);
265 dtl->cpu = i; 265 dtl->cpu = i;
266 266
267 rc = dtl_setup_file(dtl); 267 rc = dtl_setup_file(dtl);
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index b129611590a4..f30f4a1ead23 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -47,7 +47,7 @@ static DEFINE_PER_CPU(short, wd_enabled);
47static int endflag __initdata; 47static int endflag __initdata;
48 48
49static DEFINE_PER_CPU(unsigned int, last_irq_sum); 49static DEFINE_PER_CPU(unsigned int, last_irq_sum);
50static DEFINE_PER_CPU(local_t, alert_counter); 50static DEFINE_PER_CPU(long, alert_counter);
51static DEFINE_PER_CPU(int, nmi_touch); 51static DEFINE_PER_CPU(int, nmi_touch);
52 52
53void touch_nmi_watchdog(void) 53void touch_nmi_watchdog(void)
@@ -112,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
112 touched = 1; 112 touched = 1;
113 } 113 }
114 if (!touched && __get_cpu_var(last_irq_sum) == sum) { 114 if (!touched && __get_cpu_var(last_irq_sum) == sum) {
115 local_inc(&__get_cpu_var(alert_counter)); 115 __this_cpu_inc(per_cpu_var(alert_counter));
116 if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz) 116 if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz)
117 die_nmi("BUG: NMI Watchdog detected LOCKUP", 117 die_nmi("BUG: NMI Watchdog detected LOCKUP",
118 regs, panic_on_timeout); 118 regs, panic_on_timeout);
119 } else { 119 } else {
120 __get_cpu_var(last_irq_sum) = sum; 120 __get_cpu_var(last_irq_sum) = sum;
121 local_set(&__get_cpu_var(alert_counter), 0); 121 __this_cpu_write(per_cpu_var(alert_counter), 0);
122 } 122 }
123 if (__get_cpu_var(wd_enabled)) { 123 if (__get_cpu_var(wd_enabled)) {
124 write_pic(picl_value(nmi_hz)); 124 write_pic(picl_value(nmi_hz));
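
The sparc NMI watchdog drops local_t in favour of a plain long: the
counter is only ever touched from its own CPU's NMI path, and the new
__this_cpu operations already provide a safe single-instruction update
there, so the extra atomicity of local_t buys nothing. A single-cpu
sketch of the converted logic (the accessor stand-ins are assumptions for
illustration):

#include <stdio.h>

static long alert_counter;	/* was: local_t */

#define __this_cpu_inc(var)       ((var)++)
#define __this_cpu_read(var)      (var)
#define __this_cpu_write(var, x)  ((var) = (x))

static int watchdog_tick(int sum_unchanged, int nmi_hz)
{
	if (sum_unchanged) {
		__this_cpu_inc(alert_counter);
		if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
			return 1;	/* declare a lockup */
	} else {
		__this_cpu_write(alert_counter, 0);
	}
	return 0;
}

int main(void)
{
	int i, hit = 0;

	for (i = 0; i < 30 * 100; i++)
		hit |= watchdog_tick(1, 100);
	printf("lockup detected: %d\n", hit);
	return 0;
}
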
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index b65a36defeb7..0c44196b78ac 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -74,31 +74,31 @@ extern void __bad_percpu_size(void);
74 74
75#define percpu_to_op(op, var, val) \ 75#define percpu_to_op(op, var, val) \
76do { \ 76do { \
77 typedef typeof(var) T__; \ 77 typedef typeof(var) pto_T__; \
78 if (0) { \ 78 if (0) { \
79 T__ tmp__; \ 79 pto_T__ pto_tmp__; \
80 tmp__ = (val); \ 80 pto_tmp__ = (val); \
81 } \ 81 } \
82 switch (sizeof(var)) { \ 82 switch (sizeof(var)) { \
83 case 1: \ 83 case 1: \
84 asm(op "b %1,"__percpu_arg(0) \ 84 asm(op "b %1,"__percpu_arg(0) \
85 : "+m" (var) \ 85 : "+m" (var) \
86 : "qi" ((T__)(val))); \ 86 : "qi" ((pto_T__)(val))); \
87 break; \ 87 break; \
88 case 2: \ 88 case 2: \
89 asm(op "w %1,"__percpu_arg(0) \ 89 asm(op "w %1,"__percpu_arg(0) \
90 : "+m" (var) \ 90 : "+m" (var) \
91 : "ri" ((T__)(val))); \ 91 : "ri" ((pto_T__)(val))); \
92 break; \ 92 break; \
93 case 4: \ 93 case 4: \
94 asm(op "l %1,"__percpu_arg(0) \ 94 asm(op "l %1,"__percpu_arg(0) \
95 : "+m" (var) \ 95 : "+m" (var) \
96 : "ri" ((T__)(val))); \ 96 : "ri" ((pto_T__)(val))); \
97 break; \ 97 break; \
98 case 8: \ 98 case 8: \
99 asm(op "q %1,"__percpu_arg(0) \ 99 asm(op "q %1,"__percpu_arg(0) \
100 : "+m" (var) \ 100 : "+m" (var) \
101 : "re" ((T__)(val))); \ 101 : "re" ((pto_T__)(val))); \
102 break; \ 102 break; \
103 default: __bad_percpu_size(); \ 103 default: __bad_percpu_size(); \
104 } \ 104 } \
@@ -106,31 +106,31 @@ do { \
106 106
107#define percpu_from_op(op, var, constraint) \ 107#define percpu_from_op(op, var, constraint) \
108({ \ 108({ \
109 typeof(var) ret__; \ 109 typeof(var) pfo_ret__; \
110 switch (sizeof(var)) { \ 110 switch (sizeof(var)) { \
111 case 1: \ 111 case 1: \
112 asm(op "b "__percpu_arg(1)",%0" \ 112 asm(op "b "__percpu_arg(1)",%0" \
113 : "=q" (ret__) \ 113 : "=q" (pfo_ret__) \
114 : constraint); \ 114 : constraint); \
115 break; \ 115 break; \
116 case 2: \ 116 case 2: \
117 asm(op "w "__percpu_arg(1)",%0" \ 117 asm(op "w "__percpu_arg(1)",%0" \
118 : "=r" (ret__) \ 118 : "=r" (pfo_ret__) \
119 : constraint); \ 119 : constraint); \
120 break; \ 120 break; \
121 case 4: \ 121 case 4: \
122 asm(op "l "__percpu_arg(1)",%0" \ 122 asm(op "l "__percpu_arg(1)",%0" \
123 : "=r" (ret__) \ 123 : "=r" (pfo_ret__) \
124 : constraint); \ 124 : constraint); \
125 break; \ 125 break; \
126 case 8: \ 126 case 8: \
127 asm(op "q "__percpu_arg(1)",%0" \ 127 asm(op "q "__percpu_arg(1)",%0" \
128 : "=r" (ret__) \ 128 : "=r" (pfo_ret__) \
129 : constraint); \ 129 : constraint); \
130 break; \ 130 break; \
131 default: __bad_percpu_size(); \ 131 default: __bad_percpu_size(); \
132 } \ 132 } \
133 ret__; \ 133 pfo_ret__; \
134}) 134})
135 135
136/* 136/*
@@ -153,6 +153,84 @@ do { \
153#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val) 153#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
154#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val) 154#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
155 155
156#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
157#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
158#define __this_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
159
160#define __this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
161#define __this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
162#define __this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
163#define __this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
164#define __this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
165#define __this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
166#define __this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
167#define __this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
168#define __this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
169#define __this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
170#define __this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
171#define __this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
172#define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
173#define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
174#define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
175
176#define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
177#define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
178#define this_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
179#define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
180#define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
181#define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
182#define this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
183#define this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
184#define this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
185#define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
186#define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
187#define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
188#define this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
189#define this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
190#define this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
191#define this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
192#define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
193#define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
194
195#define irqsafe_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
196#define irqsafe_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
197#define irqsafe_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
198#define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
199#define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
200#define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
201#define irqsafe_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
202#define irqsafe_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
203#define irqsafe_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
204#define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
205#define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
206#define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
207
208/*
209 * Per cpu atomic 64 bit operations are only available under 64 bit.
210 * 32 bit must fall back to generic operations.
211 */
212#ifdef CONFIG_X86_64
213#define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
214#define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
215#define __this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
216#define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
217#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
218#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
219
220#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
221#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
222#define this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
223#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
224#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
225#define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
226
227#define irqsafe_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
228#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
229#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
230#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
231
232#endif
233
156/* This is not atomic against other CPUs -- CPU preemption needs to be off */ 234/* This is not atomic against other CPUs -- CPU preemption needs to be off */
157#define x86_test_and_clear_bit_percpu(bit, var) \ 235#define x86_test_and_clear_bit_percpu(bit, var) \
158({ \ 236({ \
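
Two things happen in percpu.h. First, the temporaries inside
percpu_to_op()/percpu_from_op() get unlikely pto_/pfo_ prefixes so that
an argument expression mentioning ret__ or tmp__ cannot be captured by
the macro's own locals (the if (0) block is a compile-time type check of
val that the optimizer discards). Second, size-suffixed __this_cpu_*,
this_cpu_* and irqsafe_cpu_* operations are added on top of those
primitives, with the _8 variants restricted to CONFIG_X86_64. A
distilled, compilable illustration of the capture bug, using GCC
statement expressions just as the kernel macros do:

#define BAD_READ(var)  ({ int ret__;     ret__ = (var);     ret__; })
#define GOOD_READ(var) ({ int pfo_ret__; pfo_ret__ = (var); pfo_ret__; })

int main(void)
{
	int ret__ = 42;

	/* BAD_READ(ret__) expands to "ret__ = (ret__)" on the macro's
	 * own uninitialized temporary, silently losing the caller's
	 * value; the pfo_ prefix makes such a collision implausible. */
	return GOOD_READ(ret__) == 42 ? 0 : 1;
}
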
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 6389432a9dbf..0159a69396cb 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -361,7 +361,7 @@ void stop_apic_nmi_watchdog(void *unused)
361 */ 361 */
362 362
363static DEFINE_PER_CPU(unsigned, last_irq_sum); 363static DEFINE_PER_CPU(unsigned, last_irq_sum);
364static DEFINE_PER_CPU(local_t, alert_counter); 364static DEFINE_PER_CPU(long, alert_counter);
365static DEFINE_PER_CPU(int, nmi_touch); 365static DEFINE_PER_CPU(int, nmi_touch);
366 366
367void touch_nmi_watchdog(void) 367void touch_nmi_watchdog(void)
@@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
438 * Ayiee, looks like this CPU is stuck ... 438 * Ayiee, looks like this CPU is stuck ...
439 * wait a few IRQs (5 seconds) before doing the oops ... 439 * wait a few IRQs (5 seconds) before doing the oops ...
440 */ 440 */
441 local_inc(&__get_cpu_var(alert_counter)); 441 __this_cpu_inc(per_cpu_var(alert_counter));
442 if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz) 442 if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
443 /* 443 /*
444 * die_nmi will return ONLY if NOTIFY_STOP happens.. 444 * die_nmi will return ONLY if NOTIFY_STOP happens..
445 */ 445 */
@@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
447 regs, panic_on_timeout); 447 regs, panic_on_timeout);
448 } else { 448 } else {
449 __get_cpu_var(last_irq_sum) = sum; 449 __get_cpu_var(last_irq_sum) = sum;
450 local_set(&__get_cpu_var(alert_counter), 0); 450 __this_cpu_write(per_cpu_var(alert_counter), 0);
451 } 451 }
452 452
453 /* see if the nmi watchdog went off */ 453 /* see if the nmi watchdog went off */
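
Like the sparc hunk, the x86 NMI watchdog moves from local_t to a plain
long. The per_cpu_var() wrapper at the call sites suggests that, at this
point in the series, per-cpu symbols still carry their mangled prefix and
the __this_cpu ops want the real symbol name; the definition sketched
below is an assumption based on that convention, not shown in this patch:

#define per_cpu_var(name)    per_cpu__##name
#define __this_cpu_inc(sym)  ((sym)++)
#define __this_cpu_read(sym) (sym)

static long per_cpu__alert_counter;	/* what DEFINE_PER_CPU emits */

int main(void)
{
	__this_cpu_inc(per_cpu_var(alert_counter));
	return __this_cpu_read(per_cpu_var(alert_counter)) == 1 ? 0 : 1;
}
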
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c1afa990a6c8..20399b7b0c3f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1093,7 +1093,7 @@ static void clear_all_debug_regs(void)
1093 1093
1094void __cpuinit cpu_init(void) 1094void __cpuinit cpu_init(void)
1095{ 1095{
1096 struct orig_ist *orig_ist; 1096 struct orig_ist *oist;
1097 struct task_struct *me; 1097 struct task_struct *me;
1098 struct tss_struct *t; 1098 struct tss_struct *t;
1099 unsigned long v; 1099 unsigned long v;
@@ -1102,7 +1102,7 @@ void __cpuinit cpu_init(void)
1102 1102
1103 cpu = stack_smp_processor_id(); 1103 cpu = stack_smp_processor_id();
1104 t = &per_cpu(init_tss, cpu); 1104 t = &per_cpu(init_tss, cpu);
1105 orig_ist = &per_cpu(orig_ist, cpu); 1105 oist = &per_cpu(orig_ist, cpu);
1106 1106
1107#ifdef CONFIG_NUMA 1107#ifdef CONFIG_NUMA
1108 if (cpu != 0 && percpu_read(node_number) == 0 && 1108 if (cpu != 0 && percpu_read(node_number) == 0 &&
@@ -1143,12 +1143,12 @@ void __cpuinit cpu_init(void)
1143 /* 1143 /*
1144 * set up and load the per-CPU TSS 1144 * set up and load the per-CPU TSS
1145 */ 1145 */
1146 if (!orig_ist->ist[0]) { 1146 if (!oist->ist[0]) {
1147 char *estacks = per_cpu(exception_stacks, cpu); 1147 char *estacks = per_cpu(exception_stacks, cpu);
1148 1148
1149 for (v = 0; v < N_EXCEPTION_STACKS; v++) { 1149 for (v = 0; v < N_EXCEPTION_STACKS; v++) {
1150 estacks += exception_stack_sizes[v]; 1150 estacks += exception_stack_sizes[v];
1151 orig_ist->ist[v] = t->x86_tss.ist[v] = 1151 oist->ist[v] = t->x86_tss.ist[v] =
1152 (unsigned long)estacks; 1152 (unsigned long)estacks;
1153 } 1153 }
1154 } 1154 }
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index dca325c03999..b368cd862997 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,9 +30,9 @@
30#include <asm/apic.h> 30#include <asm/apic.h>
31#include <asm/desc.h> 31#include <asm/desc.h>
32 32
33static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr); 33static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr);
34static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr); 34static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr);
35static DEFINE_PER_CPU(int, cpu_priv_count); 35static DEFINE_PER_CPU(int, cpud_priv_count);
36 36
37static DEFINE_MUTEX(cpu_debug_lock); 37static DEFINE_MUTEX(cpu_debug_lock);
38 38
@@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
531 531
532 /* Already intialized */ 532 /* Already intialized */
533 if (file == CPU_INDEX_BIT) 533 if (file == CPU_INDEX_BIT)
534 if (per_cpu(cpu_arr[type].init, cpu)) 534 if (per_cpu(cpud_arr[type].init, cpu))
535 return 0; 535 return 0;
536 536
537 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 537 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -543,8 +543,8 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
543 priv->reg = reg; 543 priv->reg = reg;
544 priv->file = file; 544 priv->file = file;
545 mutex_lock(&cpu_debug_lock); 545 mutex_lock(&cpu_debug_lock);
546 per_cpu(priv_arr[type], cpu) = priv; 546 per_cpu(cpud_priv_arr[type], cpu) = priv;
547 per_cpu(cpu_priv_count, cpu)++; 547 per_cpu(cpud_priv_count, cpu)++;
548 mutex_unlock(&cpu_debug_lock); 548 mutex_unlock(&cpu_debug_lock);
549 549
550 if (file) 550 if (file)
@@ -552,10 +552,10 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
552 dentry, (void *)priv, &cpu_fops); 552 dentry, (void *)priv, &cpu_fops);
553 else { 553 else {
554 debugfs_create_file(cpu_base[type].name, S_IRUGO, 554 debugfs_create_file(cpu_base[type].name, S_IRUGO,
555 per_cpu(cpu_arr[type].dentry, cpu), 555 per_cpu(cpud_arr[type].dentry, cpu),
556 (void *)priv, &cpu_fops); 556 (void *)priv, &cpu_fops);
557 mutex_lock(&cpu_debug_lock); 557 mutex_lock(&cpu_debug_lock);
558 per_cpu(cpu_arr[type].init, cpu) = 1; 558 per_cpu(cpud_arr[type].init, cpu) = 1;
559 mutex_unlock(&cpu_debug_lock); 559 mutex_unlock(&cpu_debug_lock);
560 } 560 }
561 561
@@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
615 if (!is_typeflag_valid(cpu, cpu_base[type].flag)) 615 if (!is_typeflag_valid(cpu, cpu_base[type].flag))
616 continue; 616 continue;
617 cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry); 617 cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
618 per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry; 618 per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry;
619 619
620 if (type < CPU_TSS_BIT) 620 if (type < CPU_TSS_BIT)
621 err = cpu_init_msr(cpu, type, cpu_dentry); 621 err = cpu_init_msr(cpu, type, cpu_dentry);
@@ -647,11 +647,11 @@ static int cpu_init_cpu(void)
647 err = cpu_init_allreg(cpu, cpu_dentry); 647 err = cpu_init_allreg(cpu, cpu_dentry);
648 648
649 pr_info("cpu%d(%d) debug files %d\n", 649 pr_info("cpu%d(%d) debug files %d\n",
650 cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu)); 650 cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu));
651 if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) { 651 if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) {
652 pr_err("Register files count %d exceeds limit %d\n", 652 pr_err("Register files count %d exceeds limit %d\n",
653 per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES); 653 per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES);
654 per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES; 654 per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES;
655 err = -ENFILE; 655 err = -ENFILE;
656 } 656 }
657 if (err) 657 if (err)
@@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void)
676 debugfs_remove_recursive(cpu_debugfs_dir); 676 debugfs_remove_recursive(cpu_debugfs_dir);
677 677
678 for (cpu = 0; cpu < nr_cpu_ids; cpu++) 678 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
679 for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++) 679 for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++)
680 kfree(per_cpu(priv_arr[i], cpu)); 680 kfree(per_cpu(cpud_priv_arr[i], cpu));
681} 681}
682 682
683module_init(cpu_debug_init); 683module_init(cpu_debug_init);
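
cpu_debug.c shows that per_cpu() accepts a full lvalue expression, not
just a bare name: per_cpu(cpud_arr[type].init, cpu) indexes an array of
structs inside the per-cpu area. A compilable toy of that shape:

#include <stdio.h>

#define NR_CPUS  2
#define NR_TYPES 3

struct cpu_debug_base { int init; };

/* Toy per-cpu area: each CPU owns a full copy of every per-cpu object,
 * including whole arrays of structs. */
static struct pcpu_area {
	struct cpu_debug_base cpud_arr[NR_TYPES];
	int cpud_priv_count;
} pcpu_area[NR_CPUS];

/* The macro argument is a complete lvalue expression, so array
 * indexing and member access pass through untouched. */
#define per_cpu(expr, cpu) (pcpu_area[(cpu)].expr)

int main(void)
{
	int type = 1, cpu = 0;

	per_cpu(cpud_arr[type].init, cpu) = 1;
	per_cpu(cpud_priv_count, cpu)++;
	printf("%d %d\n", per_cpu(cpud_arr[type].init, cpu),
	       per_cpu(cpud_priv_count, cpu));
	return 0;
}
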
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index d2e7c77c1ea4..f28decf8dde3 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -68,9 +68,9 @@ struct acpi_cpufreq_data {
68 unsigned int cpu_feature; 68 unsigned int cpu_feature;
69}; 69};
70 70
71static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data); 71static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
72 72
73static DEFINE_PER_CPU(struct aperfmperf, old_perf); 73static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
74 74
75/* acpi_perf_data is a pointer to percpu data. */ 75/* acpi_perf_data is a pointer to percpu data. */
76static struct acpi_processor_performance *acpi_perf_data; 76static struct acpi_processor_performance *acpi_perf_data;
@@ -214,14 +214,14 @@ static u32 get_cur_val(const struct cpumask *mask)
214 if (unlikely(cpumask_empty(mask))) 214 if (unlikely(cpumask_empty(mask)))
215 return 0; 215 return 0;
216 216
217 switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) { 217 switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
218 case SYSTEM_INTEL_MSR_CAPABLE: 218 case SYSTEM_INTEL_MSR_CAPABLE:
219 cmd.type = SYSTEM_INTEL_MSR_CAPABLE; 219 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
220 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; 220 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
221 break; 221 break;
222 case SYSTEM_IO_CAPABLE: 222 case SYSTEM_IO_CAPABLE:
223 cmd.type = SYSTEM_IO_CAPABLE; 223 cmd.type = SYSTEM_IO_CAPABLE;
224 perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data; 224 perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
225 cmd.addr.io.port = perf->control_register.address; 225 cmd.addr.io.port = perf->control_register.address;
226 cmd.addr.io.bit_width = perf->control_register.bit_width; 226 cmd.addr.io.bit_width = perf->control_register.bit_width;
227 break; 227 break;
@@ -268,8 +268,8 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
268 if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1)) 268 if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
269 return 0; 269 return 0;
270 270
271 ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf); 271 ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
272 per_cpu(old_perf, cpu) = perf; 272 per_cpu(acfreq_old_perf, cpu) = perf;
273 273
274 retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT; 274 retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
275 275
@@ -278,7 +278,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
278 278
279static unsigned int get_cur_freq_on_cpu(unsigned int cpu) 279static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
280{ 280{
281 struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu); 281 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
282 unsigned int freq; 282 unsigned int freq;
283 unsigned int cached_freq; 283 unsigned int cached_freq;
284 284
@@ -322,7 +322,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
322static int acpi_cpufreq_target(struct cpufreq_policy *policy, 322static int acpi_cpufreq_target(struct cpufreq_policy *policy,
323 unsigned int target_freq, unsigned int relation) 323 unsigned int target_freq, unsigned int relation)
324{ 324{
325 struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); 325 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
326 struct acpi_processor_performance *perf; 326 struct acpi_processor_performance *perf;
327 struct cpufreq_freqs freqs; 327 struct cpufreq_freqs freqs;
328 struct drv_cmd cmd; 328 struct drv_cmd cmd;
@@ -416,7 +416,7 @@ out:
416 416
417static int acpi_cpufreq_verify(struct cpufreq_policy *policy) 417static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
418{ 418{
419 struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); 419 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
420 420
421 dprintk("acpi_cpufreq_verify\n"); 421 dprintk("acpi_cpufreq_verify\n");
422 422
@@ -574,7 +574,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
574 return -ENOMEM; 574 return -ENOMEM;
575 575
576 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); 576 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
577 per_cpu(drv_data, cpu) = data; 577 per_cpu(acfreq_data, cpu) = data;
578 578
579 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) 579 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
580 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; 580 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
@@ -725,20 +725,20 @@ err_unreg:
725 acpi_processor_unregister_performance(perf, cpu); 725 acpi_processor_unregister_performance(perf, cpu);
726err_free: 726err_free:
727 kfree(data); 727 kfree(data);
728 per_cpu(drv_data, cpu) = NULL; 728 per_cpu(acfreq_data, cpu) = NULL;
729 729
730 return result; 730 return result;
731} 731}
732 732
733static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) 733static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
734{ 734{
735 struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); 735 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
736 736
737 dprintk("acpi_cpufreq_cpu_exit\n"); 737 dprintk("acpi_cpufreq_cpu_exit\n");
738 738
739 if (data) { 739 if (data) {
740 cpufreq_frequency_table_put_attr(policy->cpu); 740 cpufreq_frequency_table_put_attr(policy->cpu);
741 per_cpu(drv_data, policy->cpu) = NULL; 741 per_cpu(acfreq_data, policy->cpu) = NULL;
742 acpi_processor_unregister_performance(data->acpi_data, 742 acpi_processor_unregister_performance(data->acpi_data,
743 policy->cpu); 743 policy->cpu);
744 kfree(data); 744 kfree(data);
@@ -749,7 +749,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
749 749
750static int acpi_cpufreq_resume(struct cpufreq_policy *policy) 750static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
751{ 751{
752 struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); 752 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
753 753
754 dprintk("acpi_cpufreq_resume\n"); 754 dprintk("acpi_cpufreq_resume\n");
755 755
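
The acpi-cpufreq per-cpu variables are pointer slots into kzalloc'd
driver state; note how acpi_cpufreq_cpu_exit() clears the slot before
freeing, so no other path can see a dangling pointer. A user-space
sketch of that publish/unpublish lifecycle (calloc/free stand in for the
kernel allocators):

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 2

struct acpi_cpufreq_data { unsigned int cpu_feature; };

/* Per-cpu pointer slots (stand-in for DEFINE_PER_CPU of a pointer). */
static struct acpi_cpufreq_data *acfreq_data[NR_CPUS];
#define per_cpu(name, cpu) ((name)[(cpu)])

static int cpu_init_slot(int cpu)
{
	struct acpi_cpufreq_data *data = calloc(1, sizeof(*data));

	if (!data)
		return -1;
	per_cpu(acfreq_data, cpu) = data;	/* publish */
	return 0;
}

static void cpu_exit_slot(int cpu)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);

	if (data) {
		per_cpu(acfreq_data, cpu) = NULL;	/* unpublish first */
		free(data);
	}
}

int main(void)
{
	if (cpu_init_slot(0))
		return 1;
	printf("slot 0: %p\n", (void *)per_cpu(acfreq_data, 0));
	cpu_exit_slot(0);
	return 0;
}
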
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6c40f6b5b340..0c06bca2a1dc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -499,8 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
499#ifdef CONFIG_SYSFS 499#ifdef CONFIG_SYSFS
500 500
501/* pointer to _cpuid4_info array (for each cache leaf) */ 501/* pointer to _cpuid4_info array (for each cache leaf) */
502static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); 502static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
503#define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) 503#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
504 504
505#ifdef CONFIG_SMP 505#ifdef CONFIG_SMP
506static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) 506static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -513,7 +513,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
513 if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { 513 if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
514 struct cpuinfo_x86 *d; 514 struct cpuinfo_x86 *d;
515 for_each_online_cpu(i) { 515 for_each_online_cpu(i) {
516 if (!per_cpu(cpuid4_info, i)) 516 if (!per_cpu(ici_cpuid4_info, i))
517 continue; 517 continue;
518 d = &cpu_data(i); 518 d = &cpu_data(i);
519 this_leaf = CPUID4_INFO_IDX(i, index); 519 this_leaf = CPUID4_INFO_IDX(i, index);
@@ -535,7 +535,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
535 c->apicid >> index_msb) { 535 c->apicid >> index_msb) {
536 cpumask_set_cpu(i, 536 cpumask_set_cpu(i,
537 to_cpumask(this_leaf->shared_cpu_map)); 537 to_cpumask(this_leaf->shared_cpu_map));
538 if (i != cpu && per_cpu(cpuid4_info, i)) { 538 if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
539 sibling_leaf = 539 sibling_leaf =
540 CPUID4_INFO_IDX(i, index); 540 CPUID4_INFO_IDX(i, index);
541 cpumask_set_cpu(cpu, to_cpumask( 541 cpumask_set_cpu(cpu, to_cpumask(
@@ -574,8 +574,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
574 for (i = 0; i < num_cache_leaves; i++) 574 for (i = 0; i < num_cache_leaves; i++)
575 cache_remove_shared_cpu_map(cpu, i); 575 cache_remove_shared_cpu_map(cpu, i);
576 576
577 kfree(per_cpu(cpuid4_info, cpu)); 577 kfree(per_cpu(ici_cpuid4_info, cpu));
578 per_cpu(cpuid4_info, cpu) = NULL; 578 per_cpu(ici_cpuid4_info, cpu) = NULL;
579} 579}
580 580
581static int 581static int
@@ -614,15 +614,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
614 if (num_cache_leaves == 0) 614 if (num_cache_leaves == 0)
615 return -ENOENT; 615 return -ENOENT;
616 616
617 per_cpu(cpuid4_info, cpu) = kzalloc( 617 per_cpu(ici_cpuid4_info, cpu) = kzalloc(
618 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL); 618 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
619 if (per_cpu(cpuid4_info, cpu) == NULL) 619 if (per_cpu(ici_cpuid4_info, cpu) == NULL)
620 return -ENOMEM; 620 return -ENOMEM;
621 621
622 smp_call_function_single(cpu, get_cpu_leaves, &retval, true); 622 smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
623 if (retval) { 623 if (retval) {
624 kfree(per_cpu(cpuid4_info, cpu)); 624 kfree(per_cpu(ici_cpuid4_info, cpu));
625 per_cpu(cpuid4_info, cpu) = NULL; 625 per_cpu(ici_cpuid4_info, cpu) = NULL;
626 } 626 }
627 627
628 return retval; 628 return retval;
@@ -634,7 +634,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
634extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */ 634extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
635 635
636/* pointer to kobject for cpuX/cache */ 636/* pointer to kobject for cpuX/cache */
637static DEFINE_PER_CPU(struct kobject *, cache_kobject); 637static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
638 638
639struct _index_kobject { 639struct _index_kobject {
640 struct kobject kobj; 640 struct kobject kobj;
@@ -643,8 +643,8 @@ struct _index_kobject {
643}; 643};
644 644
645/* pointer to array of kobjects for cpuX/cache/indexY */ 645/* pointer to array of kobjects for cpuX/cache/indexY */
646static DEFINE_PER_CPU(struct _index_kobject *, index_kobject); 646static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
647#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y])) 647#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
648 648
649#define show_one_plus(file_name, object, val) \ 649#define show_one_plus(file_name, object, val) \
650static ssize_t show_##file_name \ 650static ssize_t show_##file_name \
@@ -863,10 +863,10 @@ static struct kobj_type ktype_percpu_entry = {
863 863
864static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) 864static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
865{ 865{
866 kfree(per_cpu(cache_kobject, cpu)); 866 kfree(per_cpu(ici_cache_kobject, cpu));
867 kfree(per_cpu(index_kobject, cpu)); 867 kfree(per_cpu(ici_index_kobject, cpu));
868 per_cpu(cache_kobject, cpu) = NULL; 868 per_cpu(ici_cache_kobject, cpu) = NULL;
869 per_cpu(index_kobject, cpu) = NULL; 869 per_cpu(ici_index_kobject, cpu) = NULL;
870 free_cache_attributes(cpu); 870 free_cache_attributes(cpu);
871} 871}
872 872
@@ -882,14 +882,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
882 return err; 882 return err;
883 883
884 /* Allocate all required memory */ 884 /* Allocate all required memory */
885 per_cpu(cache_kobject, cpu) = 885 per_cpu(ici_cache_kobject, cpu) =
886 kzalloc(sizeof(struct kobject), GFP_KERNEL); 886 kzalloc(sizeof(struct kobject), GFP_KERNEL);
887 if (unlikely(per_cpu(cache_kobject, cpu) == NULL)) 887 if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
888 goto err_out; 888 goto err_out;
889 889
890 per_cpu(index_kobject, cpu) = kzalloc( 890 per_cpu(ici_index_kobject, cpu) = kzalloc(
891 sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL); 891 sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
892 if (unlikely(per_cpu(index_kobject, cpu) == NULL)) 892 if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
893 goto err_out; 893 goto err_out;
894 894
895 return 0; 895 return 0;
@@ -913,7 +913,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
913 if (unlikely(retval < 0)) 913 if (unlikely(retval < 0))
914 return retval; 914 return retval;
915 915
916 retval = kobject_init_and_add(per_cpu(cache_kobject, cpu), 916 retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
917 &ktype_percpu_entry, 917 &ktype_percpu_entry,
918 &sys_dev->kobj, "%s", "cache"); 918 &sys_dev->kobj, "%s", "cache");
919 if (retval < 0) { 919 if (retval < 0) {
@@ -927,12 +927,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
927 this_object->index = i; 927 this_object->index = i;
928 retval = kobject_init_and_add(&(this_object->kobj), 928 retval = kobject_init_and_add(&(this_object->kobj),
929 &ktype_cache, 929 &ktype_cache,
930 per_cpu(cache_kobject, cpu), 930 per_cpu(ici_cache_kobject, cpu),
931 "index%1lu", i); 931 "index%1lu", i);
932 if (unlikely(retval)) { 932 if (unlikely(retval)) {
933 for (j = 0; j < i; j++) 933 for (j = 0; j < i; j++)
934 kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj)); 934 kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
935 kobject_put(per_cpu(cache_kobject, cpu)); 935 kobject_put(per_cpu(ici_cache_kobject, cpu));
936 cpuid4_cache_sysfs_exit(cpu); 936 cpuid4_cache_sysfs_exit(cpu);
937 return retval; 937 return retval;
938 } 938 }
@@ -940,7 +940,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
940 } 940 }
941 cpumask_set_cpu(cpu, to_cpumask(cache_dev_map)); 941 cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
942 942
943 kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD); 943 kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
944 return 0; 944 return 0;
945} 945}
946 946
@@ -949,7 +949,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
949 unsigned int cpu = sys_dev->id; 949 unsigned int cpu = sys_dev->id;
950 unsigned long i; 950 unsigned long i;
951 951
952 if (per_cpu(cpuid4_info, cpu) == NULL) 952 if (per_cpu(ici_cpuid4_info, cpu) == NULL)
953 return; 953 return;
954 if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map))) 954 if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
955 return; 955 return;
@@ -957,7 +957,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
957 957
958 for (i = 0; i < num_cache_leaves; i++) 958 for (i = 0; i < num_cache_leaves; i++)
959 kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj)); 959 kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
960 kobject_put(per_cpu(cache_kobject, cpu)); 960 kobject_put(per_cpu(ici_cache_kobject, cpu));
961 cpuid4_cache_sysfs_exit(cpu); 961 cpuid4_cache_sysfs_exit(cpu);
962} 962}
963 963
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index ef42a038f1a6..1c47390dd0e5 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -265,13 +265,13 @@ struct ds_context {
265 int cpu; 265 int cpu;
266}; 266};
267 267
268static DEFINE_PER_CPU(struct ds_context *, cpu_context); 268static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context);
269 269
270 270
271static struct ds_context *ds_get_context(struct task_struct *task, int cpu) 271static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
272{ 272{
273 struct ds_context **p_context = 273 struct ds_context **p_context =
274 (task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu)); 274 (task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu));
275 struct ds_context *context = NULL; 275 struct ds_context *context = NULL;
276 struct ds_context *new_context = NULL; 276 struct ds_context *new_context = NULL;
277 277
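
ds_get_context() handles a per-task and a per-cpu context through one
code path by first selecting a pointer to the owning slot, which is why
only the per-cpu side needed the cpu_ds_context rename. A compilable
sketch of the slot-selection idiom:

#include <stdio.h>
#include <stddef.h>

#define NR_CPUS 2

struct ds_context { int users; };
struct task_struct { struct ds_context *ds_ctx; };

static struct ds_context *cpu_ds_context[NR_CPUS];	/* per-cpu stand-in */
#define per_cpu(name, cpu) ((name)[(cpu)])

/* Pick the owning slot up front; everything after that is identical
 * for the per-task and per-cpu cases. */
static struct ds_context **context_slot(struct task_struct *task, int cpu)
{
	return task ? &task->ds_ctx : &per_cpu(cpu_ds_context, cpu);
}

int main(void)
{
	static struct ds_context ctx;

	*context_slot(NULL, 0) = &ctx;	/* install into the cpu 0 slot */
	printf("cpu0 ctx: %p\n", (void *)per_cpu(cpu_ds_context, 0));
	return 0;
}
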
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 3de0b37ec038..1d9b33843c80 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
316static int svm_hardware_enable(void *garbage) 316static int svm_hardware_enable(void *garbage)
317{ 317{
318 318
319 struct svm_cpu_data *svm_data; 319 struct svm_cpu_data *sd;
320 uint64_t efer; 320 uint64_t efer;
321 struct descriptor_table gdt_descr; 321 struct descriptor_table gdt_descr;
322 struct desc_struct *gdt; 322 struct desc_struct *gdt;
@@ -331,63 +331,61 @@ static int svm_hardware_enable(void *garbage)
331 me); 331 me);
332 return -EINVAL; 332 return -EINVAL;
333 } 333 }
334 svm_data = per_cpu(svm_data, me); 334 sd = per_cpu(svm_data, me);
335 335
336 if (!svm_data) { 336 if (!sd) {
337 printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n", 337 printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
338 me); 338 me);
339 return -EINVAL; 339 return -EINVAL;
340 } 340 }
341 341
342 svm_data->asid_generation = 1; 342 sd->asid_generation = 1;
343 svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; 343 sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
344 svm_data->next_asid = svm_data->max_asid + 1; 344 sd->next_asid = sd->max_asid + 1;
345 345
346 kvm_get_gdt(&gdt_descr); 346 kvm_get_gdt(&gdt_descr);
347 gdt = (struct desc_struct *)gdt_descr.base; 347 gdt = (struct desc_struct *)gdt_descr.base;
348 svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); 348 sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
349 349
350 wrmsrl(MSR_EFER, efer | EFER_SVME); 350 wrmsrl(MSR_EFER, efer | EFER_SVME);
351 351
352 wrmsrl(MSR_VM_HSAVE_PA, 352 wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
353 page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
354 353
355 return 0; 354 return 0;
356} 355}
357 356
358static void svm_cpu_uninit(int cpu) 357static void svm_cpu_uninit(int cpu)
359{ 358{
360 struct svm_cpu_data *svm_data 359 struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
361 = per_cpu(svm_data, raw_smp_processor_id());
362 360
363 if (!svm_data) 361 if (!sd)
364 return; 362 return;
365 363
366 per_cpu(svm_data, raw_smp_processor_id()) = NULL; 364 per_cpu(svm_data, raw_smp_processor_id()) = NULL;
367 __free_page(svm_data->save_area); 365 __free_page(sd->save_area);
368 kfree(svm_data); 366 kfree(sd);
369} 367}
370 368
371static int svm_cpu_init(int cpu) 369static int svm_cpu_init(int cpu)
372{ 370{
373 struct svm_cpu_data *svm_data; 371 struct svm_cpu_data *sd;
374 int r; 372 int r;
375 373
376 svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); 374 sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
377 if (!svm_data) 375 if (!sd)
378 return -ENOMEM; 376 return -ENOMEM;
379 svm_data->cpu = cpu; 377 sd->cpu = cpu;
380 svm_data->save_area = alloc_page(GFP_KERNEL); 378 sd->save_area = alloc_page(GFP_KERNEL);
381 r = -ENOMEM; 379 r = -ENOMEM;
382 if (!svm_data->save_area) 380 if (!sd->save_area)
383 goto err_1; 381 goto err_1;
384 382
385 per_cpu(svm_data, cpu) = svm_data; 383 per_cpu(svm_data, cpu) = sd;
386 384
387 return 0; 385 return 0;
388 386
389err_1: 387err_1:
390 kfree(svm_data); 388 kfree(sd);
391 return r; 389 return r;
392 390
393} 391}
@@ -1092,16 +1090,16 @@ static void save_host_msrs(struct kvm_vcpu *vcpu)
1092#endif 1090#endif
1093} 1091}
1094 1092
1095static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data) 1093static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
1096{ 1094{
1097 if (svm_data->next_asid > svm_data->max_asid) { 1095 if (sd->next_asid > sd->max_asid) {
1098 ++svm_data->asid_generation; 1096 ++sd->asid_generation;
1099 svm_data->next_asid = 1; 1097 sd->next_asid = 1;
1100 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; 1098 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
1101 } 1099 }
1102 1100
1103 svm->asid_generation = svm_data->asid_generation; 1101 svm->asid_generation = sd->asid_generation;
1104 svm->vmcb->control.asid = svm_data->next_asid++; 1102 svm->vmcb->control.asid = sd->next_asid++;
1105} 1103}
1106 1104
1107static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr) 1105static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
@@ -2429,8 +2427,8 @@ static void reload_tss(struct kvm_vcpu *vcpu)
2429{ 2427{
2430 int cpu = raw_smp_processor_id(); 2428 int cpu = raw_smp_processor_id();
2431 2429
2432 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu); 2430 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2433 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */ 2431 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
2434 load_TR_desc(); 2432 load_TR_desc();
2435} 2433}
2436 2434
@@ -2438,12 +2436,12 @@ static void pre_svm_run(struct vcpu_svm *svm)
2438{ 2436{
2439 int cpu = raw_smp_processor_id(); 2437 int cpu = raw_smp_processor_id();
2440 2438
2441 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu); 2439 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2442 2440
2443 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; 2441 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
2444 /* FIXME: handle wraparound of asid_generation */ 2442 /* FIXME: handle wraparound of asid_generation */
2445 if (svm->asid_generation != svm_data->asid_generation) 2443 if (svm->asid_generation != sd->asid_generation)
2446 new_asid(svm, svm_data); 2444 new_asid(svm, sd);
2447} 2445}
2448 2446
2449static void svm_inject_nmi(struct kvm_vcpu *vcpu) 2447static void svm_inject_nmi(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 64757c0ba5fc..563d20504988 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -35,10 +35,10 @@
35 35
36cpumask_var_t xen_cpu_initialized_map; 36cpumask_var_t xen_cpu_initialized_map;
37 37
38static DEFINE_PER_CPU(int, resched_irq); 38static DEFINE_PER_CPU(int, xen_resched_irq);
39static DEFINE_PER_CPU(int, callfunc_irq); 39static DEFINE_PER_CPU(int, xen_callfunc_irq);
40static DEFINE_PER_CPU(int, callfuncsingle_irq); 40static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
41static DEFINE_PER_CPU(int, debug_irq) = -1; 41static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
42 42
43static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); 43static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
44static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); 44static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
@@ -103,7 +103,7 @@ static int xen_smp_intr_init(unsigned int cpu)
103 NULL); 103 NULL);
104 if (rc < 0) 104 if (rc < 0)
105 goto fail; 105 goto fail;
106 per_cpu(resched_irq, cpu) = rc; 106 per_cpu(xen_resched_irq, cpu) = rc;
107 107
108 callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); 108 callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
109 rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, 109 rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
@@ -114,7 +114,7 @@ static int xen_smp_intr_init(unsigned int cpu)
114 NULL); 114 NULL);
115 if (rc < 0) 115 if (rc < 0)
116 goto fail; 116 goto fail;
117 per_cpu(callfunc_irq, cpu) = rc; 117 per_cpu(xen_callfunc_irq, cpu) = rc;
118 118
119 debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); 119 debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
120 rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt, 120 rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
@@ -122,7 +122,7 @@ static int xen_smp_intr_init(unsigned int cpu)
122 debug_name, NULL); 122 debug_name, NULL);
123 if (rc < 0) 123 if (rc < 0)
124 goto fail; 124 goto fail;
125 per_cpu(debug_irq, cpu) = rc; 125 per_cpu(xen_debug_irq, cpu) = rc;
126 126
127 callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); 127 callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
128 rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, 128 rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
@@ -133,19 +133,20 @@ static int xen_smp_intr_init(unsigned int cpu)
133 NULL); 133 NULL);
134 if (rc < 0) 134 if (rc < 0)
135 goto fail; 135 goto fail;
136 per_cpu(callfuncsingle_irq, cpu) = rc; 136 per_cpu(xen_callfuncsingle_irq, cpu) = rc;
137 137
138 return 0; 138 return 0;
139 139
140 fail: 140 fail:
141 if (per_cpu(resched_irq, cpu) >= 0) 141 if (per_cpu(xen_resched_irq, cpu) >= 0)
142 unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL); 142 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
143 if (per_cpu(callfunc_irq, cpu) >= 0) 143 if (per_cpu(xen_callfunc_irq, cpu) >= 0)
144 unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); 144 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
145 if (per_cpu(debug_irq, cpu) >= 0) 145 if (per_cpu(xen_debug_irq, cpu) >= 0)
146 unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL); 146 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
147 if (per_cpu(callfuncsingle_irq, cpu) >= 0) 147 if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
148 unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL); 148 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
149 NULL);
149 150
150 return rc; 151 return rc;
151} 152}
@@ -349,10 +350,10 @@ static void xen_cpu_die(unsigned int cpu)
349 current->state = TASK_UNINTERRUPTIBLE; 350 current->state = TASK_UNINTERRUPTIBLE;
350 schedule_timeout(HZ/10); 351 schedule_timeout(HZ/10);
351 } 352 }
352 unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL); 353 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
353 unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); 354 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
354 unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL); 355 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
355 unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL); 356 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
356 xen_uninit_lock_cpu(cpu); 357 xen_uninit_lock_cpu(cpu);
357 xen_teardown_timer(cpu); 358 xen_teardown_timer(cpu);
358 359
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 9d1f853120d8..0d3f07cd1b5f 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -31,14 +31,14 @@
31#define NS_PER_TICK (1000000000LL / HZ) 31#define NS_PER_TICK (1000000000LL / HZ)
32 32
33/* runstate info updated by Xen */ 33/* runstate info updated by Xen */
34static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate); 34static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
35 35
36/* snapshots of runstate info */ 36/* snapshots of runstate info */
37static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot); 37static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
38 38
39/* unused ns of stolen and blocked time */ 39/* unused ns of stolen and blocked time */
40static DEFINE_PER_CPU(u64, residual_stolen); 40static DEFINE_PER_CPU(u64, xen_residual_stolen);
41static DEFINE_PER_CPU(u64, residual_blocked); 41static DEFINE_PER_CPU(u64, xen_residual_blocked);
42 42
43/* return a consistent snapshot of 64-bit time/counter value */ 43/* return a consistent snapshot of 64-bit time/counter value */
44static u64 get64(const u64 *p) 44static u64 get64(const u64 *p)
@@ -79,7 +79,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
79 79
80 BUG_ON(preemptible()); 80 BUG_ON(preemptible());
81 81
82 state = &__get_cpu_var(runstate); 82 state = &__get_cpu_var(xen_runstate);
83 83
84 /* 84 /*
85 * The runstate info is always updated by the hypervisor on 85 * The runstate info is always updated by the hypervisor on
@@ -97,14 +97,14 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
97/* return true when a vcpu could run but has no real cpu to run on */ 97/* return true when a vcpu could run but has no real cpu to run on */
98bool xen_vcpu_stolen(int vcpu) 98bool xen_vcpu_stolen(int vcpu)
99{ 99{
100 return per_cpu(runstate, vcpu).state == RUNSTATE_runnable; 100 return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
101} 101}
102 102
103void xen_setup_runstate_info(int cpu) 103void xen_setup_runstate_info(int cpu)
104{ 104{
105 struct vcpu_register_runstate_memory_area area; 105 struct vcpu_register_runstate_memory_area area;
106 106
107 area.addr.v = &per_cpu(runstate, cpu); 107 area.addr.v = &per_cpu(xen_runstate, cpu);
108 108
109 if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, 109 if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
110 cpu, &area)) 110 cpu, &area))
@@ -122,7 +122,7 @@ static void do_stolen_accounting(void)
122 122
123 WARN_ON(state.state != RUNSTATE_running); 123 WARN_ON(state.state != RUNSTATE_running);
124 124
125 snap = &__get_cpu_var(runstate_snapshot); 125 snap = &__get_cpu_var(xen_runstate_snapshot);
126 126
127 /* work out how much time the VCPU has not been runn*ing* */ 127 /* work out how much time the VCPU has not been runn*ing* */
128 blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked]; 128 blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
@@ -133,24 +133,24 @@ static void do_stolen_accounting(void)
133 133
134 /* Add the appropriate number of ticks of stolen time, 134 /* Add the appropriate number of ticks of stolen time,
135 including any left-overs from last time. */ 135 including any left-overs from last time. */
136 stolen = runnable + offline + __get_cpu_var(residual_stolen); 136 stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
137 137
138 if (stolen < 0) 138 if (stolen < 0)
139 stolen = 0; 139 stolen = 0;
140 140
141 ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); 141 ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
142 __get_cpu_var(residual_stolen) = stolen; 142 __get_cpu_var(xen_residual_stolen) = stolen;
143 account_steal_ticks(ticks); 143 account_steal_ticks(ticks);
144 144
145 /* Add the appropriate number of ticks of blocked time, 145 /* Add the appropriate number of ticks of blocked time,
146 including any left-overs from last time. */ 146 including any left-overs from last time. */
147 blocked += __get_cpu_var(residual_blocked); 147 blocked += __get_cpu_var(xen_residual_blocked);
148 148
149 if (blocked < 0) 149 if (blocked < 0)
150 blocked = 0; 150 blocked = 0;
151 151
152 ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); 152 ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
153 __get_cpu_var(residual_blocked) = blocked; 153 __get_cpu_var(xen_residual_blocked) = blocked;
154 account_idle_ticks(ticks); 154 account_idle_ticks(ticks);
155} 155}
156 156
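The stolen/blocked accounting above only ever charges whole ticks, carrying the sub-tick remainder in the renamed per-cpu residuals so rounding never loses time across passes. A reduced sketch of that conversion, assuming the same NS_PER_TICK and helpers as the file above:

    static DEFINE_PER_CPU(u64, xen_residual_stolen);

    static void account_stolen_ns(u64 stolen_ns)
    {
            u64 ns = stolen_ns + __get_cpu_var(xen_residual_stolen);
            unsigned long ticks;

            /* divide by the tick length; the remainder comes back in ns
             * and is carried into the next accounting pass */
            ticks = iter_div_u64_rem(ns, NS_PER_TICK, &ns);
            __get_cpu_var(xen_residual_stolen) = ns;
            account_steal_ticks(ticks);
    }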
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index f8ae0d94a647..704c14115323 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -99,7 +99,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
99 struct cryptd_cpu_queue *cpu_queue; 99 struct cryptd_cpu_queue *cpu_queue;
100 100
101 cpu = get_cpu(); 101 cpu = get_cpu();
102 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); 102 cpu_queue = this_cpu_ptr(queue->cpu_queue);
103 err = crypto_enqueue_request(&cpu_queue->queue, request); 103 err = crypto_enqueue_request(&cpu_queue->queue, request);
104 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); 104 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
105 put_cpu(); 105 put_cpu();
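The cryptd hunk is the canonical this_cpu conversion: the CPU number is still taken via get_cpu() because queue_work_on() needs it, but the pointer derivation no longer goes through per_cpu_ptr(ptr, cpu). A self-contained sketch of the idiom, with hypothetical field names:

    #include <linux/percpu.h>
    #include <linux/smp.h>

    struct hit_stats {
            unsigned long hits;
    };

    static void bump_local(struct hit_stats *base)   /* percpu pointer */
    {
            struct hit_stats *st;

            get_cpu();                /* disable preemption; CPU cannot change */
            st = this_cpu_ptr(base);  /* == per_cpu_ptr(base, smp_processor_id()) */
            st->hits++;
            put_cpu();
    }

On x86, this_cpu_ptr() becomes a cheap segment-relative address computation rather than an array lookup keyed by the CPU number.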
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 27fd775375b0..958bd1540c30 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -131,7 +131,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute
131 * boot up and this data does not change thereafter. Hence this 131 * boot up and this data does not change thereafter. Hence this
132 * operation should be safe. No locking required. 132 * operation should be safe. No locking required.
133 */ 133 */
134 addr = __pa(per_cpu_ptr(crash_notes, cpunum)); 134 addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
135 rc = sprintf(buf, "%Lx\n", addr); 135 rc = sprintf(buf, "%Lx\n", addr);
136 return rc; 136 return rc;
137} 137}
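__pa() was only ever valid here while percpu memory sat in the linear kernel mapping; with the new allocator a percpu chunk can be vmalloc-mapped, where __pa() yields a bogus address. per_cpu_ptr_to_phys() performs the correct translation either way. The pattern, reduced:

    /* physical address of one CPU's crash_notes, correct for both
     * linearly-mapped and vmalloc-mapped percpu chunks */
    phys_addr_t addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));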
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f20668c09ce0..67bc2ece7b4b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -64,14 +64,14 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
64 * - Lock should not be held across 64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP); 65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
66 */ 66 */
67static DEFINE_PER_CPU(int, policy_cpu); 67static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
68static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); 68static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69 69
70#define lock_policy_rwsem(mode, cpu) \ 70#define lock_policy_rwsem(mode, cpu) \
71int lock_policy_rwsem_##mode \ 71int lock_policy_rwsem_##mode \
72(int cpu) \ 72(int cpu) \
73{ \ 73{ \
74 int policy_cpu = per_cpu(policy_cpu, cpu); \ 74 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
75 BUG_ON(policy_cpu == -1); \ 75 BUG_ON(policy_cpu == -1); \
76 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ 76 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
77 if (unlikely(!cpu_online(cpu))) { \ 77 if (unlikely(!cpu_online(cpu))) { \
@@ -90,7 +90,7 @@ EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
90 90
91void unlock_policy_rwsem_read(int cpu) 91void unlock_policy_rwsem_read(int cpu)
92{ 92{
93 int policy_cpu = per_cpu(policy_cpu, cpu); 93 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
94 BUG_ON(policy_cpu == -1); 94 BUG_ON(policy_cpu == -1);
95 up_read(&per_cpu(cpu_policy_rwsem, policy_cpu)); 95 up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
96} 96}
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
98 98
99void unlock_policy_rwsem_write(int cpu) 99void unlock_policy_rwsem_write(int cpu)
100{ 100{
101 int policy_cpu = per_cpu(policy_cpu, cpu); 101 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
102 BUG_ON(policy_cpu == -1); 102 BUG_ON(policy_cpu == -1);
103 up_write(&per_cpu(cpu_policy_rwsem, policy_cpu)); 103 up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
104} 104}
@@ -818,7 +818,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu,
818 818
819 /* Set proper policy_cpu */ 819 /* Set proper policy_cpu */
820 unlock_policy_rwsem_write(cpu); 820 unlock_policy_rwsem_write(cpu);
821 per_cpu(policy_cpu, cpu) = managed_policy->cpu; 821 per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
822 822
823 if (lock_policy_rwsem_write(cpu) < 0) { 823 if (lock_policy_rwsem_write(cpu) < 0) {
824 /* Should not go through policy unlock path */ 824 /* Should not go through policy unlock path */
@@ -932,7 +932,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
932 if (!cpu_online(j)) 932 if (!cpu_online(j))
933 continue; 933 continue;
934 per_cpu(cpufreq_cpu_data, j) = policy; 934 per_cpu(cpufreq_cpu_data, j) = policy;
935 per_cpu(policy_cpu, j) = policy->cpu; 935 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
936 } 936 }
937 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 937 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
938 938
@@ -1020,7 +1020,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
1020 cpumask_copy(policy->cpus, cpumask_of(cpu)); 1020 cpumask_copy(policy->cpus, cpumask_of(cpu));
1021 1021
1022 /* Initially set CPU itself as the policy_cpu */ 1022 /* Initially set CPU itself as the policy_cpu */
1023 per_cpu(policy_cpu, cpu) = cpu; 1023 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
1024 ret = (lock_policy_rwsem_write(cpu) < 0); 1024 ret = (lock_policy_rwsem_write(cpu) < 0);
1025 WARN_ON(ret); 1025 WARN_ON(ret);
1026 1026
@@ -2002,7 +2002,7 @@ static int __init cpufreq_core_init(void)
2002 int cpu; 2002 int cpu;
2003 2003
2004 for_each_possible_cpu(cpu) { 2004 for_each_possible_cpu(cpu) {
2005 per_cpu(policy_cpu, cpu) = -1; 2005 per_cpu(cpufreq_policy_cpu, cpu) = -1;
2006 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); 2006 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2007 } 2007 }
2008 2008
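Since the macro hunk is hard to read in isolation, here is approximately what lock_policy_rwsem(read, cpu) expands to after this patch; the tail of the macro lies outside the lines shown above, so the error path here is reconstructed and may differ in detail:

    int lock_policy_rwsem_read(int cpu)
    {
            int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
            BUG_ON(policy_cpu == -1);
            down_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
            if (unlikely(!cpu_online(cpu))) {
                    up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
                    return -1;
            }
            return 0;
    }

Note that before the rename the local policy_cpu shared its name with the per-cpu variable it was initialized from; once the per_cpu__ symbol prefix is removed that shadowing breaks the macro, which is exactly what renaming to cpufreq_policy_cpu heads off.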
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index a9bd3a05a684..05432216e224 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -174,7 +174,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
174} 174}
175EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target); 175EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
176 176
177static DEFINE_PER_CPU(struct cpufreq_frequency_table *, show_table); 177static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
178/** 178/**
179 * show_available_freqs - show available frequencies for the specified CPU 179 * show_available_freqs - show available frequencies for the specified CPU
180 */ 180 */
@@ -185,10 +185,10 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
185 ssize_t count = 0; 185 ssize_t count = 0;
186 struct cpufreq_frequency_table *table; 186 struct cpufreq_frequency_table *table;
187 187
188 if (!per_cpu(show_table, cpu)) 188 if (!per_cpu(cpufreq_show_table, cpu))
189 return -ENODEV; 189 return -ENODEV;
190 190
191 table = per_cpu(show_table, cpu); 191 table = per_cpu(cpufreq_show_table, cpu);
192 192
193 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { 193 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
194 if (table[i].frequency == CPUFREQ_ENTRY_INVALID) 194 if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
@@ -217,20 +217,20 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
217 unsigned int cpu) 217 unsigned int cpu)
218{ 218{
219 dprintk("setting show_table for cpu %u to %p\n", cpu, table); 219 dprintk("setting show_table for cpu %u to %p\n", cpu, table);
220 per_cpu(show_table, cpu) = table; 220 per_cpu(cpufreq_show_table, cpu) = table;
221} 221}
222EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr); 222EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
223 223
224void cpufreq_frequency_table_put_attr(unsigned int cpu) 224void cpufreq_frequency_table_put_attr(unsigned int cpu)
225{ 225{
226 dprintk("clearing show_table for cpu %u\n", cpu); 226 dprintk("clearing show_table for cpu %u\n", cpu);
227 per_cpu(show_table, cpu) = NULL; 227 per_cpu(cpufreq_show_table, cpu) = NULL;
228} 228}
229EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); 229EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
230 230
231struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) 231struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
232{ 232{
233 return per_cpu(show_table, cpu); 233 return per_cpu(cpufreq_show_table, cpu);
234} 234}
235EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); 235EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
236 236
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 84c51e177269..8c2f3703ec85 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -64,7 +64,7 @@ struct aes_ctx {
64 u32 *D; 64 u32 *D;
65}; 65};
66 66
67static DEFINE_PER_CPU(struct cword *, last_cword); 67static DEFINE_PER_CPU(struct cword *, paes_last_cword);
68 68
69/* Tells whether the ACE is capable to generate 69/* Tells whether the ACE is capable to generate
70 the extended key for a given key_len. */ 70 the extended key for a given key_len. */
@@ -152,9 +152,9 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
152 152
153ok: 153ok:
154 for_each_online_cpu(cpu) 154 for_each_online_cpu(cpu)
155 if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) || 155 if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
156 &ctx->cword.decrypt == per_cpu(last_cword, cpu)) 156 &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
157 per_cpu(last_cword, cpu) = NULL; 157 per_cpu(paes_last_cword, cpu) = NULL;
158 158
159 return 0; 159 return 0;
160} 160}
@@ -166,7 +166,7 @@ static inline void padlock_reset_key(struct cword *cword)
166{ 166{
167 int cpu = raw_smp_processor_id(); 167 int cpu = raw_smp_processor_id();
168 168
169 if (cword != per_cpu(last_cword, cpu)) 169 if (cword != per_cpu(paes_last_cword, cpu))
170#ifndef CONFIG_X86_64 170#ifndef CONFIG_X86_64
171 asm volatile ("pushfl; popfl"); 171 asm volatile ("pushfl; popfl");
172#else 172#else
@@ -176,7 +176,7 @@ static inline void padlock_reset_key(struct cword *cword)
176 176
177static inline void padlock_store_cword(struct cword *cword) 177static inline void padlock_store_cword(struct cword *cword)
178{ 178{
179 per_cpu(last_cword, raw_smp_processor_id()) = cword; 179 per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
180} 180}
181 181
182/* 182/*
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8f99354082ce..6f51a0a7a8bb 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -326,14 +326,7 @@ arch_initcall(dma_channel_table_init);
326 */ 326 */
327struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) 327struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
328{ 328{
329 struct dma_chan *chan; 329 return this_cpu_read(channel_table[tx_type]->chan);
330 int cpu;
331
332 cpu = get_cpu();
333 chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
334 put_cpu();
335
336 return chan;
337} 330}
338EXPORT_SYMBOL(dma_find_channel); 331EXPORT_SYMBOL(dma_find_channel);
339 332
@@ -857,7 +850,6 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
857 struct dma_async_tx_descriptor *tx; 850 struct dma_async_tx_descriptor *tx;
858 dma_addr_t dma_dest, dma_src; 851 dma_addr_t dma_dest, dma_src;
859 dma_cookie_t cookie; 852 dma_cookie_t cookie;
860 int cpu;
861 unsigned long flags; 853 unsigned long flags;
862 854
863 dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); 855 dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
@@ -876,10 +868,10 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
876 tx->callback = NULL; 868 tx->callback = NULL;
877 cookie = tx->tx_submit(tx); 869 cookie = tx->tx_submit(tx);
878 870
879 cpu = get_cpu(); 871 preempt_disable();
880 per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; 872 __this_cpu_add(chan->local->bytes_transferred, len);
881 per_cpu_ptr(chan->local, cpu)->memcpy_count++; 873 __this_cpu_inc(chan->local->memcpy_count);
882 put_cpu(); 874 preempt_enable();
883 875
884 return cookie; 876 return cookie;
885} 877}
@@ -906,7 +898,6 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
906 struct dma_async_tx_descriptor *tx; 898 struct dma_async_tx_descriptor *tx;
907 dma_addr_t dma_dest, dma_src; 899 dma_addr_t dma_dest, dma_src;
908 dma_cookie_t cookie; 900 dma_cookie_t cookie;
909 int cpu;
910 unsigned long flags; 901 unsigned long flags;
911 902
912 dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); 903 dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
@@ -923,10 +914,10 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
923 tx->callback = NULL; 914 tx->callback = NULL;
924 cookie = tx->tx_submit(tx); 915 cookie = tx->tx_submit(tx);
925 916
926 cpu = get_cpu(); 917 preempt_disable();
927 per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; 918 __this_cpu_add(chan->local->bytes_transferred, len);
928 per_cpu_ptr(chan->local, cpu)->memcpy_count++; 919 __this_cpu_inc(chan->local->memcpy_count);
929 put_cpu(); 920 preempt_enable();
930 921
931 return cookie; 922 return cookie;
932} 923}
@@ -955,7 +946,6 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
955 struct dma_async_tx_descriptor *tx; 946 struct dma_async_tx_descriptor *tx;
956 dma_addr_t dma_dest, dma_src; 947 dma_addr_t dma_dest, dma_src;
957 dma_cookie_t cookie; 948 dma_cookie_t cookie;
958 int cpu;
959 unsigned long flags; 949 unsigned long flags;
960 950
961 dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); 951 dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
@@ -973,10 +963,10 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
973 tx->callback = NULL; 963 tx->callback = NULL;
974 cookie = tx->tx_submit(tx); 964 cookie = tx->tx_submit(tx);
975 965
976 cpu = get_cpu(); 966 preempt_disable();
977 per_cpu_ptr(chan->local, cpu)->bytes_transferred += len; 967 __this_cpu_add(chan->local->bytes_transferred, len);
978 per_cpu_ptr(chan->local, cpu)->memcpy_count++; 968 __this_cpu_inc(chan->local->memcpy_count);
979 put_cpu(); 969 preempt_enable();
980 970
981 return cookie; 971 return cookie;
982} 972}
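The dmaengine hunks replace a get_cpu()/per_cpu_ptr()/put_cpu() triple with __this_cpu_add()/__this_cpu_inc() under an explicit preempt bracket. The double-underscore forms provide no protection of their own; the caller guarantees the task stays on one CPU for both updates. A minimal sketch, with hypothetical counters:

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    static DEFINE_PER_CPU(unsigned long, tx_bytes);
    static DEFINE_PER_CPU(unsigned long, tx_count);

    static void note_tx(size_t len)
    {
            preempt_disable();              /* keep both updates on one CPU */
            __this_cpu_add(tx_bytes, len);  /* a single add insn on x86 */
            __this_cpu_inc(tx_count);
            preempt_enable();
    }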
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 4b89b791be6a..42be0b15084b 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -826,8 +826,7 @@ static void __cpuinit take_over_work(struct ehca_comp_pool *pool, int cpu)
826 cq = list_entry(cct->cq_list.next, struct ehca_cq, entry); 826 cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
827 827
828 list_del(&cq->entry); 828 list_del(&cq->entry);
829 __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks, 829 __queue_comp_task(cq, this_cpu_ptr(pool->cpu_comp_tasks));
830 smp_processor_id()));
831 } 830 }
832 831
833 spin_unlock_irqrestore(&cct->task_lock, flags_cct); 832 spin_unlock_irqrestore(&cct->task_lock, flags_cct);
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6ae388849a3b..fb2b7ef7868e 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -69,7 +69,7 @@ static struct lguest_pages *lguest_pages(unsigned int cpu)
69 (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]); 69 (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
70} 70}
71 71
72static DEFINE_PER_CPU(struct lg_cpu *, last_cpu); 72static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
73 73
74/*S:010 74/*S:010
75 * We approach the Switcher. 75 * We approach the Switcher.
@@ -90,8 +90,8 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
90 * meanwhile). If that's not the case, we pretend everything in the 90 * meanwhile). If that's not the case, we pretend everything in the
91 * Guest has changed. 91 * Guest has changed.
92 */ 92 */
93 if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) { 93 if (__get_cpu_var(lg_last_cpu) != cpu || cpu->last_pages != pages) {
94 __get_cpu_var(last_cpu) = cpu; 94 __get_cpu_var(lg_last_cpu) = cpu;
95 cpu->last_pages = pages; 95 cpu->last_pages = pages;
96 cpu->changed = CHANGED_ALL; 96 cpu->changed = CHANGED_ALL;
97 } 97 }
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 8c658cf6f62f..109d2783e4d8 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1378,7 +1378,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1378 } 1378 }
1379 __skb_pull(skb, sizeof(*p)); 1379 __skb_pull(skb, sizeof(*p));
1380 1380
1381 st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id()); 1381 st = this_cpu_ptr(sge->port_stats[p->iff]);
1382 1382
1383 skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev); 1383 skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
1384 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && 1384 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
@@ -1780,8 +1780,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1780{ 1780{
1781 struct adapter *adapter = dev->ml_priv; 1781 struct adapter *adapter = dev->ml_priv;
1782 struct sge *sge = adapter->sge; 1782 struct sge *sge = adapter->sge;
1783 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], 1783 struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
1784 smp_processor_id());
1785 struct cpl_tx_pkt *cpl; 1784 struct cpl_tx_pkt *cpl;
1786 struct sk_buff *orig_skb = skb; 1785 struct sk_buff *orig_skb = skb;
1787 int ret; 1786 int ret;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index eae4ad749e9d..b9fcc9819837 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -81,7 +81,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
81 81
82 /* it's OK to use per_cpu_ptr() because BHs are off */ 82 /* it's OK to use per_cpu_ptr() because BHs are off */
83 pcpu_lstats = dev->ml_priv; 83 pcpu_lstats = dev->ml_priv;
84 lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id()); 84 lb_stats = this_cpu_ptr(pcpu_lstats);
85 85
86 len = skb->len; 86 len = skb->len;
87 if (likely(netif_rx(skb) == NET_RX_SUCCESS)) { 87 if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 63099c58a6dd..3a15de56df9c 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -153,15 +153,14 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
153 struct net_device *rcv = NULL; 153 struct net_device *rcv = NULL;
154 struct veth_priv *priv, *rcv_priv; 154 struct veth_priv *priv, *rcv_priv;
155 struct veth_net_stats *stats, *rcv_stats; 155 struct veth_net_stats *stats, *rcv_stats;
156 int length, cpu; 156 int length;
157 157
158 priv = netdev_priv(dev); 158 priv = netdev_priv(dev);
159 rcv = priv->peer; 159 rcv = priv->peer;
160 rcv_priv = netdev_priv(rcv); 160 rcv_priv = netdev_priv(rcv);
161 161
162 cpu = smp_processor_id(); 162 stats = this_cpu_ptr(priv->stats);
163 stats = per_cpu_ptr(priv->stats, cpu); 163 rcv_stats = this_cpu_ptr(rcv_priv->stats);
164 rcv_stats = per_cpu_ptr(rcv_priv->stats, cpu);
165 164
166 if (!(rcv->flags & IFF_UP)) 165 if (!(rcv->flags & IFF_UP))
167 goto tx_drop; 166 goto tx_drop;
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index a7aae24f2889..166b67ea622f 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -47,7 +47,7 @@
47 */ 47 */
48static struct ring_buffer *op_ring_buffer_read; 48static struct ring_buffer *op_ring_buffer_read;
49static struct ring_buffer *op_ring_buffer_write; 49static struct ring_buffer *op_ring_buffer_write;
50DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); 50DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
51 51
52static void wq_sync_buffer(struct work_struct *work); 52static void wq_sync_buffer(struct work_struct *work);
53 53
@@ -61,8 +61,7 @@ unsigned long oprofile_get_cpu_buffer_size(void)
61 61
62void oprofile_cpu_buffer_inc_smpl_lost(void) 62void oprofile_cpu_buffer_inc_smpl_lost(void)
63{ 63{
64 struct oprofile_cpu_buffer *cpu_buf 64 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
65 = &__get_cpu_var(cpu_buffer);
66 65
67 cpu_buf->sample_lost_overflow++; 66 cpu_buf->sample_lost_overflow++;
68} 67}
@@ -95,7 +94,7 @@ int alloc_cpu_buffers(void)
95 goto fail; 94 goto fail;
96 95
97 for_each_possible_cpu(i) { 96 for_each_possible_cpu(i) {
98 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i); 97 struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
99 98
100 b->last_task = NULL; 99 b->last_task = NULL;
101 b->last_is_kernel = -1; 100 b->last_is_kernel = -1;
@@ -122,7 +121,7 @@ void start_cpu_work(void)
122 work_enabled = 1; 121 work_enabled = 1;
123 122
124 for_each_online_cpu(i) { 123 for_each_online_cpu(i) {
125 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i); 124 struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
126 125
127 /* 126 /*
128 * Spread the work by 1 jiffy per cpu so they don't all 127 * Spread the work by 1 jiffy per cpu so they don't all
@@ -139,7 +138,7 @@ void end_cpu_work(void)
139 work_enabled = 0; 138 work_enabled = 0;
140 139
141 for_each_online_cpu(i) { 140 for_each_online_cpu(i) {
142 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i); 141 struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
143 142
144 cancel_delayed_work(&b->work); 143 cancel_delayed_work(&b->work);
145 } 144 }
@@ -330,7 +329,7 @@ static inline void
330__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, 329__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
331 unsigned long event, int is_kernel) 330 unsigned long event, int is_kernel)
332{ 331{
333 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); 332 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
334 unsigned long backtrace = oprofile_backtrace_depth; 333 unsigned long backtrace = oprofile_backtrace_depth;
335 334
336 /* 335 /*
@@ -375,7 +374,7 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
375{ 374{
376 struct op_sample *sample; 375 struct op_sample *sample;
377 int is_kernel = !user_mode(regs); 376 int is_kernel = !user_mode(regs);
378 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); 377 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
379 378
380 cpu_buf->sample_received++; 379 cpu_buf->sample_received++;
381 380
@@ -430,13 +429,13 @@ int oprofile_write_commit(struct op_entry *entry)
430 429
431void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) 430void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
432{ 431{
433 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); 432 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
434 log_sample(cpu_buf, pc, 0, is_kernel, event); 433 log_sample(cpu_buf, pc, 0, is_kernel, event);
435} 434}
436 435
437void oprofile_add_trace(unsigned long pc) 436void oprofile_add_trace(unsigned long pc)
438{ 437{
439 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); 438 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
440 439
441 if (!cpu_buf->tracing) 440 if (!cpu_buf->tracing)
442 return; 441 return;
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 272995d20293..68ea16ab645f 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -50,7 +50,7 @@ struct oprofile_cpu_buffer {
50 struct delayed_work work; 50 struct delayed_work work;
51}; 51};
52 52
53DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); 53DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
54 54
55/* 55/*
56 * Resets the cpu buffer to a sane state. 56 * Resets the cpu buffer to a sane state.
@@ -60,7 +60,7 @@ DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
60 */ 60 */
61static inline void op_cpu_buffer_reset(int cpu) 61static inline void op_cpu_buffer_reset(int cpu)
62{ 62{
63 struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu); 63 struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);
64 64
65 cpu_buf->last_is_kernel = -1; 65 cpu_buf->last_is_kernel = -1;
66 cpu_buf->last_task = NULL; 66 cpu_buf->last_task = NULL;
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index 61689e814d46..917d28ebeacd 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -23,7 +23,7 @@ void oprofile_reset_stats(void)
23 int i; 23 int i;
24 24
25 for_each_possible_cpu(i) { 25 for_each_possible_cpu(i) {
26 cpu_buf = &per_cpu(cpu_buffer, i); 26 cpu_buf = &per_cpu(op_cpu_buffer, i);
27 cpu_buf->sample_received = 0; 27 cpu_buf->sample_received = 0;
28 cpu_buf->sample_lost_overflow = 0; 28 cpu_buf->sample_lost_overflow = 0;
29 cpu_buf->backtrace_aborted = 0; 29 cpu_buf->backtrace_aborted = 0;
@@ -51,7 +51,7 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
51 return; 51 return;
52 52
53 for_each_possible_cpu(i) { 53 for_each_possible_cpu(i) {
54 cpu_buf = &per_cpu(cpu_buffer, i); 54 cpu_buf = &per_cpu(op_cpu_buffer, i);
55 snprintf(buf, 10, "cpu%d", i); 55 snprintf(buf, 10, "cpu%d", i);
56 cpudir = oprofilefs_mkdir(sb, dir, buf); 56 cpudir = oprofilefs_mkdir(sb, dir, buf);
57 57
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index cd5082d3ca19..58bee55a3dd3 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -208,7 +208,7 @@ config PCMCIA_PXA2XX
208 depends on ARM && ARCH_PXA && PCMCIA 208 depends on ARM && ARCH_PXA && PCMCIA
209 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \ 209 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
210 || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \ 210 || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \
211 || ARCH_VIPER || ARCH_PXA_ESERIES || MACH_STARGATE2) 211 || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2)
212 select PCMCIA_SOC_COMMON 212 select PCMCIA_SOC_COMMON
213 help 213 help
214 Say Y here to include support for the PXA2xx PCMCIA controller 214 Say Y here to include support for the PXA2xx PCMCIA controller
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 382938313991..83ff802de544 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -67,7 +67,7 @@ pxa2xx-obj-$(CONFIG_ARCH_LUBBOCK) += pxa2xx_lubbock_cs.o
67pxa2xx-obj-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o 67pxa2xx-obj-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o
68pxa2xx-obj-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o 68pxa2xx-obj-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o
69pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o 69pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o
70pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o 70pxa2xx-obj-$(CONFIG_ARCOM_PCMCIA) += pxa2xx_viper.o
71pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o 71pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o
72pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o 72pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
73pxa2xx-obj-$(CONFIG_MACH_PALMTC) += pxa2xx_palmtc.o 73pxa2xx-obj-$(CONFIG_MACH_PALMTC) += pxa2xx_palmtc.o
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 84dde7768ad5..da346eb7e77e 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -214,7 +214,8 @@ static void pxa2xx_configure_sockets(struct device *dev)
214 MECR |= MECR_CIT; 214 MECR |= MECR_CIT;
215 215
216 /* Set MECR:NOS (Number Of Sockets) */ 216 /* Set MECR:NOS (Number Of Sockets) */
217 if ((ops->first + ops->nr) > 1 || machine_is_viper()) 217 if ((ops->first + ops->nr) > 1 ||
218 machine_is_viper() || machine_is_arcom_zeus())
218 MECR |= MECR_NOS; 219 MECR |= MECR_NOS;
219 else 220 else
220 MECR &= ~MECR_NOS; 221 MECR &= ~MECR_NOS;
@@ -252,6 +253,7 @@ int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt)
252 253
253 return soc_pcmcia_add_one(skt); 254 return soc_pcmcia_add_one(skt);
254} 255}
256EXPORT_SYMBOL(pxa2xx_drv_pcmcia_add_one);
255 257
256void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops) 258void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops)
257{ 259{
@@ -261,19 +263,19 @@ void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops)
261 ops->frequency_change = pxa2xx_pcmcia_frequency_change; 263 ops->frequency_change = pxa2xx_pcmcia_frequency_change;
262#endif 264#endif
263} 265}
266EXPORT_SYMBOL(pxa2xx_drv_pcmcia_ops);
264 267
265int __pxa2xx_drv_pcmcia_probe(struct device *dev) 268static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
266{ 269{
267 int i, ret = 0; 270 int i, ret = 0;
268 struct pcmcia_low_level *ops; 271 struct pcmcia_low_level *ops;
269 struct skt_dev_info *sinfo; 272 struct skt_dev_info *sinfo;
270 struct soc_pcmcia_socket *skt; 273 struct soc_pcmcia_socket *skt;
271 274
272 if (!dev || !dev->platform_data) 275 ops = (struct pcmcia_low_level *)dev->dev.platform_data;
276 if (!ops)
273 return -ENODEV; 277 return -ENODEV;
274 278
275 ops = (struct pcmcia_low_level *)dev->platform_data;
276
277 pxa2xx_drv_pcmcia_ops(ops); 279 pxa2xx_drv_pcmcia_ops(ops);
278 280
279 sinfo = kzalloc(SKT_DEV_INFO_SIZE(ops->nr), GFP_KERNEL); 281 sinfo = kzalloc(SKT_DEV_INFO_SIZE(ops->nr), GFP_KERNEL);
@@ -308,13 +310,6 @@ int __pxa2xx_drv_pcmcia_probe(struct device *dev)
308 310
309 return ret; 311 return ret;
310} 312}
311EXPORT_SYMBOL(__pxa2xx_drv_pcmcia_probe);
312
313
314static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
315{
316 return __pxa2xx_drv_pcmcia_probe(&dev->dev);
317}
318 313
319static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev) 314static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
320{ 315{
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index cb5efaec886f..bb62ea87b8f9 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,6 +1,3 @@
1/* temporary measure */
2extern int __pxa2xx_drv_pcmcia_probe(struct device *);
3
4int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt); 1int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
5void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops); 2void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
6 3
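With __pxa2xx_drv_pcmcia_probe() gone, the low-level ops reach the driver only through platform_data. A hypothetical board-side sketch of the other half of that contract (the my_board_* names are illustrative):

    static struct pcmcia_low_level my_board_pcmcia_ops = {
            .owner   = THIS_MODULE,
            .hw_init = my_board_hw_init,
            /* ... remaining socket callbacks ... */
    };

    static struct platform_device my_board_pcmcia_device = {
            .name = "pxa2xx-pcmcia",
            .id   = -1,
            .dev  = {
                    .platform_data = &my_board_pcmcia_ops,
            },
    };

platform_device_register(&my_board_pcmcia_device) then hands the ops to the probe above via dev->dev.platform_data.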
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c
index 3a8993ed5621..459a232d66be 100644
--- a/drivers/pcmcia/pxa2xx_palmtc.c
+++ b/drivers/pcmcia/pxa2xx_palmtc.c
@@ -67,7 +67,7 @@ static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
67 if (ret) 67 if (ret)
68 goto err7; 68 goto err7;
69 69
70 skt->irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY); 70 skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY);
71 return 0; 71 return 0;
72 72
73err7: 73err7:
diff --git a/drivers/pcmcia/pxa2xx_stargate2.c b/drivers/pcmcia/pxa2xx_stargate2.c
index 490749ea677f..d08802fe35f9 100644
--- a/drivers/pcmcia/pxa2xx_stargate2.c
+++ b/drivers/pcmcia/pxa2xx_stargate2.c
@@ -40,7 +40,7 @@ static struct pcmcia_irqs irqs[] = {
40 40
41static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 41static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
42{ 42{
43 skt->irq = IRQ_GPIO(SG2_S0_GPIO_READY); 43 skt->socket.pci_irq = IRQ_GPIO(SG2_S0_GPIO_READY);
44 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 44 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
45} 45}
46 46
diff --git a/drivers/pcmcia/pxa2xx_viper.c b/drivers/pcmcia/pxa2xx_viper.c
index 27be2e154df2..a51f2077644a 100644
--- a/drivers/pcmcia/pxa2xx_viper.c
+++ b/drivers/pcmcia/pxa2xx_viper.c
@@ -1,9 +1,8 @@
1/* 1/*
2 * VIPER PCMCIA support 2 * Viper/Zeus PCMCIA support
3 * Copyright 2004 Arcom Control Systems 3 * Copyright 2004 Arcom Control Systems
4 * 4 *
5 * Maintained by Marc Zyngier <maz@misterjones.org> 5 * Maintained by Marc Zyngier <maz@misterjones.org>
6 * <marc.zyngier@altran.com>
7 * 6 *
8 * Based on: 7 * Based on:
9 * iPAQ h2200 PCMCIA support 8 * iPAQ h2200 PCMCIA support
@@ -26,37 +25,47 @@
26 25
27#include <asm/irq.h> 26#include <asm/irq.h>
28 27
29#include <mach/viper.h> 28#include <mach/arcom-pcmcia.h>
30#include <asm/mach-types.h>
31 29
32#include "soc_common.h" 30#include "soc_common.h"
33#include "pxa2xx_base.h" 31#include "pxa2xx_base.h"
34 32
33static struct platform_device *arcom_pcmcia_dev;
34
35static struct pcmcia_irqs irqs[] = { 35static struct pcmcia_irqs irqs[] = {
36 { 0, gpio_to_irq(VIPER_CF_CD_GPIO), "PCMCIA_CD" } 36 {
37 .sock = 0,
38 .str = "PCMCIA_CD",
39 },
37}; 40};
38 41
42static inline struct arcom_pcmcia_pdata *viper_get_pdata(void)
43{
44 return arcom_pcmcia_dev->dev.platform_data;
45}
46
39static int viper_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 47static int viper_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
40{ 48{
49 struct arcom_pcmcia_pdata *pdata = viper_get_pdata();
41 unsigned long flags; 50 unsigned long flags;
42 51
43 skt->socket.pci_irq = gpio_to_irq(VIPER_CF_RDY_GPIO); 52 skt->socket.pci_irq = gpio_to_irq(pdata->rdy_gpio);
53 irqs[0].irq = gpio_to_irq(pdata->cd_gpio);
44 54
45 if (gpio_request(VIPER_CF_CD_GPIO, "CF detect")) 55 if (gpio_request(pdata->cd_gpio, "CF detect"))
46 goto err_request_cd; 56 goto err_request_cd;
47 57
48 if (gpio_request(VIPER_CF_RDY_GPIO, "CF ready")) 58 if (gpio_request(pdata->rdy_gpio, "CF ready"))
49 goto err_request_rdy; 59 goto err_request_rdy;
50 60
51 if (gpio_request(VIPER_CF_POWER_GPIO, "CF power")) 61 if (gpio_request(pdata->pwr_gpio, "CF power"))
52 goto err_request_pwr; 62 goto err_request_pwr;
53 63
54 local_irq_save(flags); 64 local_irq_save(flags);
55 65
56 /* GPIO 82 is the CF power enable line. initially off */ 66 if (gpio_direction_output(pdata->pwr_gpio, 0) ||
57 if (gpio_direction_output(VIPER_CF_POWER_GPIO, 0) || 67 gpio_direction_input(pdata->cd_gpio) ||
58 gpio_direction_input(VIPER_CF_CD_GPIO) || 68 gpio_direction_input(pdata->rdy_gpio)) {
59 gpio_direction_input(VIPER_CF_RDY_GPIO)) {
60 local_irq_restore(flags); 69 local_irq_restore(flags);
61 goto err_dir; 70 goto err_dir;
62 } 71 }
@@ -66,13 +75,13 @@ static int viper_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
66 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 75 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
67 76
68err_dir: 77err_dir:
69 gpio_free(VIPER_CF_POWER_GPIO); 78 gpio_free(pdata->pwr_gpio);
70err_request_pwr: 79err_request_pwr:
71 gpio_free(VIPER_CF_RDY_GPIO); 80 gpio_free(pdata->rdy_gpio);
72err_request_rdy: 81err_request_rdy:
73 gpio_free(VIPER_CF_CD_GPIO); 82 gpio_free(pdata->cd_gpio);
74err_request_cd: 83err_request_cd:
75 printk(KERN_ERR "viper: Failed to setup PCMCIA GPIOs\n"); 84 dev_err(&arcom_pcmcia_dev->dev, "Failed to setup PCMCIA GPIOs\n");
76 return -1; 85 return -1;
77} 86}
78 87
@@ -81,17 +90,21 @@ err_request_cd:
81 */ 90 */
82static void viper_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) 91static void viper_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
83{ 92{
93 struct arcom_pcmcia_pdata *pdata = viper_get_pdata();
94
84 soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); 95 soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs));
85 gpio_free(VIPER_CF_POWER_GPIO); 96 gpio_free(pdata->pwr_gpio);
86 gpio_free(VIPER_CF_RDY_GPIO); 97 gpio_free(pdata->rdy_gpio);
87 gpio_free(VIPER_CF_CD_GPIO); 98 gpio_free(pdata->cd_gpio);
88} 99}
89 100
90static void viper_pcmcia_socket_state(struct soc_pcmcia_socket *skt, 101static void viper_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
91 struct pcmcia_state *state) 102 struct pcmcia_state *state)
92{ 103{
93 state->detect = gpio_get_value(VIPER_CF_CD_GPIO) ? 0 : 1; 104 struct arcom_pcmcia_pdata *pdata = viper_get_pdata();
94 state->ready = gpio_get_value(VIPER_CF_RDY_GPIO) ? 1 : 0; 105
106 state->detect = !gpio_get_value(pdata->cd_gpio);
107 state->ready = !!gpio_get_value(pdata->rdy_gpio);
95 state->bvd1 = 1; 108 state->bvd1 = 1;
96 state->bvd2 = 1; 109 state->bvd2 = 1;
97 state->wrprot = 0; 110 state->wrprot = 0;
@@ -102,20 +115,21 @@ static void viper_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
102static int viper_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, 115static int viper_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
103 const socket_state_t *state) 116 const socket_state_t *state)
104{ 117{
118 struct arcom_pcmcia_pdata *pdata = viper_get_pdata();
119
105 /* Silently ignore Vpp, output enable, speaker enable. */ 120 /* Silently ignore Vpp, output enable, speaker enable. */
106 viper_cf_rst(state->flags & SS_RESET); 121 pdata->reset(state->flags & SS_RESET);
107 122
108 /* Apply socket voltage */ 123 /* Apply socket voltage */
109 switch (state->Vcc) { 124 switch (state->Vcc) {
110 case 0: 125 case 0:
111 gpio_set_value(VIPER_CF_POWER_GPIO, 0); 126 gpio_set_value(pdata->pwr_gpio, 0);
112 break; 127 break;
113 case 33: 128 case 33:
114 gpio_set_value(VIPER_CF_POWER_GPIO, 1); 129 gpio_set_value(pdata->pwr_gpio, 1);
115 break; 130 break;
116 default: 131 default:
117 printk(KERN_ERR "%s: Unsupported Vcc:%d\n", 132 dev_err(&arcom_pcmcia_dev->dev, "Unsupported Vcc:%d\n", state->Vcc);
118 __func__, state->Vcc);
119 return -1; 133 return -1;
120 } 134 }
121 135
@@ -130,7 +144,7 @@ static void viper_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
130{ 144{
131} 145}
132 146
133static struct pcmcia_low_level viper_pcmcia_ops __initdata = { 147static struct pcmcia_low_level viper_pcmcia_ops = {
134 .owner = THIS_MODULE, 148 .owner = THIS_MODULE,
135 .hw_init = viper_pcmcia_hw_init, 149 .hw_init = viper_pcmcia_hw_init,
136 .hw_shutdown = viper_pcmcia_hw_shutdown, 150 .hw_shutdown = viper_pcmcia_hw_shutdown,
@@ -143,17 +157,25 @@ static struct pcmcia_low_level viper_pcmcia_ops __initdata = {
143 157
144static struct platform_device *viper_pcmcia_device; 158static struct platform_device *viper_pcmcia_device;
145 159
146static int __init viper_pcmcia_init(void) 160static int viper_pcmcia_probe(struct platform_device *pdev)
147{ 161{
148 int ret; 162 int ret;
149 163
150 if (!machine_is_viper()) 164 /* I can't imagine more than one device, but you never know... */
151 return -ENODEV; 165 if (arcom_pcmcia_dev)
166 return -EEXIST;
167
168 if (!pdev->dev.platform_data)
169 return -EINVAL;
152 170
153 viper_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); 171 viper_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
154 if (!viper_pcmcia_device) 172 if (!viper_pcmcia_device)
155 return -ENOMEM; 173 return -ENOMEM;
156 174
175 arcom_pcmcia_dev = pdev;
176
177 viper_pcmcia_device->dev.parent = &pdev->dev;
178
157 ret = platform_device_add_data(viper_pcmcia_device, 179 ret = platform_device_add_data(viper_pcmcia_device,
158 &viper_pcmcia_ops, 180 &viper_pcmcia_ops,
159 sizeof(viper_pcmcia_ops)); 181 sizeof(viper_pcmcia_ops));
@@ -161,18 +183,49 @@ static int __init viper_pcmcia_init(void)
161 if (!ret) 183 if (!ret)
162 ret = platform_device_add(viper_pcmcia_device); 184 ret = platform_device_add(viper_pcmcia_device);
163 185
164 if (ret) 186 if (ret) {
165 platform_device_put(viper_pcmcia_device); 187 platform_device_put(viper_pcmcia_device);
188 arcom_pcmcia_dev = NULL;
189 }
166 190
167 return ret; 191 return ret;
168} 192}
169 193
170static void __exit viper_pcmcia_exit(void) 194static int viper_pcmcia_remove(struct platform_device *pdev)
171{ 195{
172 platform_device_unregister(viper_pcmcia_device); 196 platform_device_unregister(viper_pcmcia_device);
197 arcom_pcmcia_dev = NULL;
198 return 0;
199}
200
201static struct platform_device_id viper_pcmcia_id_table[] = {
202 { .name = "viper-pcmcia", },
203 { .name = "zeus-pcmcia", },
204 { },
205};
206
207static struct platform_driver viper_pcmcia_driver = {
208 .probe = viper_pcmcia_probe,
209 .remove = viper_pcmcia_remove,
210 .driver = {
211 .name = "arcom-pcmcia",
212 .owner = THIS_MODULE,
213 },
214 .id_table = viper_pcmcia_id_table,
215};
216
217static int __init viper_pcmcia_init(void)
218{
219 return platform_driver_register(&viper_pcmcia_driver);
220}
221
222static void __exit viper_pcmcia_exit(void)
223{
224 return platform_driver_unregister(&viper_pcmcia_driver);
173} 225}
174 226
175module_init(viper_pcmcia_init); 227module_init(viper_pcmcia_init);
176module_exit(viper_pcmcia_exit); 228module_exit(viper_pcmcia_exit);
177 229
230MODULE_DEVICE_TABLE(platform, viper_pcmcia_id_table);
178MODULE_LICENSE("GPL"); 231MODULE_LICENSE("GPL");
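The id_table is what lets a single driver named "arcom-pcmcia" claim devices registered as either "viper-pcmcia" or "zeus-pcmcia". Simplified from the platform bus code of this era (a sketch, not the exact source):

    static int platform_match(struct device *dev, struct device_driver *drv)
    {
            struct platform_device *pdev = to_platform_device(dev);
            struct platform_driver *pdrv = to_platform_driver(drv);

            /* id_table entries take precedence over the driver name */
            if (pdrv->id_table)
                    return platform_match_id(pdrv->id_table, pdev) != NULL;

            /* fall back to a straight name match */
            return strcmp(pdev->name, drv->name) == 0;
    }

MODULE_DEVICE_TABLE(platform, ...) additionally exports both names as module aliases so the driver can be autoloaded for either board.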
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 395c04c2b00f..98c04cac43c1 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -113,11 +113,9 @@ static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
113#define IUCV_DBF_TEXT_(name, level, text...) \ 113#define IUCV_DBF_TEXT_(name, level, text...) \
114 do { \ 114 do { \
115 if (iucv_dbf_passes(iucv_dbf_##name, level)) { \ 115 if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
116 char* iucv_dbf_txt_buf = \ 116 char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
117 get_cpu_var(iucv_dbf_txt_buf); \ 117 sprintf(__buf, text); \
118 sprintf(iucv_dbf_txt_buf, text); \ 118 debug_text_event(iucv_dbf_##name, level, __buf); \
119 debug_text_event(iucv_dbf_##name, level, \
120 iucv_dbf_txt_buf); \
121 put_cpu_var(iucv_dbf_txt_buf); \ 119 put_cpu_var(iucv_dbf_txt_buf); \
122 } \ 120 } \
123 } while (0) 121 } while (0)
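The local rename inside IUCV_DBF_TEXT_ matters for the same reason as the percpu renames elsewhere in this series: once the per_cpu__ symbol prefix is removed, a macro-local variable named iucv_dbf_txt_buf shadows the per-cpu variable of the same name, and the later put_cpu_var(iucv_dbf_txt_buf) resolves against the wrong object. The safe shape of the idiom, reduced (buffer name and size are illustrative):

    static DEFINE_PER_CPU(char[256], dbg_txt_buf);

    static void log_text(const char *msg)
    {
            char *buf = get_cpu_var(dbg_txt_buf);  /* preempt off, local instance */
            strlcpy(buf, msg, 256);
            /* ... hand buf to the debug facility ... */
            put_cpu_var(dbg_txt_buf);              /* preempt back on */
    }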
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c1e19d5b5985..b1fd3daadc9c 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3955,7 +3955,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3955 * per cpu locality group is to reduce the contention between block 3955 * per cpu locality group is to reduce the contention between block
3956 * request from multiple CPUs. 3956 * request from multiple CPUs.
3957 */ 3957 */
3958 ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id()); 3958 ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
3959 3959
3960 /* we're going to use group allocation */ 3960 /* we're going to use group allocation */
3961 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; 3961 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 293fa0528a6e..73ab220354df 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -78,11 +78,6 @@ nfs4_callback_svc(void *vrqstp)
78 78
79 set_freezable(); 79 set_freezable();
80 80
81 /*
82 * FIXME: do we really need to run this under the BKL? If so, please
83 * add a comment about what it's intended to protect.
84 */
85 lock_kernel();
86 while (!kthread_should_stop()) { 81 while (!kthread_should_stop()) {
87 /* 82 /*
88 * Listen for a request on the socket 83 * Listen for a request on the socket
@@ -104,7 +99,6 @@ nfs4_callback_svc(void *vrqstp)
104 preverr = err; 99 preverr = err;
105 svc_process(rqstp); 100 svc_process(rqstp);
106 } 101 }
107 unlock_kernel();
108 return 0; 102 return 0;
109} 103}
110 104
@@ -160,11 +154,6 @@ nfs41_callback_svc(void *vrqstp)
160 154
161 set_freezable(); 155 set_freezable();
162 156
163 /*
164 * FIXME: do we really need to run this under the BKL? If so, please
165 * add a comment about what it's intended to protect.
166 */
167 lock_kernel();
168 while (!kthread_should_stop()) { 157 while (!kthread_should_stop()) {
169 prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); 158 prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
170 spin_lock_bh(&serv->sv_cb_lock); 159 spin_lock_bh(&serv->sv_cb_lock);
@@ -183,7 +172,6 @@ nfs41_callback_svc(void *vrqstp)
183 } 172 }
184 finish_wait(&serv->sv_cb_waitq, &wq); 173 finish_wait(&serv->sv_cb_waitq, &wq);
185 } 174 }
186 unlock_kernel();
187 return 0; 175 return 0;
188} 176}
189 177
@@ -397,6 +385,7 @@ static int nfs_callback_authenticate(struct svc_rqst *rqstp)
397 */ 385 */
398static struct svc_version *nfs4_callback_version[] = { 386static struct svc_version *nfs4_callback_version[] = {
399 [1] = &nfs4_callback_version1, 387 [1] = &nfs4_callback_version1,
388 [4] = &nfs4_callback_version4,
400}; 389};
401 390
402static struct svc_stat nfs4_callback_stats; 391static struct svc_stat nfs4_callback_stats;
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index 07baa8254ca1..d4036be0b589 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -106,6 +106,19 @@ struct cb_sequenceres {
106extern unsigned nfs4_callback_sequence(struct cb_sequenceargs *args, 106extern unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
107 struct cb_sequenceres *res); 107 struct cb_sequenceres *res);
108 108
109extern int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation,
110 const nfs4_stateid *stateid);
111
112#define RCA4_TYPE_MASK_RDATA_DLG 0
113#define RCA4_TYPE_MASK_WDATA_DLG 1
114
115struct cb_recallanyargs {
116 struct sockaddr *craa_addr;
117 uint32_t craa_objs_to_keep;
118 uint32_t craa_type_mask;
119};
120
121extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy);
109#endif /* CONFIG_NFS_V4_1 */ 122#endif /* CONFIG_NFS_V4_1 */
110 123
111extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res); 124extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
@@ -114,8 +127,9 @@ extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy);
114#ifdef CONFIG_NFS_V4 127#ifdef CONFIG_NFS_V4
115extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt); 128extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
116extern void nfs_callback_down(int minorversion); 129extern void nfs_callback_down(int minorversion);
130extern int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation,
131 const nfs4_stateid *stateid);
117#endif /* CONFIG_NFS_V4 */ 132#endif /* CONFIG_NFS_V4 */
118
119/* 133/*
120 * nfs41: Callbacks are expected to not cause substantial latency, 134 * nfs41: Callbacks are expected to not cause substantial latency,
121 * so we limit their concurrency to 1 by setting up the maximum number 135 * so we limit their concurrency to 1 by setting up the maximum number
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index b7da1f54da68..defa9b4c470e 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -61,6 +61,16 @@ out:
61 return res->status; 61 return res->status;
62} 62}
63 63
64static int (*nfs_validate_delegation_stateid(struct nfs_client *clp))(struct nfs_delegation *, const nfs4_stateid *)
65{
66#if defined(CONFIG_NFS_V4_1)
67 if (clp->cl_minorversion > 0)
68 return nfs41_validate_delegation_stateid;
69#endif
70 return nfs4_validate_delegation_stateid;
71}
72
73
64__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy) 74__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
65{ 75{
66 struct nfs_client *clp; 76 struct nfs_client *clp;
@@ -81,7 +91,8 @@ __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
81 inode = nfs_delegation_find_inode(clp, &args->fh); 91 inode = nfs_delegation_find_inode(clp, &args->fh);
82 if (inode != NULL) { 92 if (inode != NULL) {
83 /* Set up a helper thread to actually return the delegation */ 93 /* Set up a helper thread to actually return the delegation */
84 switch(nfs_async_inode_return_delegation(inode, &args->stateid)) { 94 switch (nfs_async_inode_return_delegation(inode, &args->stateid,
95 nfs_validate_delegation_stateid(clp))) {
85 case 0: 96 case 0:
86 res = 0; 97 res = 0;
87 break; 98 break;
@@ -102,8 +113,31 @@ out:
102 return res; 113 return res;
103} 114}
104 115
116int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
117{
118 if (delegation == NULL || memcmp(delegation->stateid.data, stateid->data,
119 sizeof(delegation->stateid.data)) != 0)
120 return 0;
121 return 1;
122}
123
105#if defined(CONFIG_NFS_V4_1) 124#if defined(CONFIG_NFS_V4_1)
106 125
126int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
127{
128 if (delegation == NULL)
129 return 0;
130
131 /* seqid is 4-bytes long */
132 if (((u32 *) &stateid->data)[0] != 0)
133 return 0;
134 if (memcmp(&delegation->stateid.data[4], &stateid->data[4],
135 sizeof(stateid->data)-4))
136 return 0;
137
138 return 1;
139}
140
107/* 141/*
108 * Validate the sequenceID sent by the server. 142 * Validate the sequenceID sent by the server.
109 * Return success if the sequenceID is one more than what we last saw on 143 * Return success if the sequenceID is one more than what we last saw on
@@ -227,4 +261,32 @@ out:
227 return res->csr_status; 261 return res->csr_status;
228} 262}
229 263
264unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy)
265{
266 struct nfs_client *clp;
267 int status;
268 fmode_t flags = 0;
269
270 status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
271 clp = nfs_find_client(args->craa_addr, 4);
272 if (clp == NULL)
273 goto out;
274
275 dprintk("NFS: RECALL_ANY callback request from %s\n",
276 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
277
278 if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
279 &args->craa_type_mask))
280 flags = FMODE_READ;
281 if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
282 &args->craa_type_mask))
283 flags |= FMODE_WRITE;
284
285 if (flags)
286 nfs_expire_all_delegation_types(clp, flags);
287 status = htonl(NFS4_OK);
288out:
289 dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
290 return status;
291}
230#endif /* CONFIG_NFS_V4_1 */ 292#endif /* CONFIG_NFS_V4_1 */
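The nfs_validate_delegation_stateid() declaration added above, a function returning a function pointer, is dense; it is equivalent to the following typedef form (the typedef name is made up here for readability):

    typedef int (*nfs_stateid_validator_t)(struct nfs_delegation *,
                                           const nfs4_stateid *);

    static nfs_stateid_validator_t
    nfs_validate_delegation_stateid(struct nfs_client *clp)
    {
    #if defined(CONFIG_NFS_V4_1)
            if (clp->cl_minorversion > 0)
                    return nfs41_validate_delegation_stateid;
    #endif
            return nfs4_validate_delegation_stateid;
    }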
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 76b0aa0f73bf..8e1a2511c8be 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -23,6 +23,7 @@
23#if defined(CONFIG_NFS_V4_1) 23#if defined(CONFIG_NFS_V4_1)
24#define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \ 24#define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
25 4 + 1 + 3) 25 4 + 1 + 3)
26#define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
26#endif /* CONFIG_NFS_V4_1 */ 27#endif /* CONFIG_NFS_V4_1 */
27 28
28#define NFSDBG_FACILITY NFSDBG_CALLBACK 29#define NFSDBG_FACILITY NFSDBG_CALLBACK
@@ -326,6 +327,25 @@ out_free:
326 goto out; 327 goto out;
327} 328}
328 329
330static unsigned decode_recallany_args(struct svc_rqst *rqstp,
331 struct xdr_stream *xdr,
332 struct cb_recallanyargs *args)
333{
334 uint32_t *p;
335
336 args->craa_addr = svc_addr(rqstp);
337 p = read_buf(xdr, 4);
338 if (unlikely(p == NULL))
339 return htonl(NFS4ERR_BADXDR);
340 args->craa_objs_to_keep = ntohl(*p++);
341 p = read_buf(xdr, 4);
342 if (unlikely(p == NULL))
343 return htonl(NFS4ERR_BADXDR);
344 args->craa_type_mask = ntohl(*p);
345
346 return 0;
347}
348
329#endif /* CONFIG_NFS_V4_1 */ 349#endif /* CONFIG_NFS_V4_1 */
330 350
331static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) 351static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
@@ -533,6 +553,7 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
533 case OP_CB_GETATTR: 553 case OP_CB_GETATTR:
534 case OP_CB_RECALL: 554 case OP_CB_RECALL:
535 case OP_CB_SEQUENCE: 555 case OP_CB_SEQUENCE:
556 case OP_CB_RECALL_ANY:
536 *op = &callback_ops[op_nr]; 557 *op = &callback_ops[op_nr];
537 break; 558 break;
538 559
@@ -540,7 +561,6 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
540 case OP_CB_NOTIFY_DEVICEID: 561 case OP_CB_NOTIFY_DEVICEID:
541 case OP_CB_NOTIFY: 562 case OP_CB_NOTIFY:
542 case OP_CB_PUSH_DELEG: 563 case OP_CB_PUSH_DELEG:
543 case OP_CB_RECALL_ANY:
544 case OP_CB_RECALLABLE_OBJ_AVAIL: 564 case OP_CB_RECALLABLE_OBJ_AVAIL:
545 case OP_CB_RECALL_SLOT: 565 case OP_CB_RECALL_SLOT:
546 case OP_CB_WANTS_CANCELLED: 566 case OP_CB_WANTS_CANCELLED:
@@ -688,6 +708,11 @@ static struct callback_op callback_ops[] = {
688 .encode_res = (callback_encode_res_t)encode_cb_sequence_res, 708 .encode_res = (callback_encode_res_t)encode_cb_sequence_res,
689 .res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ, 709 .res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ,
690 }, 710 },
711 [OP_CB_RECALL_ANY] = {
712 .process_op = (callback_process_op_t)nfs4_callback_recallany,
713 .decode_args = (callback_decode_arg_t)decode_recallany_args,
714 .res_maxsize = CB_OP_RECALLANY_RES_MAXSZ,
715 },
691#endif /* CONFIG_NFS_V4_1 */ 716#endif /* CONFIG_NFS_V4_1 */
692}; 717};
693 718
@@ -718,3 +743,10 @@ struct svc_version nfs4_callback_version1 = {
718 .vs_dispatch = NULL, 743 .vs_dispatch = NULL,
719}; 744};
720 745
746struct svc_version nfs4_callback_version4 = {
747 .vs_vers = 4,
748 .vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
749 .vs_proc = nfs4_callback_procedures1,
750 .vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
751 .vs_dispatch = NULL,
752};
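decode_recallany_args() above pulls two big-endian words off the wire one reservation at a time; the same pattern generalizes to any fixed-size argument block. A sketch of the idiom, assuming the read_buf() helper local to this file (it returns a pointer to n decoded bytes, or NULL if the stream is short):

    static __be32 decode_two_words(struct xdr_stream *xdr, u32 *a, u32 *b)
    {
            __be32 *p = read_buf(xdr, 8);   /* reserve both words at once */

            if (unlikely(p == NULL))
                    return htonl(NFS4ERR_BADXDR);
            *a = ntohl(*p++);
            *b = ntohl(*p);
            return 0;
    }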
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 99ea196f071f..ee77713ce68b 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1260,10 +1260,20 @@ error:
1260static void nfs4_session_set_rwsize(struct nfs_server *server) 1260static void nfs4_session_set_rwsize(struct nfs_server *server)
1261{ 1261{
1262#ifdef CONFIG_NFS_V4_1 1262#ifdef CONFIG_NFS_V4_1
1263 struct nfs4_session *sess;
1264 u32 server_resp_sz;
1265 u32 server_rqst_sz;
1266
1263 if (!nfs4_has_session(server->nfs_client)) 1267 if (!nfs4_has_session(server->nfs_client))
1264 return; 1268 return;
1265 server->rsize = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 1269 sess = server->nfs_client->cl_session;
1266 server->wsize = server->nfs_client->cl_session->fc_attrs.max_rqst_sz; 1270 server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
1271 server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
1272
1273 if (server->rsize > server_resp_sz)
1274 server->rsize = server_resp_sz;
1275 if (server->wsize > server_rqst_sz)
1276 server->wsize = server_rqst_sz;
1267#endif /* CONFIG_NFS_V4_1 */ 1277#endif /* CONFIG_NFS_V4_1 */
1268} 1278}
1269 1279
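
nfs4_session_set_rwsize() no longer copies the raw session limits into rsize/wsize; it first subtracts the READ/WRITE compound overhead and then only clamps values that already exceed the result, so a smaller mount-time setting survives. A sketch of the arithmetic, with invented overhead constants standing in for nfs41_maxread_overhead and nfs41_maxwrite_overhead:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real overheads are derived from
 * the compound header and the READ/WRITE op encodings. */
#define MAXREAD_OVERHEAD  340
#define MAXWRITE_OVERHEAD 392

static void set_rwsize(uint32_t *rsize, uint32_t *wsize,
		       uint32_t max_resp_sz, uint32_t max_rqst_sz)
{
	uint32_t resp = max_resp_sz - MAXREAD_OVERHEAD;
	uint32_t rqst = max_rqst_sz - MAXWRITE_OVERHEAD;

	if (*rsize > resp)
		*rsize = resp;	/* clamp, never grow */
	if (*wsize > rqst)
		*wsize = rqst;
}

int main(void)
{
	uint32_t rsize = 1048576, wsize = 1048576;

	set_rwsize(&rsize, &wsize, 65536, 65536);
	printf("rsize=%u wsize=%u\n", rsize, wsize);
	return 0;
}
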
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 6dd48a4405b4..2563bebc4c67 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -92,7 +92,7 @@ out:
92 return status; 92 return status;
93} 93}
94 94
95static void nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid) 95static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
96{ 96{
97 struct nfs_inode *nfsi = NFS_I(inode); 97 struct nfs_inode *nfsi = NFS_I(inode);
98 struct nfs_open_context *ctx; 98 struct nfs_open_context *ctx;
@@ -116,10 +116,11 @@ again:
116 err = nfs_delegation_claim_locks(ctx, state); 116 err = nfs_delegation_claim_locks(ctx, state);
117 put_nfs_open_context(ctx); 117 put_nfs_open_context(ctx);
118 if (err != 0) 118 if (err != 0)
119 return; 119 return err;
120 goto again; 120 goto again;
121 } 121 }
122 spin_unlock(&inode->i_lock); 122 spin_unlock(&inode->i_lock);
123 return 0;
123} 124}
124 125
125/* 126/*
@@ -261,30 +262,34 @@ static void nfs_msync_inode(struct inode *inode)
261/* 262/*
262 * Basic procedure for returning a delegation to the server 263 * Basic procedure for returning a delegation to the server
263 */ 264 */
264static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation) 265static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
265{ 266{
266 struct nfs_inode *nfsi = NFS_I(inode); 267 struct nfs_inode *nfsi = NFS_I(inode);
268 int err;
267 269
268 nfs_msync_inode(inode);
269 /* 270 /*
270 * Guard against new delegated open/lock/unlock calls and against 271 * Guard against new delegated open/lock/unlock calls and against
271 * state recovery 272 * state recovery
272 */ 273 */
273 down_write(&nfsi->rwsem); 274 down_write(&nfsi->rwsem);
274 nfs_delegation_claim_opens(inode, &delegation->stateid); 275 err = nfs_delegation_claim_opens(inode, &delegation->stateid);
275 up_write(&nfsi->rwsem); 276 up_write(&nfsi->rwsem);
276 nfs_msync_inode(inode); 277 if (err)
278 goto out;
277 279
278 return nfs_do_return_delegation(inode, delegation, 1); 280 err = nfs_do_return_delegation(inode, delegation, issync);
281out:
282 return err;
279} 283}
280 284
281/* 285/*
282 * Return all delegations that have been marked for return 286 * Return all delegations that have been marked for return
283 */ 287 */
284void nfs_client_return_marked_delegations(struct nfs_client *clp) 288int nfs_client_return_marked_delegations(struct nfs_client *clp)
285{ 289{
286 struct nfs_delegation *delegation; 290 struct nfs_delegation *delegation;
287 struct inode *inode; 291 struct inode *inode;
292 int err = 0;
288 293
289restart: 294restart:
290 rcu_read_lock(); 295 rcu_read_lock();
@@ -298,12 +303,18 @@ restart:
298 delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL); 303 delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
299 spin_unlock(&clp->cl_lock); 304 spin_unlock(&clp->cl_lock);
300 rcu_read_unlock(); 305 rcu_read_unlock();
301 if (delegation != NULL) 306 if (delegation != NULL) {
302 __nfs_inode_return_delegation(inode, delegation); 307 filemap_flush(inode->i_mapping);
308 err = __nfs_inode_return_delegation(inode, delegation, 0);
309 }
303 iput(inode); 310 iput(inode);
304 goto restart; 311 if (!err)
312 goto restart;
313 set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
314 return err;
305 } 315 }
306 rcu_read_unlock(); 316 rcu_read_unlock();
317 return 0;
307} 318}
308 319
309/* 320/*
@@ -338,8 +349,10 @@ int nfs_inode_return_delegation(struct inode *inode)
338 spin_lock(&clp->cl_lock); 349 spin_lock(&clp->cl_lock);
339 delegation = nfs_detach_delegation_locked(nfsi, NULL); 350 delegation = nfs_detach_delegation_locked(nfsi, NULL);
340 spin_unlock(&clp->cl_lock); 351 spin_unlock(&clp->cl_lock);
341 if (delegation != NULL) 352 if (delegation != NULL) {
342 err = __nfs_inode_return_delegation(inode, delegation); 353 nfs_msync_inode(inode);
354 err = __nfs_inode_return_delegation(inode, delegation, 1);
355 }
343 } 356 }
344 return err; 357 return err;
345} 358}
@@ -368,33 +381,47 @@ void nfs_super_return_all_delegations(struct super_block *sb)
368 spin_unlock(&delegation->lock); 381 spin_unlock(&delegation->lock);
369 } 382 }
370 rcu_read_unlock(); 383 rcu_read_unlock();
371 nfs_client_return_marked_delegations(clp); 384 if (nfs_client_return_marked_delegations(clp) != 0)
385 nfs4_schedule_state_manager(clp);
372} 386}
373 387
374static void nfs_client_mark_return_all_delegations(struct nfs_client *clp) 388static
389void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp, fmode_t flags)
375{ 390{
376 struct nfs_delegation *delegation; 391 struct nfs_delegation *delegation;
377 392
378 rcu_read_lock(); 393 rcu_read_lock();
379 list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { 394 list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
380 set_bit(NFS_DELEGATION_RETURN, &delegation->flags); 395 if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
381 set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state); 396 continue;
397 if (delegation->type & flags)
398 nfs_mark_return_delegation(clp, delegation);
382 } 399 }
383 rcu_read_unlock(); 400 rcu_read_unlock();
384} 401}
385 402
403static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
404{
405 nfs_client_mark_return_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
406}
407
386static void nfs_delegation_run_state_manager(struct nfs_client *clp) 408static void nfs_delegation_run_state_manager(struct nfs_client *clp)
387{ 409{
388 if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) 410 if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
389 nfs4_schedule_state_manager(clp); 411 nfs4_schedule_state_manager(clp);
390} 412}
391 413
392void nfs_expire_all_delegations(struct nfs_client *clp) 414void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags)
393{ 415{
394 nfs_client_mark_return_all_delegations(clp); 416 nfs_client_mark_return_all_delegation_types(clp, flags);
395 nfs_delegation_run_state_manager(clp); 417 nfs_delegation_run_state_manager(clp);
396} 418}
397 419
420void nfs_expire_all_delegations(struct nfs_client *clp)
421{
422 nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
423}
424
398/* 425/*
399 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error. 426 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
400 */ 427 */
@@ -413,8 +440,7 @@ static void nfs_client_mark_return_unreferenced_delegations(struct nfs_client *c
413 list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { 440 list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
414 if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags)) 441 if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
415 continue; 442 continue;
416 set_bit(NFS_DELEGATION_RETURN, &delegation->flags); 443 nfs_mark_return_delegation(clp, delegation);
417 set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
418 } 444 }
419 rcu_read_unlock(); 445 rcu_read_unlock();
420} 446}
@@ -428,18 +454,21 @@ void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
428/* 454/*
429 * Asynchronous delegation recall! 455 * Asynchronous delegation recall!
430 */ 456 */
431int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid) 457int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid,
458 int (*validate_stateid)(struct nfs_delegation *delegation,
459 const nfs4_stateid *stateid))
432{ 460{
433 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 461 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
434 struct nfs_delegation *delegation; 462 struct nfs_delegation *delegation;
435 463
436 rcu_read_lock(); 464 rcu_read_lock();
437 delegation = rcu_dereference(NFS_I(inode)->delegation); 465 delegation = rcu_dereference(NFS_I(inode)->delegation);
438 if (delegation == NULL || memcmp(delegation->stateid.data, stateid->data, 466
439 sizeof(delegation->stateid.data)) != 0) { 467 if (!validate_stateid(delegation, stateid)) {
440 rcu_read_unlock(); 468 rcu_read_unlock();
441 return -ENOENT; 469 return -ENOENT;
442 } 470 }
471
443 nfs_mark_return_delegation(clp, delegation); 472 nfs_mark_return_delegation(clp, delegation);
444 rcu_read_unlock(); 473 rcu_read_unlock();
445 nfs_delegation_run_state_manager(clp); 474 nfs_delegation_run_state_manager(clp);
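
nfs_client_return_marked_delegations() now reports the first failure instead of looping unconditionally: success restarts the scan from the head, since the list can change once the locks are dropped, while an error re-arms NFS4CLNT_DELEGRETURN and returns so the state manager can retry later. A userspace model of that restart-on-success, bail-on-error scan over a plain array:

#include <stdio.h>

#define N 4

static int marked[N] = { 1, 0, 1, 1 };

/* Stand-in for __nfs_inode_return_delegation(); returns 0 or -errno. */
static int return_one(int i)
{
	marked[i] = 0;
	printf("returned delegation %d\n", i);
	return 0;
}

static int return_marked(void)
{
	int i, err;

restart:
	for (i = 0; i < N; i++) {
		if (!marked[i])
			continue;
		err = return_one(i);
		if (err)
			return err;	/* leave the rest for a later retry */
		goto restart;		/* state may have changed; rescan */
	}
	return 0;
}

int main(void)
{
	return return_marked();
}
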
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 09f383795174..944b627ec6e1 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -34,15 +34,18 @@ enum {
34int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); 34int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
35void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); 35void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
36int nfs_inode_return_delegation(struct inode *inode); 36int nfs_inode_return_delegation(struct inode *inode);
37int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid); 37int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid,
38 int (*validate_stateid)(struct nfs_delegation *delegation,
39 const nfs4_stateid *stateid));
38void nfs_inode_return_delegation_noreclaim(struct inode *inode); 40void nfs_inode_return_delegation_noreclaim(struct inode *inode);
39 41
40struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle); 42struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle);
41void nfs_super_return_all_delegations(struct super_block *sb); 43void nfs_super_return_all_delegations(struct super_block *sb);
42void nfs_expire_all_delegations(struct nfs_client *clp); 44void nfs_expire_all_delegations(struct nfs_client *clp);
45void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags);
43void nfs_expire_unreferenced_delegations(struct nfs_client *clp); 46void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
44void nfs_handle_cb_pathdown(struct nfs_client *clp); 47void nfs_handle_cb_pathdown(struct nfs_client *clp);
45void nfs_client_return_marked_delegations(struct nfs_client *clp); 48int nfs_client_return_marked_delegations(struct nfs_client *clp);
46 49
47void nfs_delegation_mark_reclaim(struct nfs_client *clp); 50void nfs_delegation_mark_reclaim(struct nfs_client *clp);
48void nfs_delegation_reap_unclaimed(struct nfs_client *clp); 51void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 7cb298525eef..2c5ace4f00a7 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1579,55 +1579,46 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1579 struct dentry *dentry = NULL, *rehash = NULL; 1579 struct dentry *dentry = NULL, *rehash = NULL;
1580 int error = -EBUSY; 1580 int error = -EBUSY;
1581 1581
1582 /*
1583 * To prevent any new references to the target during the rename,
1584 * we unhash the dentry and free the inode in advance.
1585 */
1586 if (!d_unhashed(new_dentry)) {
1587 d_drop(new_dentry);
1588 rehash = new_dentry;
1589 }
1590
1591 dfprintk(VFS, "NFS: rename(%s/%s -> %s/%s, ct=%d)\n", 1582 dfprintk(VFS, "NFS: rename(%s/%s -> %s/%s, ct=%d)\n",
1592 old_dentry->d_parent->d_name.name, old_dentry->d_name.name, 1583 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
1593 new_dentry->d_parent->d_name.name, new_dentry->d_name.name, 1584 new_dentry->d_parent->d_name.name, new_dentry->d_name.name,
1594 atomic_read(&new_dentry->d_count)); 1585 atomic_read(&new_dentry->d_count));
1595 1586
1596 /* 1587 /*
1597 * First check whether the target is busy ... we can't 1588 * For non-directories, check whether the target is busy and if so,
1598 * safely do _any_ rename if the target is in use. 1589 * make a copy of the dentry and then do a silly-rename. If the
1599 * 1590 * silly-rename succeeds, the copied dentry is hashed and becomes
1600 * For files, make a copy of the dentry and then do a 1591 * the new target.
1601 * silly-rename. If the silly-rename succeeds, the
1602 * copied dentry is hashed and becomes the new target.
1603 */ 1592 */
1604 if (!new_inode) 1593 if (new_inode && !S_ISDIR(new_inode->i_mode)) {
1605 goto go_ahead; 1594 /*
1606 if (S_ISDIR(new_inode->i_mode)) { 1595 * To prevent any new references to the target during the
1607 error = -EISDIR; 1596 * rename, we unhash the dentry in advance.
1608 if (!S_ISDIR(old_inode->i_mode)) 1597 */
1609 goto out; 1598 if (!d_unhashed(new_dentry)) {
1610 } else if (atomic_read(&new_dentry->d_count) > 2) { 1599 d_drop(new_dentry);
1611 int err; 1600 rehash = new_dentry;
1612 /* copy the target dentry's name */ 1601 }
1613 dentry = d_alloc(new_dentry->d_parent, 1602
1614 &new_dentry->d_name); 1603 if (atomic_read(&new_dentry->d_count) > 2) {
1615 if (!dentry) 1604 int err;
1616 goto out; 1605
1606 /* copy the target dentry's name */
1607 dentry = d_alloc(new_dentry->d_parent,
1608 &new_dentry->d_name);
1609 if (!dentry)
1610 goto out;
1617 1611
1618 /* silly-rename the existing target ... */ 1612 /* silly-rename the existing target ... */
1619 err = nfs_sillyrename(new_dir, new_dentry); 1613 err = nfs_sillyrename(new_dir, new_dentry);
1620 if (!err) { 1614 if (err)
1621 new_dentry = rehash = dentry; 1615 goto out;
1616
1617 new_dentry = dentry;
1622 new_inode = NULL; 1618 new_inode = NULL;
1623 /* instantiate the replacement target */ 1619 }
1624 d_instantiate(new_dentry, NULL);
1625 } else if (atomic_read(&new_dentry->d_count) > 1)
1626 /* dentry still busy? */
1627 goto out;
1628 } 1620 }
1629 1621
1630go_ahead:
1631 /* 1622 /*
1632 * ... prune child dentries and writebacks if needed. 1623 * ... prune child dentries and writebacks if needed.
1633 */ 1624 */
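
The rewritten nfs_rename() only unhashes the target and considers a silly-rename when the target exists and is not a directory, and a failed silly-rename now aborts the operation rather than rechecking d_count. A compressed model of the new decision order, using stub types in place of dentries and inodes:

#include <stdbool.h>
#include <stdio.h>

struct target {
	bool exists;
	bool is_dir;
	int refcount;	/* stand-in for d_count */
};

/* Returns 0 to proceed with the rename, -1 on failure. */
static int prepare_target(struct target *t)
{
	if (!t->exists || t->is_dir)
		return 0;		/* nothing to move aside */

	/* here the kernel unhashes the target to block new lookups */
	if (t->refcount > 2) {
		printf("silly-renaming busy target\n");
		/* a silly-rename failure would return -1 here */
		t->exists = false;	/* replacement dentry takes over */
	}
	return 0;
}

int main(void)
{
	struct target t = { true, false, 3 };

	return prepare_target(&t) ? 1 : 0;
}
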
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index f4d54ba97cc6..95e1ca765d47 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -146,7 +146,7 @@ static int nfs_dns_show(struct seq_file *m, struct cache_detail *cd,
146 return 0; 146 return 0;
147} 147}
148 148
149struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd, 149static struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
150 struct nfs_dns_ent *key) 150 struct nfs_dns_ent *key)
151{ 151{
152 struct cache_head *ch; 152 struct cache_head *ch;
@@ -159,7 +159,7 @@ struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
159 return container_of(ch, struct nfs_dns_ent, h); 159 return container_of(ch, struct nfs_dns_ent, h);
160} 160}
161 161
162struct nfs_dns_ent *nfs_dns_update(struct cache_detail *cd, 162static struct nfs_dns_ent *nfs_dns_update(struct cache_detail *cd,
163 struct nfs_dns_ent *new, 163 struct nfs_dns_ent *new,
164 struct nfs_dns_ent *key) 164 struct nfs_dns_ent *key)
165{ 165{
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e21b1bb9972f..29e464d23b32 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -30,6 +30,15 @@ static inline int nfs4_has_session(const struct nfs_client *clp)
30 return 0; 30 return 0;
31} 31}
32 32
33static inline int nfs4_has_persistent_session(const struct nfs_client *clp)
34{
35#ifdef CONFIG_NFS_V4_1
36 if (nfs4_has_session(clp))
37 return (clp->cl_session->flags & SESSION4_PERSIST);
38#endif /* CONFIG_NFS_V4_1 */
39 return 0;
40}
41
33struct nfs_clone_mount { 42struct nfs_clone_mount {
34 const struct super_block *sb; 43 const struct super_block *sb;
35 const struct dentry *dentry; 44 const struct dentry *dentry;
@@ -156,6 +165,7 @@ struct vfsmount *nfs_do_refmount(const struct vfsmount *mnt_parent, struct dentr
156 165
157/* callback_xdr.c */ 166/* callback_xdr.c */
158extern struct svc_version nfs4_callback_version1; 167extern struct svc_version nfs4_callback_version1;
168extern struct svc_version nfs4_callback_version4;
159 169
160/* pagelist.c */ 170/* pagelist.c */
161extern int __init nfs_init_nfspagecache(void); 171extern int __init nfs_init_nfspagecache(void);
@@ -177,24 +187,14 @@ extern __be32 * nfs_decode_dirent(__be32 *, struct nfs_entry *, int);
177extern struct rpc_procinfo nfs3_procedures[]; 187extern struct rpc_procinfo nfs3_procedures[];
178extern __be32 *nfs3_decode_dirent(__be32 *, struct nfs_entry *, int); 188extern __be32 *nfs3_decode_dirent(__be32 *, struct nfs_entry *, int);
179 189
180/* nfs4proc.c */
181static inline void nfs4_restart_rpc(struct rpc_task *task,
182 const struct nfs_client *clp)
183{
184#ifdef CONFIG_NFS_V4_1
185 if (nfs4_has_session(clp) &&
186 test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) {
187 rpc_restart_call_prepare(task);
188 return;
189 }
190#endif /* CONFIG_NFS_V4_1 */
191 rpc_restart_call(task);
192}
193
194/* nfs4xdr.c */ 190/* nfs4xdr.c */
195#ifdef CONFIG_NFS_V4 191#ifdef CONFIG_NFS_V4
196extern __be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus); 192extern __be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus);
197#endif 193#endif
194#ifdef CONFIG_NFS_V4_1
195extern const u32 nfs41_maxread_overhead;
196extern const u32 nfs41_maxwrite_overhead;
197#endif
198 198
199/* nfs4proc.c */ 199/* nfs4proc.c */
200#ifdef CONFIG_NFS_V4 200#ifdef CONFIG_NFS_V4
@@ -273,20 +273,6 @@ extern int _nfs4_call_sync_session(struct nfs_server *server,
273 struct nfs4_sequence_res *res, 273 struct nfs4_sequence_res *res,
274 int cache_reply); 274 int cache_reply);
275 275
276#ifdef CONFIG_NFS_V4_1
277extern void nfs41_sequence_free_slot(const struct nfs_client *,
278 struct nfs4_sequence_res *res);
279#endif /* CONFIG_NFS_V4_1 */
280
281static inline void nfs4_sequence_free_slot(const struct nfs_client *clp,
282 struct nfs4_sequence_res *res)
283{
284#ifdef CONFIG_NFS_V4_1
285 if (nfs4_has_session(clp))
286 nfs41_sequence_free_slot(clp, res);
287#endif /* CONFIG_NFS_V4_1 */
288}
289
290/* 276/*
291 * Determine the device name as a string 277 * Determine the device name as a string
292 */ 278 */
@@ -380,3 +366,15 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
380 return ((unsigned long)len + (unsigned long)base + 366 return ((unsigned long)len + (unsigned long)base +
381 PAGE_SIZE - 1) >> PAGE_SHIFT; 367 PAGE_SIZE - 1) >> PAGE_SHIFT;
382} 368}
369
370/*
371 * Helper for restarting RPC calls in the possible presence of NFSv4.1
372 * sessions.
373 */
374static inline void nfs_restart_rpc(struct rpc_task *task, const struct nfs_client *clp)
375{
376 if (nfs4_has_session(clp))
377 rpc_restart_call_prepare(task);
378 else
379 rpc_restart_call(task);
380}
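
Both additions to internal.h reduce to simple predicates on the client's session: nfs4_has_persistent_session() tests SESSION4_PERSIST, and nfs_restart_rpc() picks rpc_restart_call_prepare() whenever a session exists, so the SEQUENCE setup runs again on restart. A flag-test sketch, with an illustrative flag value rather than the wire constant:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SESSION_PERSIST 0x01	/* illustrative bit, not the real value */

struct session { uint32_t flags; };
struct client  { struct session *session; };

static bool has_persistent_session(const struct client *clp)
{
	return clp->session && (clp->session->flags & SESSION_PERSIST);
}

static void restart_rpc(const struct client *clp)
{
	if (clp->session)
		printf("restart via prepare (re-run SEQUENCE setup)\n");
	else
		printf("plain restart\n");
}

int main(void)
{
	struct session s = { SESSION_PERSIST };
	struct client c = { &s };

	printf("persistent=%d\n", has_persistent_session(&c));
	restart_rpc(&c);
	return 0;
}
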
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h
index ceda50aad73c..46d779abafd3 100644
--- a/fs/nfs/iostat.h
+++ b/fs/nfs/iostat.h
@@ -25,13 +25,7 @@ struct nfs_iostats {
25static inline void nfs_inc_server_stats(const struct nfs_server *server, 25static inline void nfs_inc_server_stats(const struct nfs_server *server,
26 enum nfs_stat_eventcounters stat) 26 enum nfs_stat_eventcounters stat)
27{ 27{
28 struct nfs_iostats *iostats; 28 this_cpu_inc(server->io_stats->events[stat]);
29 int cpu;
30
31 cpu = get_cpu();
32 iostats = per_cpu_ptr(server->io_stats, cpu);
33 iostats->events[stat]++;
34 put_cpu();
35} 29}
36 30
37static inline void nfs_inc_stats(const struct inode *inode, 31static inline void nfs_inc_stats(const struct inode *inode,
@@ -44,13 +38,7 @@ static inline void nfs_add_server_stats(const struct nfs_server *server,
44 enum nfs_stat_bytecounters stat, 38 enum nfs_stat_bytecounters stat,
45 unsigned long addend) 39 unsigned long addend)
46{ 40{
47 struct nfs_iostats *iostats; 41 this_cpu_add(server->io_stats->bytes[stat], addend);
48 int cpu;
49
50 cpu = get_cpu();
51 iostats = per_cpu_ptr(server->io_stats, cpu);
52 iostats->bytes[stat] += addend;
53 put_cpu();
54} 42}
55 43
56static inline void nfs_add_stats(const struct inode *inode, 44static inline void nfs_add_stats(const struct inode *inode,
@@ -65,13 +53,7 @@ static inline void nfs_add_fscache_stats(struct inode *inode,
65 enum nfs_stat_fscachecounters stat, 53 enum nfs_stat_fscachecounters stat,
66 unsigned long addend) 54 unsigned long addend)
67{ 55{
68 struct nfs_iostats *iostats; 56 this_cpu_add(NFS_SERVER(inode)->io_stats->fscache[stat], addend);
69 int cpu;
70
71 cpu = get_cpu();
72 iostats = per_cpu_ptr(NFS_SERVER(inode)->io_stats, cpu);
73 iostats->fscache[stat] += addend;
74 put_cpu();
75} 57}
76#endif 58#endif
77 59
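
The iostat helpers collapse the get_cpu()/per_cpu_ptr()/put_cpu() sequence into single this_cpu_inc()/this_cpu_add() operations, which combine the per-cpu address calculation and the update into one preemption-safe step. Userspace has no direct equivalent; a rough analogy with one counter per thread (__thread) shows the same idea of bumping a local slot without shared-state synchronization:

#include <pthread.h>
#include <stdio.h>

static __thread unsigned long events;	/* per-thread, like per-cpu */

static void inc_stat(void)
{
	events++;	/* no lock needed: the slot is ours alone */
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++)
		inc_stat();
	printf("thread counted %lu events\n", events);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Build with -pthread; each thread updates only its own counter, just as each CPU updates only its own nfs_iostats slot.
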
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 6ea07a3c75d4..7e57b04e4014 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -44,7 +44,8 @@ enum nfs4_client_state {
44 NFS4CLNT_RECLAIM_REBOOT, 44 NFS4CLNT_RECLAIM_REBOOT,
45 NFS4CLNT_RECLAIM_NOGRACE, 45 NFS4CLNT_RECLAIM_NOGRACE,
46 NFS4CLNT_DELEGRETURN, 46 NFS4CLNT_DELEGRETURN,
47 NFS4CLNT_SESSION_SETUP, 47 NFS4CLNT_SESSION_RESET,
48 NFS4CLNT_SESSION_DRAINING,
48}; 49};
49 50
50/* 51/*
@@ -180,6 +181,7 @@ struct nfs4_state_recovery_ops {
180 int (*recover_lock)(struct nfs4_state *, struct file_lock *); 181 int (*recover_lock)(struct nfs4_state *, struct file_lock *);
181 int (*establish_clid)(struct nfs_client *, struct rpc_cred *); 182 int (*establish_clid)(struct nfs_client *, struct rpc_cred *);
182 struct rpc_cred * (*get_clid_cred)(struct nfs_client *); 183 struct rpc_cred * (*get_clid_cred)(struct nfs_client *);
184 int (*reclaim_complete)(struct nfs_client *);
183}; 185};
184 186
185struct nfs4_state_maintenance_ops { 187struct nfs4_state_maintenance_ops {
@@ -200,9 +202,11 @@ extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);
200/* nfs4proc.c */ 202/* nfs4proc.c */
201extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *); 203extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *);
202extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *); 204extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *);
205extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
203extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *); 206extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
204extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *); 207extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
205extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *); 208extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
209extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
206extern int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait); 210extern int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait);
207extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *); 211extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
208extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *); 212extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
@@ -218,9 +222,11 @@ extern int nfs4_setup_sequence(struct nfs_client *clp,
218 int cache_reply, struct rpc_task *task); 222 int cache_reply, struct rpc_task *task);
219extern void nfs4_destroy_session(struct nfs4_session *session); 223extern void nfs4_destroy_session(struct nfs4_session *session);
220extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp); 224extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
221extern int nfs4_proc_create_session(struct nfs_client *, int reset); 225extern int nfs4_proc_create_session(struct nfs_client *);
222extern int nfs4_proc_destroy_session(struct nfs4_session *); 226extern int nfs4_proc_destroy_session(struct nfs4_session *);
223extern int nfs4_init_session(struct nfs_server *server); 227extern int nfs4_init_session(struct nfs_server *server);
228extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
229 struct nfs_fsinfo *fsinfo);
224#else /* CONFIG_NFS_v4_1 */ 230#else /* CONFIG_NFS_v4_1 */
225static inline int nfs4_setup_sequence(struct nfs_client *clp, 231static inline int nfs4_setup_sequence(struct nfs_client *clp,
226 struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, 232 struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
@@ -267,6 +273,7 @@ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
267extern void nfs4_schedule_state_recovery(struct nfs_client *); 273extern void nfs4_schedule_state_recovery(struct nfs_client *);
268extern void nfs4_schedule_state_manager(struct nfs_client *); 274extern void nfs4_schedule_state_manager(struct nfs_client *);
269extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); 275extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
276extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
270extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); 277extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
271extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); 278extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
272extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t); 279extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
@@ -287,6 +294,7 @@ struct nfs4_mount_data;
287 294
288/* callback_xdr.c */ 295/* callback_xdr.c */
289extern struct svc_version nfs4_callback_version1; 296extern struct svc_version nfs4_callback_version1;
297extern struct svc_version nfs4_callback_version4;
290 298
291#else 299#else
292 300
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 741a562177fc..9f5f11ecfd93 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -270,11 +270,18 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
270 case -NFS4ERR_SEQ_MISORDERED: 270 case -NFS4ERR_SEQ_MISORDERED:
271 dprintk("%s ERROR: %d Reset session\n", __func__, 271 dprintk("%s ERROR: %d Reset session\n", __func__,
272 errorcode); 272 errorcode);
273 set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state); 273 nfs4_schedule_state_recovery(clp);
274 exception->retry = 1; 274 exception->retry = 1;
275 /* FALLTHROUGH */ 275 break;
276#endif /* !defined(CONFIG_NFS_V4_1) */ 276#endif /* !defined(CONFIG_NFS_V4_1) */
277 case -NFS4ERR_FILE_OPEN: 277 case -NFS4ERR_FILE_OPEN:
278 if (exception->timeout > HZ) {
279 /* We have retried a decent amount, time to
280 * fail
281 */
282 ret = -EBUSY;
283 break;
284 }
278 case -NFS4ERR_GRACE: 285 case -NFS4ERR_GRACE:
279 case -NFS4ERR_DELAY: 286 case -NFS4ERR_DELAY:
280 ret = nfs4_delay(server->client, &exception->timeout); 287 ret = nfs4_delay(server->client, &exception->timeout);
@@ -311,48 +318,54 @@ static void renew_lease(const struct nfs_server *server, unsigned long timestamp
311 * so we need to scan down from highest_used_slotid to 0 looking for the now 318 * so we need to scan down from highest_used_slotid to 0 looking for the now
312 * highest slotid in use. 319 * highest slotid in use.
313 * If none found, highest_used_slotid is set to -1. 320 * If none found, highest_used_slotid is set to -1.
321 *
322 * Must be called while holding tbl->slot_tbl_lock
314 */ 323 */
315static void 324static void
316nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid) 325nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
317{ 326{
318 int slotid = free_slotid; 327 int slotid = free_slotid;
319 328
320 spin_lock(&tbl->slot_tbl_lock);
321 /* clear used bit in bitmap */ 329 /* clear used bit in bitmap */
322 __clear_bit(slotid, tbl->used_slots); 330 __clear_bit(slotid, tbl->used_slots);
323 331
324 /* update highest_used_slotid when it is freed */ 332 /* update highest_used_slotid when it is freed */
325 if (slotid == tbl->highest_used_slotid) { 333 if (slotid == tbl->highest_used_slotid) {
326 slotid = find_last_bit(tbl->used_slots, tbl->max_slots); 334 slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
327 if (slotid >= 0 && slotid < tbl->max_slots) 335 if (slotid < tbl->max_slots)
328 tbl->highest_used_slotid = slotid; 336 tbl->highest_used_slotid = slotid;
329 else 337 else
330 tbl->highest_used_slotid = -1; 338 tbl->highest_used_slotid = -1;
331 } 339 }
332 rpc_wake_up_next(&tbl->slot_tbl_waitq);
333 spin_unlock(&tbl->slot_tbl_lock);
334 dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__, 340 dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__,
335 free_slotid, tbl->highest_used_slotid); 341 free_slotid, tbl->highest_used_slotid);
336} 342}
337 343
338void nfs41_sequence_free_slot(const struct nfs_client *clp, 344static void nfs41_sequence_free_slot(const struct nfs_client *clp,
339 struct nfs4_sequence_res *res) 345 struct nfs4_sequence_res *res)
340{ 346{
341 struct nfs4_slot_table *tbl; 347 struct nfs4_slot_table *tbl;
342 348
343 if (!nfs4_has_session(clp)) {
344 dprintk("%s: No session\n", __func__);
345 return;
346 }
347 tbl = &clp->cl_session->fc_slot_table; 349 tbl = &clp->cl_session->fc_slot_table;
348 if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) { 350 if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) {
349 dprintk("%s: No slot\n", __func__);
350 /* just wake up the next guy waiting since 351 /* just wake up the next guy waiting since
351 * we may have not consumed a slot after all */ 352 * we may have not consumed a slot after all */
352 rpc_wake_up_next(&tbl->slot_tbl_waitq); 353 dprintk("%s: No slot\n", __func__);
353 return; 354 return;
354 } 355 }
356
357 spin_lock(&tbl->slot_tbl_lock);
355 nfs4_free_slot(tbl, res->sr_slotid); 358 nfs4_free_slot(tbl, res->sr_slotid);
359
360 /* Signal state manager thread if session is drained */
361 if (test_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state)) {
362 if (tbl->highest_used_slotid == -1) {
363 dprintk("%s COMPLETE: Session Drained\n", __func__);
364 complete(&clp->cl_session->complete);
365 }
366 } else
367 rpc_wake_up_next(&tbl->slot_tbl_waitq);
368 spin_unlock(&tbl->slot_tbl_lock);
356 res->sr_slotid = NFS4_MAX_SLOT_TABLE; 369 res->sr_slotid = NFS4_MAX_SLOT_TABLE;
357} 370}
358 371
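
nfs4_free_slot() now runs entirely under slot_tbl_lock taken by the caller, and nfs41_sequence_free_slot() decides, still under the lock, whether to wake the next waiter or to complete() the draining state manager once the last slot goes idle (highest_used_slotid reaches -1). A bitmap model of the free path:

#include <stdio.h>

#define MAX_SLOTS 16

static unsigned int used_slots;		/* one bit per slot */
static int highest_used = -1;

static void free_slot(int slotid)
{
	used_slots &= ~(1u << slotid);

	/* recompute highest_used only when the top slot was freed */
	if (slotid == highest_used) {
		int i;

		highest_used = -1;
		for (i = MAX_SLOTS - 1; i >= 0; i--)
			if (used_slots & (1u << i)) {
				highest_used = i;
				break;
			}
	}
	if (highest_used == -1)
		printf("table drained: signal the state manager\n");
}

int main(void)
{
	used_slots = 0x9;	/* slots 0 and 3 in use */
	highest_used = 3;
	free_slot(3);
	printf("highest_used=%d\n", highest_used);
	free_slot(0);
	return 0;
}
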
@@ -377,10 +390,10 @@ static void nfs41_sequence_done(struct nfs_client *clp,
377 if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) 390 if (res->sr_slotid == NFS4_MAX_SLOT_TABLE)
378 goto out; 391 goto out;
379 392
380 tbl = &clp->cl_session->fc_slot_table; 393 /* Check the SEQUENCE operation status */
381 slot = tbl->slots + res->sr_slotid;
382
383 if (res->sr_status == 0) { 394 if (res->sr_status == 0) {
395 tbl = &clp->cl_session->fc_slot_table;
396 slot = tbl->slots + res->sr_slotid;
384 /* Update the slot's sequence and clientid lease timer */ 397 /* Update the slot's sequence and clientid lease timer */
385 ++slot->seq_nr; 398 ++slot->seq_nr;
386 timestamp = res->sr_renewal_time; 399 timestamp = res->sr_renewal_time;
@@ -388,7 +401,8 @@ static void nfs41_sequence_done(struct nfs_client *clp,
388 if (time_before(clp->cl_last_renewal, timestamp)) 401 if (time_before(clp->cl_last_renewal, timestamp))
389 clp->cl_last_renewal = timestamp; 402 clp->cl_last_renewal = timestamp;
390 spin_unlock(&clp->cl_lock); 403 spin_unlock(&clp->cl_lock);
391 return; 404 /* Check sequence flags */
405 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
392 } 406 }
393out: 407out:
394 /* The session may be reset by one of the error handlers. */ 408 /* The session may be reset by one of the error handlers. */
@@ -429,24 +443,6 @@ out:
429 return ret_id; 443 return ret_id;
430} 444}
431 445
432static int nfs4_recover_session(struct nfs4_session *session)
433{
434 struct nfs_client *clp = session->clp;
435 unsigned int loop;
436 int ret;
437
438 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
439 ret = nfs4_wait_clnt_recover(clp);
440 if (ret != 0)
441 break;
442 if (!test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
443 break;
444 nfs4_schedule_state_manager(clp);
445 ret = -EIO;
446 }
447 return ret;
448}
449
450static int nfs41_setup_sequence(struct nfs4_session *session, 446static int nfs41_setup_sequence(struct nfs4_session *session,
451 struct nfs4_sequence_args *args, 447 struct nfs4_sequence_args *args,
452 struct nfs4_sequence_res *res, 448 struct nfs4_sequence_res *res,
@@ -455,7 +451,6 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
455{ 451{
456 struct nfs4_slot *slot; 452 struct nfs4_slot *slot;
457 struct nfs4_slot_table *tbl; 453 struct nfs4_slot_table *tbl;
458 int status = 0;
459 u8 slotid; 454 u8 slotid;
460 455
461 dprintk("--> %s\n", __func__); 456 dprintk("--> %s\n", __func__);
@@ -468,21 +463,15 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
468 tbl = &session->fc_slot_table; 463 tbl = &session->fc_slot_table;
469 464
470 spin_lock(&tbl->slot_tbl_lock); 465 spin_lock(&tbl->slot_tbl_lock);
471 if (test_bit(NFS4CLNT_SESSION_SETUP, &session->clp->cl_state)) { 466 if (test_bit(NFS4CLNT_SESSION_DRAINING, &session->clp->cl_state)) {
472 if (tbl->highest_used_slotid != -1) { 467 /*
473 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); 468 * The state manager will wait until the slot table is empty.
474 spin_unlock(&tbl->slot_tbl_lock); 469 * Schedule the reset thread
475 dprintk("<-- %s: Session reset: draining\n", __func__); 470 */
476 return -EAGAIN; 471 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
477 }
478
479 /* The slot table is empty; start the reset thread */
480 dprintk("%s Session Reset\n", __func__);
481 spin_unlock(&tbl->slot_tbl_lock); 472 spin_unlock(&tbl->slot_tbl_lock);
482 status = nfs4_recover_session(session); 473 dprintk("%s Schedule Session Reset\n", __func__);
483 if (status) 474 return -EAGAIN;
484 return status;
485 spin_lock(&tbl->slot_tbl_lock);
486 } 475 }
487 476
488 slotid = nfs4_find_slot(tbl, task); 477 slotid = nfs4_find_slot(tbl, task);
@@ -527,7 +516,7 @@ int nfs4_setup_sequence(struct nfs_client *clp,
527 goto out; 516 goto out;
528 ret = nfs41_setup_sequence(clp->cl_session, args, res, cache_reply, 517 ret = nfs41_setup_sequence(clp->cl_session, args, res, cache_reply,
529 task); 518 task);
530 if (ret != -EAGAIN) { 519 if (ret && ret != -EAGAIN) {
531 /* terminate rpc task */ 520 /* terminate rpc task */
532 task->tk_status = ret; 521 task->tk_status = ret;
533 task->tk_action = NULL; 522 task->tk_action = NULL;
@@ -561,7 +550,6 @@ static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
561 struct nfs41_call_sync_data *data = calldata; 550 struct nfs41_call_sync_data *data = calldata;
562 551
563 nfs41_sequence_done(data->clp, data->seq_res, task->tk_status); 552 nfs41_sequence_done(data->clp, data->seq_res, task->tk_status);
564 nfs41_sequence_free_slot(data->clp, data->seq_res);
565} 553}
566 554
567struct rpc_call_ops nfs41_call_sync_ops = { 555struct rpc_call_ops nfs41_call_sync_ops = {
@@ -637,15 +625,6 @@ static void nfs4_sequence_done(const struct nfs_server *server,
637#endif /* CONFIG_NFS_V4_1 */ 625#endif /* CONFIG_NFS_V4_1 */
638} 626}
639 627
640/* no restart, therefore free slot here */
641static void nfs4_sequence_done_free_slot(const struct nfs_server *server,
642 struct nfs4_sequence_res *res,
643 int rpc_status)
644{
645 nfs4_sequence_done(server, res, rpc_status);
646 nfs4_sequence_free_slot(server->nfs_client, res);
647}
648
649static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) 628static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
650{ 629{
651 struct nfs_inode *nfsi = NFS_I(dir); 630 struct nfs_inode *nfsi = NFS_I(dir);
@@ -720,9 +699,15 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
720 p->o_arg.bitmask = server->attr_bitmask; 699 p->o_arg.bitmask = server->attr_bitmask;
721 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; 700 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
722 if (flags & O_EXCL) { 701 if (flags & O_EXCL) {
723 u32 *s = (u32 *) p->o_arg.u.verifier.data; 702 if (nfs4_has_persistent_session(server->nfs_client)) {
724 s[0] = jiffies; 703 /* GUARDED */
725 s[1] = current->pid; 704 p->o_arg.u.attrs = &p->attrs;
705 memcpy(&p->attrs, attrs, sizeof(p->attrs));
706 } else { /* EXCLUSIVE4_1 */
707 u32 *s = (u32 *) p->o_arg.u.verifier.data;
708 s[0] = jiffies;
709 s[1] = current->pid;
710 }
726 } else if (flags & O_CREAT) { 711 } else if (flags & O_CREAT) {
727 p->o_arg.u.attrs = &p->attrs; 712 p->o_arg.u.attrs = &p->attrs;
728 memcpy(&p->attrs, attrs, sizeof(p->attrs)); 713 memcpy(&p->attrs, attrs, sizeof(p->attrs));
@@ -776,13 +761,16 @@ static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode
776 goto out; 761 goto out;
777 switch (mode & (FMODE_READ|FMODE_WRITE)) { 762 switch (mode & (FMODE_READ|FMODE_WRITE)) {
778 case FMODE_READ: 763 case FMODE_READ:
779 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0; 764 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
765 && state->n_rdonly != 0;
780 break; 766 break;
781 case FMODE_WRITE: 767 case FMODE_WRITE:
782 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0; 768 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
769 && state->n_wronly != 0;
783 break; 770 break;
784 case FMODE_READ|FMODE_WRITE: 771 case FMODE_READ|FMODE_WRITE:
785 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0; 772 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
773 && state->n_rdwr != 0;
786 } 774 }
787out: 775out:
788 return ret; 776 return ret;
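
can_open_cached() is tightened so a cached open is reused only when both the matching state flag is set and at least one opener of that mode remains (n_rdonly/n_wronly/n_rdwr nonzero). A simplified truth-table sketch of the new test, leaving out the open_mode checks that precede it:

#include <stdbool.h>
#include <stdio.h>

#define FMODE_READ  0x1
#define FMODE_WRITE 0x2

struct state {
	bool rd_state, wr_state, rdwr_state;
	int n_rdonly, n_wronly, n_rdwr;
};

static bool can_open_cached(const struct state *s, int mode)
{
	switch (mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		return s->rd_state && s->n_rdonly != 0;
	case FMODE_WRITE:
		return s->wr_state && s->n_wronly != 0;
	case FMODE_READ | FMODE_WRITE:
		return s->rdwr_state && s->n_rdwr != 0;
	}
	return false;
}

int main(void)
{
	/* flag still set but no remaining openers: must not reuse */
	struct state s = { .rd_state = true, .n_rdonly = 0 };

	printf("reuse=%d\n", can_open_cached(&s, FMODE_READ));
	return 0;
}
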
@@ -1183,6 +1171,14 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
1183 case -ENOENT: 1171 case -ENOENT:
1184 case -ESTALE: 1172 case -ESTALE:
1185 goto out; 1173 goto out;
1174 case -NFS4ERR_BADSESSION:
1175 case -NFS4ERR_BADSLOT:
1176 case -NFS4ERR_BAD_HIGH_SLOT:
1177 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1178 case -NFS4ERR_DEADSESSION:
1179 nfs4_schedule_state_recovery(
1180 server->nfs_client);
1181 goto out;
1186 case -NFS4ERR_STALE_CLIENTID: 1182 case -NFS4ERR_STALE_CLIENTID:
1187 case -NFS4ERR_STALE_STATEID: 1183 case -NFS4ERR_STALE_STATEID:
1188 case -NFS4ERR_EXPIRED: 1184 case -NFS4ERR_EXPIRED:
@@ -1336,8 +1332,8 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
1336 1332
1337 data->rpc_status = task->tk_status; 1333 data->rpc_status = task->tk_status;
1338 1334
1339 nfs4_sequence_done_free_slot(data->o_arg.server, &data->o_res.seq_res, 1335 nfs4_sequence_done(data->o_arg.server, &data->o_res.seq_res,
1340 task->tk_status); 1336 task->tk_status);
1341 1337
1342 if (RPC_ASSASSINATED(task)) 1338 if (RPC_ASSASSINATED(task))
1343 return; 1339 return;
@@ -1488,7 +1484,7 @@ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *s
1488 return ret; 1484 return ret;
1489} 1485}
1490 1486
1491static inline int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 1487static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1492{ 1488{
1493 struct nfs_server *server = NFS_SERVER(state->inode); 1489 struct nfs_server *server = NFS_SERVER(state->inode);
1494 struct nfs4_exception exception = { }; 1490 struct nfs4_exception exception = { };
@@ -1496,10 +1492,16 @@ static inline int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4
1496 1492
1497 do { 1493 do {
1498 err = _nfs4_open_expired(ctx, state); 1494 err = _nfs4_open_expired(ctx, state);
1499 if (err != -NFS4ERR_DELAY) 1495 switch (err) {
1500 break; 1496 default:
1501 nfs4_handle_exception(server, err, &exception); 1497 goto out;
1498 case -NFS4ERR_GRACE:
1499 case -NFS4ERR_DELAY:
1500 nfs4_handle_exception(server, err, &exception);
1501 err = 0;
1502 }
1502 } while (exception.retry); 1503 } while (exception.retry);
1504out:
1503 return err; 1505 return err;
1504} 1506}
1505 1507
@@ -1712,6 +1714,18 @@ static void nfs4_free_closedata(void *data)
1712 kfree(calldata); 1714 kfree(calldata);
1713} 1715}
1714 1716
1717static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
1718 fmode_t fmode)
1719{
1720 spin_lock(&state->owner->so_lock);
1721 if (!(fmode & FMODE_READ))
1722 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1723 if (!(fmode & FMODE_WRITE))
1724 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1725 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1726 spin_unlock(&state->owner->so_lock);
1727}
1728
1715static void nfs4_close_done(struct rpc_task *task, void *data) 1729static void nfs4_close_done(struct rpc_task *task, void *data)
1716{ 1730{
1717 struct nfs4_closedata *calldata = data; 1731 struct nfs4_closedata *calldata = data;
@@ -1728,6 +1742,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
1728 case 0: 1742 case 0:
1729 nfs_set_open_stateid(state, &calldata->res.stateid, 0); 1743 nfs_set_open_stateid(state, &calldata->res.stateid, 0);
1730 renew_lease(server, calldata->timestamp); 1744 renew_lease(server, calldata->timestamp);
1745 nfs4_close_clear_stateid_flags(state,
1746 calldata->arg.fmode);
1731 break; 1747 break;
1732 case -NFS4ERR_STALE_STATEID: 1748 case -NFS4ERR_STALE_STATEID:
1733 case -NFS4ERR_OLD_STATEID: 1749 case -NFS4ERR_OLD_STATEID:
@@ -1737,11 +1753,10 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
1737 break; 1753 break;
1738 default: 1754 default:
1739 if (nfs4_async_handle_error(task, server, state) == -EAGAIN) { 1755 if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
1740 nfs4_restart_rpc(task, server->nfs_client); 1756 nfs_restart_rpc(task, server->nfs_client);
1741 return; 1757 return;
1742 } 1758 }
1743 } 1759 }
1744 nfs4_sequence_free_slot(server->nfs_client, &calldata->res.seq_res);
1745 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 1760 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
1746} 1761}
1747 1762
@@ -1749,38 +1764,39 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
1749{ 1764{
1750 struct nfs4_closedata *calldata = data; 1765 struct nfs4_closedata *calldata = data;
1751 struct nfs4_state *state = calldata->state; 1766 struct nfs4_state *state = calldata->state;
1752 int clear_rd, clear_wr, clear_rdwr; 1767 int call_close = 0;
1753 1768
1754 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 1769 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
1755 return; 1770 return;
1756 1771
1757 clear_rd = clear_wr = clear_rdwr = 0; 1772 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
1773 calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
1758 spin_lock(&state->owner->so_lock); 1774 spin_lock(&state->owner->so_lock);
1759 /* Calculate the change in open mode */ 1775 /* Calculate the change in open mode */
1760 if (state->n_rdwr == 0) { 1776 if (state->n_rdwr == 0) {
1761 if (state->n_rdonly == 0) { 1777 if (state->n_rdonly == 0) {
1762 clear_rd |= test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1778 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
1763 clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags); 1779 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
1780 calldata->arg.fmode &= ~FMODE_READ;
1764 } 1781 }
1765 if (state->n_wronly == 0) { 1782 if (state->n_wronly == 0) {
1766 clear_wr |= test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1783 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
1767 clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags); 1784 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
1785 calldata->arg.fmode &= ~FMODE_WRITE;
1768 } 1786 }
1769 } 1787 }
1770 spin_unlock(&state->owner->so_lock); 1788 spin_unlock(&state->owner->so_lock);
1771 if (!clear_rd && !clear_wr && !clear_rdwr) { 1789
1790 if (!call_close) {
1772 /* Note: exit _without_ calling nfs4_close_done */ 1791 /* Note: exit _without_ calling nfs4_close_done */
1773 task->tk_action = NULL; 1792 task->tk_action = NULL;
1774 return; 1793 return;
1775 } 1794 }
1795
1796 if (calldata->arg.fmode == 0)
1797 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
1798
1776 nfs_fattr_init(calldata->res.fattr); 1799 nfs_fattr_init(calldata->res.fattr);
1777 if (test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0) {
1778 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
1779 calldata->arg.fmode = FMODE_READ;
1780 } else if (test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0) {
1781 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
1782 calldata->arg.fmode = FMODE_WRITE;
1783 }
1784 calldata->timestamp = jiffies; 1800 calldata->timestamp = jiffies;
1785 if (nfs4_setup_sequence((NFS_SERVER(calldata->inode))->nfs_client, 1801 if (nfs4_setup_sequence((NFS_SERVER(calldata->inode))->nfs_client,
1786 &calldata->arg.seq_args, &calldata->res.seq_res, 1802 &calldata->arg.seq_args, &calldata->res.seq_res,
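
nfs4_close_prepare() now derives the remaining open mode directly from the opener counts: it starts from FMODE_READ|FMODE_WRITE, strips each direction whose count dropped to zero, and sends a full CLOSE only when nothing is left, an OPEN_DOWNGRADE otherwise. A simplified sketch of the computation; in the kernel, call_close is additionally gated on the NFS_O_*_STATE flags:

#include <stdio.h>

#define FMODE_READ  0x1
#define FMODE_WRITE 0x2

struct counts { int n_rdonly, n_wronly, n_rdwr; };

/* Returns the fmode to keep; 0 means a full CLOSE. */
static int remaining_fmode(const struct counts *c, int *call_close)
{
	int fmode = FMODE_READ | FMODE_WRITE;

	*call_close = 0;
	if (c->n_rdwr == 0) {
		if (c->n_rdonly == 0) {
			fmode &= ~FMODE_READ;
			*call_close = 1;	/* if a READ/RDWR state flag was set */
		}
		if (c->n_wronly == 0) {
			fmode &= ~FMODE_WRITE;
			*call_close = 1;	/* if a WRITE/RDWR state flag was set */
		}
	}
	return fmode;
}

int main(void)
{
	struct counts c = { .n_rdonly = 1, .n_wronly = 0, .n_rdwr = 0 };
	int call_close;
	int fmode = remaining_fmode(&c, &call_close);

	printf("%s, fmode=%d, call_close=%d\n",
	       fmode ? "OPEN_DOWNGRADE" : "CLOSE", fmode, call_close);
	return 0;
}
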
@@ -1981,7 +1997,7 @@ out_drop:
1981 return 0; 1997 return 0;
1982} 1998}
1983 1999
1984void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 2000static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
1985{ 2001{
1986 if (ctx->state == NULL) 2002 if (ctx->state == NULL)
1987 return; 2003 return;
@@ -2532,7 +2548,6 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
2532 nfs4_sequence_done(res->server, &res->seq_res, task->tk_status); 2548 nfs4_sequence_done(res->server, &res->seq_res, task->tk_status);
2533 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 2549 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2534 return 0; 2550 return 0;
2535 nfs4_sequence_free_slot(res->server->nfs_client, &res->seq_res);
2536 update_changeattr(dir, &res->cinfo); 2551 update_changeattr(dir, &res->cinfo);
2537 nfs_post_op_update_inode(dir, &res->dir_attr); 2552 nfs_post_op_update_inode(dir, &res->dir_attr);
2538 return 1; 2553 return 1;
@@ -2971,11 +2986,10 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
2971 2986
2972 dprintk("--> %s\n", __func__); 2987 dprintk("--> %s\n", __func__);
2973 2988
2974 /* nfs4_sequence_free_slot called in the read rpc_call_done */
2975 nfs4_sequence_done(server, &data->res.seq_res, task->tk_status); 2989 nfs4_sequence_done(server, &data->res.seq_res, task->tk_status);
2976 2990
2977 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) { 2991 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
2978 nfs4_restart_rpc(task, server->nfs_client); 2992 nfs_restart_rpc(task, server->nfs_client);
2979 return -EAGAIN; 2993 return -EAGAIN;
2980 } 2994 }
2981 2995
@@ -2995,12 +3009,11 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
2995{ 3009{
2996 struct inode *inode = data->inode; 3010 struct inode *inode = data->inode;
2997 3011
2998 /* slot is freed in nfs_writeback_done */
2999 nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res, 3012 nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
3000 task->tk_status); 3013 task->tk_status);
3001 3014
3002 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) { 3015 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
3003 nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client); 3016 nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
3004 return -EAGAIN; 3017 return -EAGAIN;
3005 } 3018 }
3006 if (task->tk_status >= 0) { 3019 if (task->tk_status >= 0) {
@@ -3028,11 +3041,9 @@ static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
3028 nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res, 3041 nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
3029 task->tk_status); 3042 task->tk_status);
3030 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) { 3043 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
3031 nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client); 3044 nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
3032 return -EAGAIN; 3045 return -EAGAIN;
3033 } 3046 }
3034 nfs4_sequence_free_slot(NFS_SERVER(inode)->nfs_client,
3035 &data->res.seq_res);
3036 nfs_refresh_inode(inode, data->res.fattr); 3047 nfs_refresh_inode(inode, data->res.fattr);
3037 return 0; 3048 return 0;
3038} 3049}
@@ -3350,7 +3361,7 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3350 case -NFS4ERR_SEQ_MISORDERED: 3361 case -NFS4ERR_SEQ_MISORDERED:
3351 dprintk("%s ERROR %d, Reset session\n", __func__, 3362 dprintk("%s ERROR %d, Reset session\n", __func__,
3352 task->tk_status); 3363 task->tk_status);
3353 set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state); 3364 nfs4_schedule_state_recovery(clp);
3354 task->tk_status = 0; 3365 task->tk_status = 0;
3355 return -EAGAIN; 3366 return -EAGAIN;
3356#endif /* CONFIG_NFS_V4_1 */ 3367#endif /* CONFIG_NFS_V4_1 */
@@ -3483,12 +3494,23 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
3483{ 3494{
3484 struct nfs4_delegreturndata *data = calldata; 3495 struct nfs4_delegreturndata *data = calldata;
3485 3496
3486 nfs4_sequence_done_free_slot(data->res.server, &data->res.seq_res, 3497 nfs4_sequence_done(data->res.server, &data->res.seq_res,
3487 task->tk_status); 3498 task->tk_status);
3488 3499
3489 data->rpc_status = task->tk_status; 3500 switch (task->tk_status) {
3490 if (data->rpc_status == 0) 3501 case -NFS4ERR_STALE_STATEID:
3502 case -NFS4ERR_EXPIRED:
3503 case 0:
3491 renew_lease(data->res.server, data->timestamp); 3504 renew_lease(data->res.server, data->timestamp);
3505 break;
3506 default:
3507 if (nfs4_async_handle_error(task, data->res.server, NULL) ==
3508 -EAGAIN) {
3509 nfs_restart_rpc(task, data->res.server->nfs_client);
3510 return;
3511 }
3512 }
3513 data->rpc_status = task->tk_status;
3492} 3514}
3493 3515
3494static void nfs4_delegreturn_release(void *calldata) 3516static void nfs4_delegreturn_release(void *calldata)
@@ -3741,11 +3763,9 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
3741 break; 3763 break;
3742 default: 3764 default:
3743 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN) 3765 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
3744 nfs4_restart_rpc(task, 3766 nfs_restart_rpc(task,
3745 calldata->server->nfs_client); 3767 calldata->server->nfs_client);
3746 } 3768 }
3747 nfs4_sequence_free_slot(calldata->server->nfs_client,
3748 &calldata->res.seq_res);
3749} 3769}
3750 3770
3751static void nfs4_locku_prepare(struct rpc_task *task, void *data) 3771static void nfs4_locku_prepare(struct rpc_task *task, void *data)
@@ -3927,8 +3947,8 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
3927 3947
3928 dprintk("%s: begin!\n", __func__); 3948 dprintk("%s: begin!\n", __func__);
3929 3949
3930 nfs4_sequence_done_free_slot(data->server, &data->res.seq_res, 3950 nfs4_sequence_done(data->server, &data->res.seq_res,
3931 task->tk_status); 3951 task->tk_status);
3932 3952
3933 data->rpc_status = task->tk_status; 3953 data->rpc_status = task->tk_status;
3934 if (RPC_ASSASSINATED(task)) 3954 if (RPC_ASSASSINATED(task))
@@ -4049,10 +4069,16 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request
4049 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 4069 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4050 return 0; 4070 return 0;
4051 err = _nfs4_do_setlk(state, F_SETLK, request, 0); 4071 err = _nfs4_do_setlk(state, F_SETLK, request, 0);
4052 if (err != -NFS4ERR_DELAY) 4072 switch (err) {
4053 break; 4073 default:
4054 nfs4_handle_exception(server, err, &exception); 4074 goto out;
4075 case -NFS4ERR_GRACE:
4076 case -NFS4ERR_DELAY:
4077 nfs4_handle_exception(server, err, &exception);
4078 err = 0;
4079 }
4055 } while (exception.retry); 4080 } while (exception.retry);
4081out:
4056 return err; 4082 return err;
4057} 4083}
4058 4084
@@ -4172,6 +4198,11 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4172 case -NFS4ERR_EXPIRED: 4198 case -NFS4ERR_EXPIRED:
4173 case -NFS4ERR_STALE_CLIENTID: 4199 case -NFS4ERR_STALE_CLIENTID:
4174 case -NFS4ERR_STALE_STATEID: 4200 case -NFS4ERR_STALE_STATEID:
4201 case -NFS4ERR_BADSESSION:
4202 case -NFS4ERR_BADSLOT:
4203 case -NFS4ERR_BAD_HIGH_SLOT:
4204 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4205 case -NFS4ERR_DEADSESSION:
4175 nfs4_schedule_state_recovery(server->nfs_client); 4206 nfs4_schedule_state_recovery(server->nfs_client);
4176 goto out; 4207 goto out;
4177 case -ERESTARTSYS: 4208 case -ERESTARTSYS:
@@ -4296,7 +4327,7 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
4296 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 4327 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
4297 * be in some phase of session reset. 4328 * be in some phase of session reset.
4298 */ 4329 */
4299static int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) 4330int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
4300{ 4331{
4301 nfs4_verifier verifier; 4332 nfs4_verifier verifier;
4302 struct nfs41_exchange_id_args args = { 4333 struct nfs41_exchange_id_args args = {
@@ -4318,6 +4349,9 @@ static int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
4318 dprintk("--> %s\n", __func__); 4349 dprintk("--> %s\n", __func__);
4319 BUG_ON(clp == NULL); 4350 BUG_ON(clp == NULL);
4320 4351
4352 /* Remove server-only flags */
4353 args.flags &= ~EXCHGID4_FLAG_CONFIRMED_R;
4354
4321 p = (u32 *)verifier.data; 4355 p = (u32 *)verifier.data;
4322 *p++ = htonl((u32)clp->cl_boot_time.tv_sec); 4356 *p++ = htonl((u32)clp->cl_boot_time.tv_sec);
4323 *p = htonl((u32)clp->cl_boot_time.tv_nsec); 4357 *p = htonl((u32)clp->cl_boot_time.tv_nsec);
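
nfs4_proc_exchange_id() now masks out EXCHGID4_FLAG_CONFIRMED_R, which only a server may assert, and keeps deriving the client verifier from the boot time packed as two big-endian words. A sketch of the verifier packing, using the current time as a stand-in for cl_boot_time:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
	struct timespec boot;
	uint8_t verifier[8];
	uint32_t w;

	clock_gettime(CLOCK_REALTIME, &boot);	/* stand-in for cl_boot_time */

	w = htonl((uint32_t)boot.tv_sec);
	memcpy(verifier, &w, 4);
	w = htonl((uint32_t)boot.tv_nsec);
	memcpy(verifier + 4, &w, 4);

	for (int i = 0; i < 8; i++)
		printf("%02x", verifier[i]);
	printf("\n");
	return 0;
}
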
@@ -4389,10 +4423,9 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
4389 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 4423 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
4390 rpc_delay(task, NFS4_POLL_RETRY_MIN); 4424 rpc_delay(task, NFS4_POLL_RETRY_MIN);
4391 task->tk_status = 0; 4425 task->tk_status = 0;
4392 nfs4_restart_rpc(task, data->clp); 4426 nfs_restart_rpc(task, data->clp);
4393 return; 4427 return;
4394 } 4428 }
4395 nfs41_sequence_free_slot(data->clp, &data->res->lr_seq_res);
4396 dprintk("<-- %s\n", __func__); 4429 dprintk("<-- %s\n", __func__);
4397} 4430}
4398 4431
@@ -4465,7 +4498,6 @@ static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, int max_slots,
4465 spin_lock(&tbl->slot_tbl_lock); 4498 spin_lock(&tbl->slot_tbl_lock);
4466 for (i = 0; i < max_slots; ++i) 4499 for (i = 0; i < max_slots; ++i)
4467 tbl->slots[i].seq_nr = ivalue; 4500 tbl->slots[i].seq_nr = ivalue;
4468 tbl->highest_used_slotid = -1;
4469 spin_unlock(&tbl->slot_tbl_lock); 4501 spin_unlock(&tbl->slot_tbl_lock);
4470 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, 4502 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
4471 tbl, tbl->slots, tbl->max_slots); 4503 tbl, tbl->slots, tbl->max_slots);
@@ -4515,7 +4547,6 @@ static void nfs4_destroy_slot_tables(struct nfs4_session *session)
4515static int nfs4_init_slot_table(struct nfs4_slot_table *tbl, 4547static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
4516 int max_slots, int ivalue) 4548 int max_slots, int ivalue)
4517{ 4549{
4518 int i;
4519 struct nfs4_slot *slot; 4550 struct nfs4_slot *slot;
4520 int ret = -ENOMEM; 4551 int ret = -ENOMEM;
4521 4552
@@ -4526,18 +4557,9 @@ static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
4526 slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_KERNEL); 4557 slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_KERNEL);
4527 if (!slot) 4558 if (!slot)
4528 goto out; 4559 goto out;
4529 for (i = 0; i < max_slots; ++i)
4530 slot[i].seq_nr = ivalue;
4531 ret = 0; 4560 ret = 0;
4532 4561
4533 spin_lock(&tbl->slot_tbl_lock); 4562 spin_lock(&tbl->slot_tbl_lock);
4534 if (tbl->slots != NULL) {
4535 spin_unlock(&tbl->slot_tbl_lock);
4536 dprintk("%s: slot table already initialized. tbl=%p slots=%p\n",
4537 __func__, tbl, tbl->slots);
4538 WARN_ON(1);
4539 goto out_free;
4540 }
4541 tbl->max_slots = max_slots; 4563 tbl->max_slots = max_slots;
4542 tbl->slots = slot; 4564 tbl->slots = slot;
4543 tbl->highest_used_slotid = -1; /* no slot is currently used */ 4565 tbl->highest_used_slotid = -1; /* no slot is currently used */
@@ -4547,10 +4569,6 @@ static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
4547out: 4569out:
4548 dprintk("<-- %s: return %d\n", __func__, ret); 4570 dprintk("<-- %s: return %d\n", __func__, ret);
4549 return ret; 4571 return ret;
4550
4551out_free:
4552 kfree(slot);
4553 goto out;
4554} 4572}
4555 4573
4556/* 4574/*
@@ -4558,17 +4576,24 @@ out_free:
4558 */ 4576 */
4559static int nfs4_init_slot_tables(struct nfs4_session *session) 4577static int nfs4_init_slot_tables(struct nfs4_session *session)
4560{ 4578{
4561 int status; 4579 struct nfs4_slot_table *tbl;
4580 int status = 0;
4562 4581
4563 status = nfs4_init_slot_table(&session->fc_slot_table, 4582 tbl = &session->fc_slot_table;
4564 session->fc_attrs.max_reqs, 1); 4583 if (tbl->slots == NULL) {
4565 if (status) 4584 status = nfs4_init_slot_table(tbl,
4566 return status; 4585 session->fc_attrs.max_reqs, 1);
4586 if (status)
4587 return status;
4588 }
4567 4589
4568 status = nfs4_init_slot_table(&session->bc_slot_table, 4590 tbl = &session->bc_slot_table;
4569 session->bc_attrs.max_reqs, 0); 4591 if (tbl->slots == NULL) {
4570 if (status) 4592 status = nfs4_init_slot_table(tbl,
4571 nfs4_destroy_slot_tables(session); 4593 session->bc_attrs.max_reqs, 0);
4594 if (status)
4595 nfs4_destroy_slot_tables(session);
4596 }
4572 4597
4573 return status; 4598 return status;
4574} 4599}
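The two hunks above split slot-table setup into an allocate-once path and a reset path: nfs4_init_slot_tables now allocates a table only when tbl->slots is still NULL, while nfs4_reset_slot_table reinitializes the per-slot sequence numbers on an existing table. A minimal userspace sketch of that allocate-once/reset-separately pattern (all names hypothetical, locking omitted):

#include <stdio.h>
#include <stdlib.h>

struct slot { unsigned seq_nr; };
struct slot_table { struct slot *slots; int max_slots; };

/* Allocate the table only if it does not exist yet (idempotent init). */
static int table_init(struct slot_table *tbl, int max_slots)
{
	if (tbl->slots != NULL)
		return 0;		/* already initialized: nothing to do */
	tbl->slots = calloc(max_slots, sizeof(*tbl->slots));
	if (tbl->slots == NULL)
		return -1;
	tbl->max_slots = max_slots;
	return 0;
}

/* Reset sequence numbers without reallocating (used on session reset). */
static void table_reset(struct slot_table *tbl, unsigned ivalue)
{
	for (int i = 0; i < tbl->max_slots; i++)
		tbl->slots[i].seq_nr = ivalue;
}

int main(void)
{
	struct slot_table tbl = { 0 };
	if (table_init(&tbl, 4) || table_init(&tbl, 4))	/* second call is a no-op */
		return 1;
	table_reset(&tbl, 1);
	printf("slot 0 seq_nr = %u\n", tbl.slots[0].seq_nr);
	free(tbl.slots);
	return 0;
}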
@@ -4582,7 +4607,6 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
 	if (!session)
 		return NULL;
 
-	set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
 	/*
 	 * The create session reply races with the server back
 	 * channel probe. Mark the client NFS_CS_SESSION_INITING
@@ -4590,12 +4614,15 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
 	 * nfs_client struct
 	 */
 	clp->cl_cons_state = NFS_CS_SESSION_INITING;
+	init_completion(&session->complete);
 
 	tbl = &session->fc_slot_table;
+	tbl->highest_used_slotid = -1;
 	spin_lock_init(&tbl->slot_tbl_lock);
 	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
 
 	tbl = &session->bc_slot_table;
+	tbl->highest_used_slotid = -1;
 	spin_lock_init(&tbl->slot_tbl_lock);
 	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
 
@@ -4747,11 +4774,10 @@ static int _nfs4_proc_create_session(struct nfs_client *clp)
  * It is the responsibility of the caller to verify the session is
  * expired before calling this routine.
  */
-int nfs4_proc_create_session(struct nfs_client *clp, int reset)
+int nfs4_proc_create_session(struct nfs_client *clp)
 {
 	int status;
 	unsigned *ptr;
-	struct nfs_fsinfo fsinfo;
 	struct nfs4_session *session = clp->cl_session;
 
 	dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
@@ -4760,35 +4786,19 @@ int nfs4_proc_create_session(struct nfs_client *clp)
 	if (status)
 		goto out;
 
-	/* Init or reset the fore channel */
-	if (reset)
-		status = nfs4_reset_slot_tables(session);
-	else
-		status = nfs4_init_slot_tables(session);
-	dprintk("fore channel slot table initialization returned %d\n", status);
+	/* Init and reset the fore channel */
+	status = nfs4_init_slot_tables(session);
+	dprintk("slot table initialization returned %d\n", status);
+	if (status)
+		goto out;
+	status = nfs4_reset_slot_tables(session);
+	dprintk("slot table reset returned %d\n", status);
 	if (status)
 		goto out;
 
 	ptr = (unsigned *)&session->sess_id.data[0];
 	dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
 		clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
-
-	if (reset)
-		/* Lease time is aleady set */
-		goto out;
-
-	/* Get the lease time */
-	status = nfs4_proc_get_lease_time(clp, &fsinfo);
-	if (status == 0) {
-		/* Update lease time and schedule renewal */
-		spin_lock(&clp->cl_lock);
-		clp->cl_lease_time = fsinfo.lease_time * HZ;
-		clp->cl_last_renewal = jiffies;
-		clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
-		spin_unlock(&clp->cl_lock);
-
-		nfs4_schedule_state_renewal(clp);
-	}
 out:
 	dprintk("<-- %s\n", __func__);
 	return status;
@@ -4827,13 +4837,16 @@ int nfs4_proc_destroy_session(struct nfs4_session *session)
 int nfs4_init_session(struct nfs_server *server)
 {
 	struct nfs_client *clp = server->nfs_client;
+	struct nfs4_session *session;
 	int ret;
 
 	if (!nfs4_has_session(clp))
 		return 0;
 
-	clp->cl_session->fc_attrs.max_rqst_sz = server->wsize;
-	clp->cl_session->fc_attrs.max_resp_sz = server->rsize;
+	session = clp->cl_session;
+	session->fc_attrs.max_rqst_sz = server->wsize + nfs41_maxwrite_overhead;
+	session->fc_attrs.max_resp_sz = server->rsize + nfs41_maxread_overhead;
+
 	ret = nfs4_recover_expired_lease(server);
 	if (!ret)
 		ret = nfs4_check_client_ready(clp);
@@ -4872,11 +4885,10 @@ void nfs41_sequence_call_done(struct rpc_task *task, void *data)
 
 		if (_nfs4_async_handle_error(task, NULL, clp, NULL)
 				== -EAGAIN) {
-			nfs4_restart_rpc(task, clp);
+			nfs_restart_rpc(task, clp);
 			return;
 		}
 	}
-	nfs41_sequence_free_slot(clp, task->tk_msg.rpc_resp);
 	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
 
 	kfree(task->tk_msg.rpc_argp);
@@ -4931,6 +4943,109 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp,
 			&nfs41_sequence_ops, (void *)clp);
 }
 
+struct nfs4_reclaim_complete_data {
+	struct nfs_client *clp;
+	struct nfs41_reclaim_complete_args arg;
+	struct nfs41_reclaim_complete_res res;
+};
+
+static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
+{
+	struct nfs4_reclaim_complete_data *calldata = data;
+
+	if (nfs4_setup_sequence(calldata->clp, &calldata->arg.seq_args,
+				&calldata->res.seq_res, 0, task))
+		return;
+
+	rpc_call_start(task);
+}
+
+static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
+{
+	struct nfs4_reclaim_complete_data *calldata = data;
+	struct nfs_client *clp = calldata->clp;
+	struct nfs4_sequence_res *res = &calldata->res.seq_res;
+
+	dprintk("--> %s\n", __func__);
+	nfs41_sequence_done(clp, res, task->tk_status);
+	switch (task->tk_status) {
+	case 0:
+	case -NFS4ERR_COMPLETE_ALREADY:
+		break;
+	case -NFS4ERR_BADSESSION:
+	case -NFS4ERR_DEADSESSION:
+		/*
+		 * Handle the session error, but do not retry the operation, as
+		 * we have no way of telling whether the clientid had to be
+		 * reset before we got our reply. If reset, a new wave of
+		 * reclaim operations will follow, containing their own reclaim
+		 * complete. We don't want our retry to get on the way of
+		 * recovery by incorrectly indicating to the server that we're
+		 * done reclaiming state since the process had to be restarted.
+		 */
+		_nfs4_async_handle_error(task, NULL, clp, NULL);
+		break;
+	default:
+		if (_nfs4_async_handle_error(
+				task, NULL, clp, NULL) == -EAGAIN) {
+			rpc_restart_call_prepare(task);
+			return;
+		}
+	}
+
+	dprintk("<-- %s\n", __func__);
+}
+
+static void nfs4_free_reclaim_complete_data(void *data)
+{
+	struct nfs4_reclaim_complete_data *calldata = data;
+
+	kfree(calldata);
+}
+
+static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
+	.rpc_call_prepare = nfs4_reclaim_complete_prepare,
+	.rpc_call_done = nfs4_reclaim_complete_done,
+	.rpc_release = nfs4_free_reclaim_complete_data,
+};
+
+/*
+ * Issue a global reclaim complete.
+ */
+static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
+{
+	struct nfs4_reclaim_complete_data *calldata;
+	struct rpc_task *task;
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
+	};
+	struct rpc_task_setup task_setup_data = {
+		.rpc_client = clp->cl_rpcclient,
+		.rpc_message = &msg,
+		.callback_ops = &nfs4_reclaim_complete_call_ops,
+		.flags = RPC_TASK_ASYNC,
+	};
+	int status = -ENOMEM;
+
+	dprintk("--> %s\n", __func__);
+	calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
+	if (calldata == NULL)
+		goto out;
+	calldata->clp = clp;
+	calldata->arg.one_fs = 0;
+	calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
+
+	msg.rpc_argp = &calldata->arg;
+	msg.rpc_resp = &calldata->res;
+	task_setup_data.callback_data = calldata;
+	task = rpc_run_task(&task_setup_data);
+	if (IS_ERR(task))
+		status = PTR_ERR(task);
+	rpc_put_task(task);
+out:
+	dprintk("<-- %s status=%d\n", __func__, status);
+	return status;
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
@@ -4948,8 +5063,9 @@ struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
 	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
 	.recover_open	= nfs4_open_reclaim,
 	.recover_lock	= nfs4_lock_reclaim,
-	.establish_clid = nfs4_proc_exchange_id,
+	.establish_clid = nfs41_init_clientid,
 	.get_clid_cred	= nfs4_get_exchange_id_cred,
+	.reclaim_complete = nfs41_proc_reclaim_complete,
 };
 #endif /* CONFIG_NFS_V4_1 */
 
@@ -4968,7 +5084,7 @@ struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
 	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
 	.recover_open	= nfs4_open_expired,
 	.recover_lock	= nfs4_lock_expired,
-	.establish_clid = nfs4_proc_exchange_id,
+	.establish_clid = nfs41_init_clientid,
 	.get_clid_cred	= nfs4_get_exchange_id_cred,
 };
 #endif /* CONFIG_NFS_V4_1 */
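The new RECLAIM_COMPLETE call above follows the sunrpc async-task pattern: an rpc_call_ops table supplies a prepare hook (reserve a session slot via nfs4_setup_sequence), a done hook (classify task->tk_status), and a release hook (free the calldata). A rough userspace analogue of driving such a callback table (names and flow hypothetical; the real task runs asynchronously):

#include <stdio.h>
#include <stdlib.h>

struct call_data { int one_fs; int status; };

struct call_ops {
	void (*prepare)(struct call_data *d);
	void (*done)(struct call_data *d);
	void (*release)(void *d);
};

static void reclaim_prepare(struct call_data *d) { d->status = 0; /* e.g. reserve a slot */ }
static void reclaim_done(struct call_data *d)    { printf("done, status=%d\n", d->status); }
static void reclaim_release(void *d)             { free(d); }

static const struct call_ops reclaim_ops = {
	.prepare = reclaim_prepare,
	.done    = reclaim_done,
	.release = reclaim_release,
};

/* Run one "task": drive the three hooks in order, as rpc_run_task would. */
static void run_task(const struct call_ops *ops, struct call_data *d)
{
	ops->prepare(d);
	/* ... transmit and receive would happen here ... */
	ops->done(d);
	ops->release(d);
}

int main(void)
{
	struct call_data *d = calloc(1, sizeof(*d));
	if (!d)
		return 1;
	d->one_fs = 0;	/* global reclaim, matching calldata->arg.one_fs = 0 above */
	run_task(&reclaim_ops, d);
	return 0;
}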
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 2ef4fecf3984..e76427e6346f 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -116,6 +116,68 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
 
 #if defined(CONFIG_NFS_V4_1)
 
+static int nfs41_setup_state_renewal(struct nfs_client *clp)
+{
+	int status;
+	struct nfs_fsinfo fsinfo;
+
+	status = nfs4_proc_get_lease_time(clp, &fsinfo);
+	if (status == 0) {
+		/* Update lease time and schedule renewal */
+		spin_lock(&clp->cl_lock);
+		clp->cl_lease_time = fsinfo.lease_time * HZ;
+		clp->cl_last_renewal = jiffies;
+		spin_unlock(&clp->cl_lock);
+
+		nfs4_schedule_state_renewal(clp);
+	}
+
+	return status;
+}
+
+static void nfs41_end_drain_session(struct nfs_client *clp,
+		struct nfs4_session *ses)
+{
+	if (test_and_clear_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state))
+		rpc_wake_up(&ses->fc_slot_table.slot_tbl_waitq);
+}
+
+static int nfs41_begin_drain_session(struct nfs_client *clp,
+		struct nfs4_session *ses)
+{
+	struct nfs4_slot_table *tbl = &ses->fc_slot_table;
+
+	spin_lock(&tbl->slot_tbl_lock);
+	set_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
+	if (tbl->highest_used_slotid != -1) {
+		INIT_COMPLETION(ses->complete);
+		spin_unlock(&tbl->slot_tbl_lock);
+		return wait_for_completion_interruptible(&ses->complete);
+	}
+	spin_unlock(&tbl->slot_tbl_lock);
+	return 0;
+}
+
+int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
+{
+	int status;
+
+	status = nfs41_begin_drain_session(clp, clp->cl_session);
+	if (status != 0)
+		goto out;
+	status = nfs4_proc_exchange_id(clp, cred);
+	if (status != 0)
+		goto out;
+	status = nfs4_proc_create_session(clp);
+	if (status != 0)
+		goto out;
+	nfs41_end_drain_session(clp, clp->cl_session);
+	nfs41_setup_state_renewal(clp);
+	nfs_mark_client_ready(clp, NFS_CS_READY);
+out:
+	return status;
+}
+
 struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
 {
 	struct rpc_cred *cred;
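nfs41_begin_drain_session above marks the fore channel as draining under the slot-table lock and, if any slot is still outstanding (highest_used_slotid != -1), sleeps on the session's completion until the last slot holder signals it; nfs41_end_drain_session clears the flag and wakes queued tasks. A rough pthread sketch of the same drain handshake, with a mutex/condvar standing in for the spinlock and completion (assumption: a single in-flight slot user):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t complete = PTHREAD_COND_INITIALIZER;
static int draining, slots_in_use = 1;

/* Called when an RPC finishes with its slot (last user fires the completion). */
static void *slot_user(void *arg)
{
	sleep(1);			/* pretend the RPC is in flight */
	pthread_mutex_lock(&lock);
	if (--slots_in_use == 0 && draining)
		pthread_cond_signal(&complete);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void begin_drain(void)
{
	pthread_mutex_lock(&lock);
	draining = 1;
	while (slots_in_use != 0)	/* wait_for_completion() analogue */
		pthread_cond_wait(&complete, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, slot_user, NULL);
	begin_drain();			/* returns once the in-flight slot is released */
	printf("session drained; safe to reset\n");
	pthread_join(t, NULL);
	return 0;
}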
@@ -877,6 +939,10 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
 		case -NFS4ERR_EXPIRED:
 		case -NFS4ERR_NO_GRACE:
 		case -NFS4ERR_STALE_CLIENTID:
+		case -NFS4ERR_BADSESSION:
+		case -NFS4ERR_BADSLOT:
+		case -NFS4ERR_BAD_HIGH_SLOT:
+		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
 			goto out;
 		default:
 			printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
@@ -959,6 +1025,10 @@ restart:
 			case -NFS4ERR_NO_GRACE:
 				nfs4_state_mark_reclaim_nograce(sp->so_client, state);
 			case -NFS4ERR_STALE_CLIENTID:
+			case -NFS4ERR_BADSESSION:
+			case -NFS4ERR_BADSLOT:
+			case -NFS4ERR_BAD_HIGH_SLOT:
+			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
 				goto out_err;
 		}
 		nfs4_put_open_state(state);
@@ -1011,6 +1081,14 @@ static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
 	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
 }
 
+static void nfs4_reclaim_complete(struct nfs_client *clp,
+				 const struct nfs4_state_recovery_ops *ops)
+{
+	/* Notify the server we're done reclaiming our state */
+	if (ops->reclaim_complete)
+		(void)ops->reclaim_complete(clp);
+}
+
 static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
 {
 	struct nfs4_state_owner *sp;
@@ -1020,6 +1098,9 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
 	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
 		return;
 
+	nfs4_reclaim_complete(clp,
+		nfs4_reboot_recovery_ops[clp->cl_minorversion]);
+
 	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
 		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
 		spin_lock(&sp->so_lock);
@@ -1046,25 +1127,25 @@ static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
 	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
 }
 
-static void nfs4_state_end_reclaim_nograce(struct nfs_client *clp)
-{
-	clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
-}
-
-static void nfs4_recovery_handle_error(struct nfs_client *clp, int error)
+static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
 {
 	switch (error) {
 		case -NFS4ERR_CB_PATH_DOWN:
 			nfs_handle_cb_pathdown(clp);
-			break;
+			return 0;
+		case -NFS4ERR_NO_GRACE:
+			nfs4_state_end_reclaim_reboot(clp);
+			return 0;
 		case -NFS4ERR_STALE_CLIENTID:
 		case -NFS4ERR_LEASE_MOVED:
 			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+			nfs4_state_end_reclaim_reboot(clp);
 			nfs4_state_start_reclaim_reboot(clp);
 			break;
 		case -NFS4ERR_EXPIRED:
 			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
 			nfs4_state_start_reclaim_nograce(clp);
+			break;
 		case -NFS4ERR_BADSESSION:
 		case -NFS4ERR_BADSLOT:
 		case -NFS4ERR_BAD_HIGH_SLOT:
@@ -1072,8 +1153,11 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
 		case -NFS4ERR_SEQ_FALSE_RETRY:
 		case -NFS4ERR_SEQ_MISORDERED:
-			set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+			set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+			/* Zero session reset errors */
+			return 0;
 	}
+	return error;
 }
 
 static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
@@ -1093,8 +1177,7 @@ restart:
 		if (status < 0) {
 			set_bit(ops->owner_flag_bit, &sp->so_flags);
 			nfs4_put_state_owner(sp);
-			nfs4_recovery_handle_error(clp, status);
-			return status;
+			return nfs4_recovery_handle_error(clp, status);
 		}
 		nfs4_put_state_owner(sp);
 		goto restart;
@@ -1124,8 +1207,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
 	status = ops->renew_lease(clp, cred);
 	put_rpccred(cred);
 out:
-	nfs4_recovery_handle_error(clp, status);
-	return status;
+	return nfs4_recovery_handle_error(clp, status);
 }
 
 static int nfs4_reclaim_lease(struct nfs_client *clp)
@@ -1151,55 +1233,65 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
 }
 
 #ifdef CONFIG_NFS_V4_1
-static void nfs4_session_recovery_handle_error(struct nfs_client *clp, int err)
+void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
 {
-	switch (err) {
-	case -NFS4ERR_STALE_CLIENTID:
+	if (!flags)
+		return;
+	else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) {
 		set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
-		set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
-	}
+		nfs4_state_start_reclaim_reboot(clp);
+		nfs4_schedule_state_recovery(clp);
+	} else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+				SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
+				SEQ4_STATUS_ADMIN_STATE_REVOKED |
+				SEQ4_STATUS_RECALLABLE_STATE_REVOKED |
+				SEQ4_STATUS_LEASE_MOVED)) {
+		set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+		nfs4_state_start_reclaim_nograce(clp);
+		nfs4_schedule_state_recovery(clp);
+	} else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+				SEQ4_STATUS_BACKCHANNEL_FAULT |
+				SEQ4_STATUS_CB_PATH_DOWN_SESSION))
+		nfs_expire_all_delegations(clp);
 }
 
 static int nfs4_reset_session(struct nfs_client *clp)
 {
+	struct nfs4_session *ses = clp->cl_session;
 	int status;
 
+	status = nfs41_begin_drain_session(clp, ses);
+	if (status != 0)
+		return status;
+
 	status = nfs4_proc_destroy_session(clp->cl_session);
 	if (status && status != -NFS4ERR_BADSESSION &&
 	    status != -NFS4ERR_DEADSESSION) {
-		nfs4_session_recovery_handle_error(clp, status);
+		status = nfs4_recovery_handle_error(clp, status);
 		goto out;
 	}
 
 	memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
-	status = nfs4_proc_create_session(clp, 1);
+	status = nfs4_proc_create_session(clp);
 	if (status)
-		nfs4_session_recovery_handle_error(clp, status);
-		/* fall through*/
-out:
-	/* Wake up the next rpc task even on error */
-	rpc_wake_up_next(&clp->cl_session->fc_slot_table.slot_tbl_waitq);
-	return status;
-}
-
-static int nfs4_initialize_session(struct nfs_client *clp)
-{
-	int status;
+		status = nfs4_recovery_handle_error(clp, status);
 
-	status = nfs4_proc_create_session(clp, 0);
-	if (!status) {
-		nfs_mark_client_ready(clp, NFS_CS_READY);
-	} else if (status == -NFS4ERR_STALE_CLIENTID) {
-		set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
-		set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
-	} else {
-		nfs_mark_client_ready(clp, status);
+out:
+	/*
+	 * Let the state manager reestablish state
+	 * without waking other tasks yet.
	 */
+	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
+		/* Wake up the next rpc task */
+		nfs41_end_drain_session(clp, ses);
+		if (status == 0)
+			nfs41_setup_state_renewal(clp);
 	}
 	return status;
 }
+
 #else /* CONFIG_NFS_V4_1 */
 static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
-static int nfs4_initialize_session(struct nfs_client *clp) { return 0; }
 #endif /* CONFIG_NFS_V4_1 */
 
 /* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
@@ -1234,7 +1326,8 @@ static void nfs4_state_manager(struct nfs_client *clp)
 			status = nfs4_reclaim_lease(clp);
 			if (status) {
 				nfs4_set_lease_expired(clp, status);
-				if (status == -EAGAIN)
+				if (test_bit(NFS4CLNT_LEASE_EXPIRED,
+							&clp->cl_state))
 					continue;
 				if (clp->cl_cons_state ==
 						NFS_CS_SESSION_INITING)
@@ -1242,55 +1335,51 @@ static void nfs4_state_manager(struct nfs_client *clp)
 					goto out_error;
 			}
 			clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
+			set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
 		}
 
 		if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
 			status = nfs4_check_lease(clp);
-			if (status != 0)
+			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
 				continue;
+			if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
+				goto out_error;
 		}
+
 		/* Initialize or reset the session */
-		if (test_and_clear_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)
+		if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)
 		   && nfs4_has_session(clp)) {
-			if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
-				status = nfs4_initialize_session(clp);
-			else
-				status = nfs4_reset_session(clp);
-			if (status) {
-				if (status == -NFS4ERR_STALE_CLIENTID)
-					continue;
+			status = nfs4_reset_session(clp);
+			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
+				continue;
+			if (status < 0)
 				goto out_error;
-			}
 		}
+
 		/* First recover reboot state... */
-		if (test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
+		if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
 			status = nfs4_do_reclaim(clp,
 				nfs4_reboot_recovery_ops[clp->cl_minorversion]);
-			if (status == -NFS4ERR_STALE_CLIENTID)
-				continue;
-			if (test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
+			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+			    test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
 				continue;
 			nfs4_state_end_reclaim_reboot(clp);
-			continue;
+			if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
+				continue;
+			if (status < 0)
+				goto out_error;
 		}
 
 		/* Now recover expired state... */
 		if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
 			status = nfs4_do_reclaim(clp,
 				nfs4_nograce_recovery_ops[clp->cl_minorversion]);
-			if (status < 0) {
-				set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
-				if (status == -NFS4ERR_STALE_CLIENTID)
+			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+			    test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
+			    test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
 				continue;
-				if (status == -NFS4ERR_EXPIRED)
-					continue;
-				if (test_bit(NFS4CLNT_SESSION_SETUP,
-							&clp->cl_state))
-					continue;
+			if (status < 0)
 				goto out_error;
-			} else
-				nfs4_state_end_reclaim_nograce(clp);
-			continue;
 		}
 
 		if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
@@ -1309,8 +1398,6 @@ static void nfs4_state_manager(struct nfs_client *clp)
 out_error:
	printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
			" with error %d\n", clp->cl_hostname, -status);
-	if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
-		nfs4_state_end_reclaim_reboot(clp);
	nfs4_clear_state_manager_bit(clp);
 }
 
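After this rework each step of nfs4_state_manager re-tests the client's state bits (NFS4CLNT_LEASE_EXPIRED, NFS4CLNT_SESSION_RESET, NFS4CLNT_RECLAIM_REBOOT) instead of switching on raw NFS4ERR values, so any step that re-arms an earlier bit simply restarts the loop. A much-simplified sketch of that bit-driven loop (bit names reused for readability; the failure injection and termination logic are illustrative only):

#include <stdio.h>

enum { LEASE_EXPIRED = 1 << 0, SESSION_RESET = 1 << 1, RECLAIM_REBOOT = 1 << 2 };

static unsigned long state;
static int pass;

/* Pretend lease recovery fails once, forcing one extra trip around the loop. */
static int reclaim_lease(void)
{
	if (pass++ == 0) {
		state |= SESSION_RESET;	/* a later step must run again */
		return -1;
	}
	return 0;
}

int main(void)
{
	state = LEASE_EXPIRED | RECLAIM_REBOOT;
	while (state) {
		if (state & LEASE_EXPIRED) {
			state &= ~LEASE_EXPIRED;
			if (reclaim_lease() && (state & SESSION_RESET)) {
				state |= LEASE_EXPIRED;	/* retry from the top */
				continue;
			}
		}
		if (state & SESSION_RESET) {
			state &= ~SESSION_RESET;
			continue;	/* re-check earlier conditions first */
		}
		if (state & RECLAIM_REBOOT)
			state &= ~RECLAIM_REBOOT;
	}
	printf("state manager finished after %d lease pass(es)\n", pass);
	return 0;
}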
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 20b4e30e6c82..e437fd6a819f 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -46,11 +46,13 @@
 #include <linux/proc_fs.h>
 #include <linux/kdev_t.h>
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/msg_prot.h>
 #include <linux/nfs.h>
 #include <linux/nfs4.h>
 #include <linux/nfs_fs.h>
 #include <linux/nfs_idmap.h>
 #include "nfs4_fs.h"
+#include "internal.h"
 
 #define NFSDBG_FACILITY		NFSDBG_XDR
 
@@ -134,7 +136,7 @@ static int nfs4_stat_to_errno(int);
 #define decode_lookup_maxsz	(op_decode_hdr_maxsz)
 #define encode_share_access_maxsz \
				(2)
-#define encode_createmode_maxsz	(1 + encode_attrs_maxsz)
+#define encode_createmode_maxsz	(1 + encode_attrs_maxsz + encode_verifier_maxsz)
 #define encode_opentype_maxsz	(1 + encode_createmode_maxsz)
 #define encode_claim_null_maxsz	(1 + nfs4_name_maxsz)
 #define encode_open_maxsz	(op_encode_hdr_maxsz + \
@@ -299,6 +301,8 @@ static int nfs4_stat_to_errno(int);
				XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4)
 #define decode_sequence_maxsz	(op_decode_hdr_maxsz + \
				XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5)
+#define encode_reclaim_complete_maxsz	(op_encode_hdr_maxsz + 4)
+#define decode_reclaim_complete_maxsz	(op_decode_hdr_maxsz + 4)
 #else /* CONFIG_NFS_V4_1 */
 #define encode_sequence_maxsz	0
 #define decode_sequence_maxsz	0
@@ -676,6 +680,25 @@ static int nfs4_stat_to_errno(int);
					 decode_sequence_maxsz + \
					 decode_putrootfh_maxsz + \
					 decode_fsinfo_maxsz)
+#define NFS4_enc_reclaim_complete_sz	(compound_encode_hdr_maxsz + \
+					 encode_sequence_maxsz + \
+					 encode_reclaim_complete_maxsz)
+#define NFS4_dec_reclaim_complete_sz	(compound_decode_hdr_maxsz + \
+					 decode_sequence_maxsz + \
+					 decode_reclaim_complete_maxsz)
+
+const u32 nfs41_maxwrite_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
+				      compound_encode_hdr_maxsz +
+				      encode_sequence_maxsz +
+				      encode_putfh_maxsz +
+				      encode_getattr_maxsz) *
+				     XDR_UNIT);
+
+const u32 nfs41_maxread_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
+				     compound_decode_hdr_maxsz +
+				     decode_sequence_maxsz +
+				     decode_putfh_maxsz) *
+				    XDR_UNIT);
 #endif /* CONFIG_NFS_V4_1 */
 
 static const umode_t nfs_type2fmt[] = {
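nfs41_maxwrite_overhead and nfs41_maxread_overhead above total the worst-case RPC and COMPOUND header sizes in 4-byte XDR words and scale by XDR_UNIT to get bytes; nfs4_init_session then adds the result to wsize/rsize when advertising fc_attrs.max_rqst_sz and max_resp_sz. A toy version of the same calculation (the word counts below are stand-ins, not the kernel's *_maxsz values):

#include <stdio.h>

#define XDR_UNIT 4	/* bytes per XDR word */

int main(void)
{
	/* Stand-in word counts; the kernel uses the *_maxsz macros instead. */
	unsigned rpc_hdr = 103, compound_hdr = 11, sequence = 13, putfh = 6, getattr = 5;
	unsigned wsize = 65536;	/* hypothetical mount wsize */

	unsigned overhead = (rpc_hdr + compound_hdr + sequence + putfh + getattr) * XDR_UNIT;
	printf("write overhead = %u bytes\n", overhead);
	printf("max_rqst_sz    = %u bytes\n", wsize + overhead);
	return 0;
}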
@@ -1140,6 +1163,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
 static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
 {
	__be32 *p;
+	struct nfs_client *clp;
 
	p = reserve_space(xdr, 4);
	switch(arg->open_flags & O_EXCL) {
@@ -1148,8 +1172,23 @@ static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_op
		encode_attrs(xdr, arg->u.attrs, arg->server);
		break;
	default:
-		*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
-		encode_nfs4_verifier(xdr, &arg->u.verifier);
+		clp = arg->server->nfs_client;
+		if (clp->cl_minorversion > 0) {
+			if (nfs4_has_persistent_session(clp)) {
+				*p = cpu_to_be32(NFS4_CREATE_GUARDED);
+				encode_attrs(xdr, arg->u.attrs, arg->server);
+			} else {
+				struct iattr dummy;
+
+				*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1);
+				encode_nfs4_verifier(xdr, &arg->u.verifier);
+				dummy.ia_valid = 0;
+				encode_attrs(xdr, &dummy, arg->server);
+			}
+		} else {
+			*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
+			encode_nfs4_verifier(xdr, &arg->u.verifier);
+		}
	}
 }
 
@@ -1592,6 +1631,19 @@ static void encode_destroy_session(struct xdr_stream *xdr,
	hdr->nops++;
	hdr->replen += decode_destroy_session_maxsz;
 }
+
+static void encode_reclaim_complete(struct xdr_stream *xdr,
+				    struct nfs41_reclaim_complete_args *args,
+				    struct compound_hdr *hdr)
+{
+	__be32 *p;
+
+	p = reserve_space(xdr, 8);
+	*p++ = cpu_to_be32(OP_RECLAIM_COMPLETE);
+	*p++ = cpu_to_be32(args->one_fs);
+	hdr->nops++;
+	hdr->replen += decode_reclaim_complete_maxsz;
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 static void encode_sequence(struct xdr_stream *xdr,
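encode_reclaim_complete above reserves 8 bytes, i.e. two XDR words, and stores the operation number and the one_fs flag in network byte order (cpu_to_be32 is the kernel analogue of htonl). A self-contained sketch of the same two-word encode (buffer management simplified; operation number 58 per RFC 5661):

#include <arpa/inet.h>	/* htonl */
#include <stdint.h>
#include <stdio.h>

#define OP_RECLAIM_COMPLETE 58	/* RFC 5661 operation number */

/* Encode the two-word RECLAIM_COMPLETE body into buf; returns words written. */
static size_t encode_reclaim_complete(uint32_t *buf, uint32_t one_fs)
{
	buf[0] = htonl(OP_RECLAIM_COMPLETE);
	buf[1] = htonl(one_fs);
	return 2;
}

int main(void)
{
	uint32_t xdr[2];
	size_t n = encode_reclaim_complete(xdr, 0);	/* 0 = reclaim done for all filesystems */
	printf("encoded %zu XDR words: %08x %08x\n", n,
	       (unsigned)xdr[0], (unsigned)xdr[1]);
	return 0;
}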
@@ -2096,7 +2148,7 @@ nfs4_xdr_enc_getacl(struct rpc_rqst *req, __be32 *p,
	encode_compound_hdr(&xdr, req, &hdr);
	encode_sequence(&xdr, &args->seq_args, &hdr);
	encode_putfh(&xdr, args->fh, &hdr);
-	replen = hdr.replen + nfs4_fattr_bitmap_maxsz + 1;
+	replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
	encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr);
 
	xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
@@ -2420,6 +2472,26 @@ static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p,
	encode_nops(&hdr);
	return 0;
 }
+
+/*
+ * a RECLAIM_COMPLETE request
+ */
+static int nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req, uint32_t *p,
+				     struct nfs41_reclaim_complete_args *args)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr = {
+		.minorversion = nfs4_xdr_minorversion(&args->seq_args)
+	};
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	encode_compound_hdr(&xdr, req, &hdr);
+	encode_sequence(&xdr, &args->seq_args, &hdr);
+	encode_reclaim_complete(&xdr, args, &hdr);
+	encode_nops(&hdr);
+	return 0;
+}
+
 #endif /* CONFIG_NFS_V4_1 */
 
 static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
@@ -4528,6 +4600,11 @@ static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
 {
	return decode_op_hdr(xdr, OP_DESTROY_SESSION);
 }
+
+static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy)
+{
+	return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE);
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 static int decode_sequence(struct xdr_stream *xdr,
@@ -4583,8 +4660,8 @@ static int decode_sequence(struct xdr_stream *xdr,
	dummy = be32_to_cpup(p++);
	/* target highest slot id - currently not processed */
	dummy = be32_to_cpup(p++);
-	/* result flags - currently not processed */
-	dummy = be32_to_cpup(p);
+	/* result flags */
+	res->sr_status_flags = be32_to_cpup(p);
	status = 0;
 out_err:
	res->sr_status = status;
@@ -5309,7 +5386,7 @@ out:
 }
 
 /*
- * FSINFO request
+ * Decode FSINFO response
  */
 static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p,
			       struct nfs4_fsinfo_res *res)
@@ -5330,7 +5407,7 @@ static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p,
 }
 
 /*
- * PATHCONF request
+ * Decode PATHCONF response
  */
 static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p,
				 struct nfs4_pathconf_res *res)
@@ -5351,7 +5428,7 @@ static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p,
 }
 
 /*
- * STATFS request
+ * Decode STATFS response
  */
static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p,
			       struct nfs4_statfs_res *res)
@@ -5372,7 +5449,7 @@ static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p,
 }
 
 /*
- * GETATTR_BITMAP request
+ * Decode GETATTR_BITMAP response
  */
 static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req, __be32 *p, struct nfs4_server_caps_res *res)
 {
@@ -5411,7 +5488,7 @@ static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
 }
 
 /*
- * a SETCLIENTID request
+ * Decode SETCLIENTID response
  */
 static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
				    struct nfs_client *clp)
@@ -5428,7 +5505,7 @@ static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
 }
 
 /*
- * a SETCLIENTID_CONFIRM request
+ * Decode SETCLIENTID_CONFIRM response
  */
 static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *fsinfo)
 {
@@ -5448,7 +5525,7 @@ static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, __be32 *p, str
 }
 
 /*
- * DELEGRETURN request
+ * Decode DELEGRETURN response
  */
 static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_delegreturnres *res)
 {
@@ -5474,7 +5551,7 @@ out:
 }
 
 /*
- * FS_LOCATIONS request
+ * Decode FS_LOCATIONS response
  */
 static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p,
				     struct nfs4_fs_locations_res *res)
@@ -5504,7 +5581,7 @@ out:
 
 #if defined(CONFIG_NFS_V4_1)
 /*
- * EXCHANGE_ID request
+ * Decode EXCHANGE_ID response
  */
 static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, uint32_t *p,
				    void *res)
@@ -5521,7 +5598,7 @@ static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, uint32_t *p,
 }
 
 /*
- * a CREATE_SESSION request
+ * Decode CREATE_SESSION response
  */
 static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, uint32_t *p,
				       struct nfs41_create_session_res *res)
@@ -5538,7 +5615,7 @@ static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, uint32_t *p,
 }
 
 /*
- * a DESTROY_SESSION request
+ * Decode DESTROY_SESSION response
  */
 static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, uint32_t *p,
					void *dummy)
@@ -5555,7 +5632,7 @@ static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, uint32_t *p,
 }
 
 /*
- * a SEQUENCE request
+ * Decode SEQUENCE response
  */
 static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, uint32_t *p,
				 struct nfs4_sequence_res *res)
@@ -5572,7 +5649,7 @@ static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, uint32_t *p,
 }
 
 /*
- * a GET_LEASE_TIME request
+ * Decode GET_LEASE_TIME response
  */
 static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p,
				       struct nfs4_get_lease_time_res *res)
@@ -5591,6 +5668,25 @@ static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p,
	status = decode_fsinfo(&xdr, res->lr_fsinfo);
	return status;
 }
+
+/*
+ * Decode RECLAIM_COMPLETE response
+ */
+static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp, uint32_t *p,
+					 struct nfs41_reclaim_complete_res *res)
+{
+	struct xdr_stream xdr;
+	struct compound_hdr hdr;
+	int status;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	status = decode_compound_hdr(&xdr, &hdr);
+	if (!status)
+		status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	if (!status)
+		status = decode_reclaim_complete(&xdr, (void *)NULL);
+	return status;
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 __be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus)
@@ -5767,6 +5863,7 @@ struct rpc_procinfo nfs4_procedures[] = {
	PROC(DESTROY_SESSION,	enc_destroy_session,	dec_destroy_session),
	PROC(SEQUENCE,		enc_sequence,		dec_sequence),
	PROC(GET_LEASE_TIME,	enc_get_lease_time,	dec_get_lease_time),
+	PROC(RECLAIM_COMPLETE,	enc_reclaim_complete,	dec_reclaim_complete),
 #endif /* CONFIG_NFS_V4_1 */
 };
 
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 12c9e66d3f1d..db9b360ae19d 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -356,25 +356,19 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
	struct nfs_readres *resp = &data->res;
 
	if (resp->eof || resp->count == argp->count)
-		goto out;
+		return;
 
	/* This is a short read! */
	nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0)
-		goto out;
+		return;
 
	/* Yes, so retry the read at the end of the data */
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
-	nfs4_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
-	return;
-out:
-	nfs4_sequence_free_slot(NFS_SERVER(data->inode)->nfs_client,
-				&data->res.seq_res);
-	return;
-
+	nfs_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
 }
 
 /*
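The short-read path above now either returns (done, or no progress) or advances the request window and restarts the RPC: offset and pgbase move forward by the bytes received and count shrinks by the same amount. A tiny sketch of that window arithmetic:

#include <stdio.h>

struct read_args { unsigned long long offset; unsigned pgbase, count; };

/* Advance the request window after a short read, as nfs_readpage_retry does. */
static void advance(struct read_args *argp, unsigned got)
{
	argp->offset += got;
	argp->pgbase += got;
	argp->count  -= got;
}

int main(void)
{
	struct read_args args = { .offset = 4096, .pgbase = 0, .count = 16384 };
	advance(&args, 6000);	/* server returned only 6000 of 16384 bytes */
	printf("retry at offset=%llu pgbase=%u count=%u\n",
	       args.offset, args.pgbase, args.count);
	return 0;
}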
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 90be551b80c1..ce907efc5508 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -175,14 +175,16 @@ static const match_table_t nfs_mount_option_tokens = {
175}; 175};
176 176
177enum { 177enum {
178 Opt_xprt_udp, Opt_xprt_tcp, Opt_xprt_rdma, 178 Opt_xprt_udp, Opt_xprt_udp6, Opt_xprt_tcp, Opt_xprt_tcp6, Opt_xprt_rdma,
179 179
180 Opt_xprt_err 180 Opt_xprt_err
181}; 181};
182 182
183static const match_table_t nfs_xprt_protocol_tokens = { 183static const match_table_t nfs_xprt_protocol_tokens = {
184 { Opt_xprt_udp, "udp" }, 184 { Opt_xprt_udp, "udp" },
185 { Opt_xprt_udp6, "udp6" },
185 { Opt_xprt_tcp, "tcp" }, 186 { Opt_xprt_tcp, "tcp" },
187 { Opt_xprt_tcp6, "tcp6" },
186 { Opt_xprt_rdma, "rdma" }, 188 { Opt_xprt_rdma, "rdma" },
187 189
188 { Opt_xprt_err, NULL } 190 { Opt_xprt_err, NULL }
@@ -492,6 +494,45 @@ static const char *nfs_pseudoflavour_to_name(rpc_authflavor_t flavour)
492 return sec_flavours[i].str; 494 return sec_flavours[i].str;
493} 495}
494 496
497static void nfs_show_mountd_netid(struct seq_file *m, struct nfs_server *nfss,
498 int showdefaults)
499{
500 struct sockaddr *sap = (struct sockaddr *) &nfss->mountd_address;
501
502 seq_printf(m, ",mountproto=");
503 switch (sap->sa_family) {
504 case AF_INET:
505 switch (nfss->mountd_protocol) {
506 case IPPROTO_UDP:
507 seq_printf(m, RPCBIND_NETID_UDP);
508 break;
509 case IPPROTO_TCP:
510 seq_printf(m, RPCBIND_NETID_TCP);
511 break;
512 default:
513 if (showdefaults)
514 seq_printf(m, "auto");
515 }
516 break;
517 case AF_INET6:
518 switch (nfss->mountd_protocol) {
519 case IPPROTO_UDP:
520 seq_printf(m, RPCBIND_NETID_UDP6);
521 break;
522 case IPPROTO_TCP:
523 seq_printf(m, RPCBIND_NETID_TCP6);
524 break;
525 default:
526 if (showdefaults)
527 seq_printf(m, "auto");
528 }
529 break;
530 default:
531 if (showdefaults)
532 seq_printf(m, "auto");
533 }
534}
535
495static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss, 536static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
496 int showdefaults) 537 int showdefaults)
497{ 538{
@@ -505,7 +546,7 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
505 } 546 }
506 case AF_INET6: { 547 case AF_INET6: {
507 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; 548 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
508 seq_printf(m, ",mountaddr=%pI6", &sin6->sin6_addr); 549 seq_printf(m, ",mountaddr=%pI6c", &sin6->sin6_addr);
509 break; 550 break;
510 } 551 }
511 default: 552 default:
@@ -518,17 +559,7 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
518 if (nfss->mountd_port || showdefaults) 559 if (nfss->mountd_port || showdefaults)
519 seq_printf(m, ",mountport=%u", nfss->mountd_port); 560 seq_printf(m, ",mountport=%u", nfss->mountd_port);
520 561
521 switch (nfss->mountd_protocol) { 562 nfs_show_mountd_netid(m, nfss, showdefaults);
522 case IPPROTO_UDP:
523 seq_printf(m, ",mountproto=udp");
524 break;
525 case IPPROTO_TCP:
526 seq_printf(m, ",mountproto=tcp");
527 break;
528 default:
529 if (showdefaults)
530 seq_printf(m, ",mountproto=auto");
531 }
532} 563}
533 564
534/* 565/*
@@ -578,7 +609,7 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
578 seq_puts(m, nfs_infop->nostr); 609 seq_puts(m, nfs_infop->nostr);
579 } 610 }
580 seq_printf(m, ",proto=%s", 611 seq_printf(m, ",proto=%s",
581 rpc_peeraddr2str(nfss->client, RPC_DISPLAY_PROTO)); 612 rpc_peeraddr2str(nfss->client, RPC_DISPLAY_NETID));
582 if (version == 4) { 613 if (version == 4) {
583 if (nfss->port != NFS_PORT) 614 if (nfss->port != NFS_PORT)
584 seq_printf(m, ",port=%u", nfss->port); 615 seq_printf(m, ",port=%u", nfss->port);
@@ -714,8 +745,6 @@ static void nfs_umount_begin(struct super_block *sb)
714 struct nfs_server *server; 745 struct nfs_server *server;
715 struct rpc_clnt *rpc; 746 struct rpc_clnt *rpc;
716 747
717 lock_kernel();
718
719 server = NFS_SB(sb); 748 server = NFS_SB(sb);
720 /* -EIO all pending I/O */ 749 /* -EIO all pending I/O */
721 rpc = server->client_acl; 750 rpc = server->client_acl;
@@ -724,8 +753,6 @@ static void nfs_umount_begin(struct super_block *sb)
724 rpc = server->client; 753 rpc = server->client;
725 if (!IS_ERR(rpc)) 754 if (!IS_ERR(rpc))
726 rpc_killall_tasks(rpc); 755 rpc_killall_tasks(rpc);
727
728 unlock_kernel();
729} 756}
730 757
731static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int version) 758static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int version)
@@ -734,8 +761,6 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
734 761
735 data = kzalloc(sizeof(*data), GFP_KERNEL); 762 data = kzalloc(sizeof(*data), GFP_KERNEL);
736 if (data) { 763 if (data) {
737 data->rsize = NFS_MAX_FILE_IO_SIZE;
738 data->wsize = NFS_MAX_FILE_IO_SIZE;
739 data->acregmin = NFS_DEF_ACREGMIN; 764 data->acregmin = NFS_DEF_ACREGMIN;
740 data->acregmax = NFS_DEF_ACREGMAX; 765 data->acregmax = NFS_DEF_ACREGMAX;
741 data->acdirmin = NFS_DEF_ACDIRMIN; 766 data->acdirmin = NFS_DEF_ACDIRMIN;
@@ -887,6 +912,8 @@ static int nfs_parse_mount_options(char *raw,
887{ 912{
888 char *p, *string, *secdata; 913 char *p, *string, *secdata;
889 int rc, sloppy = 0, invalid_option = 0; 914 int rc, sloppy = 0, invalid_option = 0;
915 unsigned short protofamily = AF_UNSPEC;
916 unsigned short mountfamily = AF_UNSPEC;
890 917
891 if (!raw) { 918 if (!raw) {
892 dfprintk(MOUNT, "NFS: mount options string was NULL.\n"); 919 dfprintk(MOUNT, "NFS: mount options string was NULL.\n");
@@ -1232,12 +1259,17 @@ static int nfs_parse_mount_options(char *raw,
1232 token = match_token(string, 1259 token = match_token(string,
1233 nfs_xprt_protocol_tokens, args); 1260 nfs_xprt_protocol_tokens, args);
1234 1261
1262 protofamily = AF_INET;
1235 switch (token) { 1263 switch (token) {
1264 case Opt_xprt_udp6:
1265 protofamily = AF_INET6;
1236 case Opt_xprt_udp: 1266 case Opt_xprt_udp:
1237 mnt->flags &= ~NFS_MOUNT_TCP; 1267 mnt->flags &= ~NFS_MOUNT_TCP;
1238 mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP; 1268 mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
1239 kfree(string); 1269 kfree(string);
1240 break; 1270 break;
1271 case Opt_xprt_tcp6:
1272 protofamily = AF_INET6;
1241 case Opt_xprt_tcp: 1273 case Opt_xprt_tcp:
1242 mnt->flags |= NFS_MOUNT_TCP; 1274 mnt->flags |= NFS_MOUNT_TCP;
1243 mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP; 1275 mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
@@ -1265,10 +1297,15 @@ static int nfs_parse_mount_options(char *raw,
1265 nfs_xprt_protocol_tokens, args); 1297 nfs_xprt_protocol_tokens, args);
1266 kfree(string); 1298 kfree(string);
1267 1299
1300 mountfamily = AF_INET;
1268 switch (token) { 1301 switch (token) {
1302 case Opt_xprt_udp6:
1303 mountfamily = AF_INET6;
1269 case Opt_xprt_udp: 1304 case Opt_xprt_udp:
1270 mnt->mount_server.protocol = XPRT_TRANSPORT_UDP; 1305 mnt->mount_server.protocol = XPRT_TRANSPORT_UDP;
1271 break; 1306 break;
1307 case Opt_xprt_tcp6:
1308 mountfamily = AF_INET6;
1272 case Opt_xprt_tcp: 1309 case Opt_xprt_tcp:
1273 mnt->mount_server.protocol = XPRT_TRANSPORT_TCP; 1310 mnt->mount_server.protocol = XPRT_TRANSPORT_TCP;
1274 break; 1311 break;
@@ -1367,8 +1404,33 @@ static int nfs_parse_mount_options(char *raw,
1367 if (!sloppy && invalid_option) 1404 if (!sloppy && invalid_option)
1368 return 0; 1405 return 0;
1369 1406
1407 /*
1408 * verify that any proto=/mountproto= options match the address
1409 * familiies in the addr=/mountaddr= options.
1410 */
1411 if (protofamily != AF_UNSPEC &&
1412 protofamily != mnt->nfs_server.address.ss_family)
1413 goto out_proto_mismatch;
1414
1415 if (mountfamily != AF_UNSPEC) {
1416 if (mnt->mount_server.addrlen) {
1417 if (mountfamily != mnt->mount_server.address.ss_family)
1418 goto out_mountproto_mismatch;
1419 } else {
1420 if (mountfamily != mnt->nfs_server.address.ss_family)
1421 goto out_mountproto_mismatch;
1422 }
1423 }
1424
1370 return 1; 1425 return 1;
1371 1426
1427out_mountproto_mismatch:
1428 printk(KERN_INFO "NFS: mount server address does not match mountproto= "
1429 "option\n");
1430 return 0;
1431out_proto_mismatch:
1432 printk(KERN_INFO "NFS: server address does not match proto= option\n");
1433 return 0;
1372out_invalid_address: 1434out_invalid_address:
1373 printk(KERN_INFO "NFS: bad IP address specified: %s\n", p); 1435 printk(KERN_INFO "NFS: bad IP address specified: %s\n", p);
1374 return 0; 1436 return 0;
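With udp6/tcp6 accepted, the parser above records the address family each transport option implies and fails the mount when it disagrees with the family of the addr=/mountaddr= address. A condensed userspace sketch of that consistency check (token handling simplified):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Map a proto= token to the address family it implies (AF_UNSPEC = no constraint). */
static int proto_family(const char *proto)
{
	if (!strcmp(proto, "udp6") || !strcmp(proto, "tcp6"))
		return AF_INET6;
	if (!strcmp(proto, "udp") || !strcmp(proto, "tcp"))
		return AF_INET;
	return AF_UNSPEC;
}

static int check(const char *proto, int server_family)
{
	int pf = proto_family(proto);
	if (pf != AF_UNSPEC && pf != server_family) {
		printf("NFS: server address does not match proto= option\n");
		return 0;
	}
	return 1;
}

int main(void)
{
	printf("tcp6 + AF_INET6: %s\n", check("tcp6", AF_INET6) ? "ok" : "rejected");
	printf("tcp6 + AF_INET : %s\n", check("tcp6", AF_INET) ? "ok" : "rejected");
	return 0;
}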
@@ -1881,7 +1943,6 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
	if (data == NULL)
		return -ENOMEM;
 
-	lock_kernel();
	/* fill out struct with values from existing mount */
	data->flags = nfss->flags;
	data->rsize = nfss->rsize;
@@ -1907,7 +1968,6 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
	error = nfs_compare_remount_data(nfss, data);
 out:
	kfree(data);
-	unlock_kernel();
	return error;
 }
 
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 1064c91ae810..6da3d3ff6edd 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -83,7 +83,7 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
	struct inode *dir = data->dir;
 
	if (!NFS_PROTO(dir)->unlink_done(task, dir))
-		nfs4_restart_rpc(task, NFS_SERVER(dir)->nfs_client);
+		nfs_restart_rpc(task, NFS_SERVER(dir)->nfs_client);
 }
 
 /**
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index b1ce2ea9b93b..d171696017f4 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1216,7 +1216,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1216 */ 1216 */
1217 argp->stable = NFS_FILE_SYNC; 1217 argp->stable = NFS_FILE_SYNC;
1218 } 1218 }
1219 nfs4_restart_rpc(task, server->nfs_client); 1219 nfs_restart_rpc(task, server->nfs_client);
1220 return -EAGAIN; 1220 return -EAGAIN;
1221 } 1221 }
1222 if (time_before(complain, jiffies)) { 1222 if (time_before(complain, jiffies)) {
@@ -1228,7 +1228,6 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1228 /* Can't do anything about it except throw an error. */ 1228 /* Can't do anything about it except throw an error. */
1229 task->tk_status = -EIO; 1229 task->tk_status = -EIO;
1230 } 1230 }
1231 nfs4_sequence_free_slot(server->nfs_client, &data->res.seq_res);
1232 return 0; 1231 return 0;
1233} 1232}
1234 1233
@@ -1612,15 +1611,16 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
1612 if (ret) 1611 if (ret)
1613 goto out_unlock; 1612 goto out_unlock;
1614 page_cache_get(newpage); 1613 page_cache_get(newpage);
1614 spin_lock(&mapping->host->i_lock);
1615 req->wb_page = newpage; 1615 req->wb_page = newpage;
1616 SetPagePrivate(newpage); 1616 SetPagePrivate(newpage);
1617 set_page_private(newpage, page_private(page)); 1617 set_page_private(newpage, (unsigned long)req);
1618 ClearPagePrivate(page); 1618 ClearPagePrivate(page);
1619 set_page_private(page, 0); 1619 set_page_private(page, 0);
1620 spin_unlock(&mapping->host->i_lock);
1620 page_cache_release(page); 1621 page_cache_release(page);
1621out_unlock: 1622out_unlock:
1622 nfs_clear_page_tag_locked(req); 1623 nfs_clear_page_tag_locked(req);
1623 nfs_release_request(req);
1624out: 1624out:
1625 return ret; 1625 return ret;
1626} 1626}
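
Two things happen at once in the hunk above: the page_private handover is now serialized under the inode's i_lock, and the new page's private word is set to the request pointer itself rather than a copy of the old page's private word (dropping the nfs_release_request() call reflects that the reference is transferred, not duplicated). A minimal sketch of the resulting pattern; the helper name is illustrative, not from the patch:

	/* Move an NFS write request from oldpage to newpage. The inode
	 * spinlock keeps writeback from walking either page's private
	 * data mid-transfer.
	 */
	static void nfs_transfer_request(struct address_space *mapping,
					 struct page *oldpage,
					 struct page *newpage,
					 struct nfs_page *req)
	{
		spin_lock(&mapping->host->i_lock);
		req->wb_page = newpage;
		SetPagePrivate(newpage);
		set_page_private(newpage, (unsigned long)req);
		ClearPagePrivate(oldpage);
		set_page_private(oldpage, 0);
		spin_unlock(&mapping->host->i_lock);
	}
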
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 66a888a9ad6f..bfffd6334abb 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -2389,12 +2389,12 @@ xfs_icsb_modify_counters(
2389{ 2389{
2390 xfs_icsb_cnts_t *icsbp; 2390 xfs_icsb_cnts_t *icsbp;
2391 long long lcounter; /* long counter for 64 bit fields */ 2391 long long lcounter; /* long counter for 64 bit fields */
2392 int cpu, ret = 0; 2392 int ret = 0;
2393 2393
2394 might_sleep(); 2394 might_sleep();
2395again: 2395again:
2396 cpu = get_cpu(); 2396 preempt_disable();
2397 icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu); 2397 icsbp = this_cpu_ptr(mp->m_sb_cnts);
2398 2398
2399 /* 2399 /*
2400 * if the counter is disabled, go to slow path 2400 * if the counter is disabled, go to slow path
@@ -2438,11 +2438,11 @@ again:
2438 break; 2438 break;
2439 } 2439 }
2440 xfs_icsb_unlock_cntr(icsbp); 2440 xfs_icsb_unlock_cntr(icsbp);
2441 put_cpu(); 2441 preempt_enable();
2442 return 0; 2442 return 0;
2443 2443
2444slow_path: 2444slow_path:
2445 put_cpu(); 2445 preempt_enable();
2446 2446
2447 /* 2447 /*
2448 * serialise with a mutex so we don't burn lots of cpu on 2448 * serialise with a mutex so we don't burn lots of cpu on
@@ -2490,7 +2490,7 @@ slow_path:
2490 2490
2491balance_counter: 2491balance_counter:
2492 xfs_icsb_unlock_cntr(icsbp); 2492 xfs_icsb_unlock_cntr(icsbp);
2493 put_cpu(); 2493 preempt_enable();
2494 2494
2495 /* 2495 /*
2496 * We may have multiple threads here if multiple per-cpu 2496 * We may have multiple threads here if multiple per-cpu
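
The xfs_icsb_modify_counters() conversion is the template for most of what follows: the explicit get_cpu() / per_cpu_ptr(ptr, cpu) / put_cpu() triple becomes preempt_disable() / this_cpu_ptr(ptr) / preempt_enable(), and the cpu local disappears. A minimal sketch of the pattern against a counter from alloc_percpu(); the names are illustrative:

	long *counters;			/* counters = alloc_percpu(long); */

	static void counter_add(long delta)
	{
		preempt_disable();		/* pin the current CPU */
		*this_cpu_ptr(counters) += delta;
		preempt_enable();
	}

The gain is not only brevity: this_cpu_ptr() needs no smp_processor_id() lookup, since on an arch like x86 the per-cpu base offset is already held in a segment register.
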
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 90079c373f1c..8087b90d4673 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -56,6 +56,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
56#define __raw_get_cpu_var(var) \ 56#define __raw_get_cpu_var(var) \
57 (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset)) 57 (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
58 58
59#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
60#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
61
59 62
60#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA 63#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
61extern void setup_per_cpu_areas(void); 64extern void setup_per_cpu_areas(void);
@@ -66,6 +69,8 @@ extern void setup_per_cpu_areas(void);
66#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) 69#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
67#define __get_cpu_var(var) per_cpu_var(var) 70#define __get_cpu_var(var) per_cpu_var(var)
68#define __raw_get_cpu_var(var) per_cpu_var(var) 71#define __raw_get_cpu_var(var) per_cpu_var(var)
72#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
73#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
69 74
70#endif /* SMP */ 75#endif /* SMP */
71 76
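
These two macros are the primitives the rest of the series builds on: this_cpu_ptr() shifts a per-cpu pointer by the current CPU's offset, __this_cpu_ptr() does the same through the raw offset (no debugging checks), and on !SMP both collapse to per_cpu_ptr(ptr, 0), i.e. the pointer itself. A sketch of intended use, with the counter name illustrative; the caller must keep preemption off while the derived pointer is live:

	unsigned long *hits;	/* hits = alloc_percpu(unsigned long); */

	static void record_hit(void)
	{
		preempt_disable();
		(*__this_cpu_ptr(hits))++;	/* raw form: CPU is pinned */
		preempt_enable();
	}
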
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index c4c060208109..9b8299af3741 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -128,6 +128,8 @@
128#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040 128#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040
129#define SEQ4_STATUS_LEASE_MOVED 0x00000080 129#define SEQ4_STATUS_LEASE_MOVED 0x00000080
130#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100 130#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100
131#define SEQ4_STATUS_CB_PATH_DOWN_SESSION 0x00000200
132#define SEQ4_STATUS_BACKCHANNEL_FAULT 0x00000400
131 133
132#define NFS4_MAX_UINT64 (~(u64)0) 134#define NFS4_MAX_UINT64 (~(u64)0)
133 135
@@ -528,6 +530,7 @@ enum {
528 NFSPROC4_CLNT_DESTROY_SESSION, 530 NFSPROC4_CLNT_DESTROY_SESSION,
529 NFSPROC4_CLNT_SEQUENCE, 531 NFSPROC4_CLNT_SEQUENCE,
530 NFSPROC4_CLNT_GET_LEASE_TIME, 532 NFSPROC4_CLNT_GET_LEASE_TIME,
533 NFSPROC4_CLNT_RECLAIM_COMPLETE,
531}; 534};
532 535
533/* nfs41 types */ 536/* nfs41 types */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 320569eabe3b..34fc6be5bfcf 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -209,6 +209,7 @@ struct nfs4_session {
209 unsigned long session_state; 209 unsigned long session_state;
210 u32 hash_alg; 210 u32 hash_alg;
211 u32 ssv_len; 211 u32 ssv_len;
212 struct completion complete;
212 213
213 /* The fore and back channel */ 214 /* The fore and back channel */
214 struct nfs4_channel_attrs fc_attrs; 215 struct nfs4_channel_attrs fc_attrs;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 62f63fb0c4c8..51071b335751 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -170,8 +170,9 @@ struct nfs4_sequence_args {
170struct nfs4_sequence_res { 170struct nfs4_sequence_res {
171 struct nfs4_session *sr_session; 171 struct nfs4_session *sr_session;
172 u8 sr_slotid; /* slot used to send request */ 172 u8 sr_slotid; /* slot used to send request */
173 unsigned long sr_renewal_time;
174 int sr_status; /* sequence operation status */ 173 int sr_status; /* sequence operation status */
174 unsigned long sr_renewal_time;
175 u32 sr_status_flags;
175}; 176};
176 177
177struct nfs4_get_lease_time_args { 178struct nfs4_get_lease_time_args {
@@ -938,6 +939,16 @@ struct nfs41_create_session_args {
938struct nfs41_create_session_res { 939struct nfs41_create_session_res {
939 struct nfs_client *client; 940 struct nfs_client *client;
940}; 941};
942
943struct nfs41_reclaim_complete_args {
944 /* In the future extend to include curr_fh for use with migration */
945 unsigned char one_fs:1;
946 struct nfs4_sequence_args seq_args;
947};
948
949struct nfs41_reclaim_complete_res {
950 struct nfs4_sequence_res seq_res;
951};
941#endif /* CONFIG_NFS_V4_1 */ 952#endif /* CONFIG_NFS_V4_1 */
942 953
943struct nfs_page; 954struct nfs_page;
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 9bd03193ecd4..5a5d6ce4bd55 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -60,6 +60,7 @@
60 60
61#define DEFINE_PER_CPU_SECTION(type, name, sec) \ 61#define DEFINE_PER_CPU_SECTION(type, name, sec) \
62 __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ 62 __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
63 extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
63 __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ 64 __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
64 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ 65 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
65 __typeof__(type) per_cpu__##name 66 __typeof__(type) per_cpu__##name
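
The added line puts an extern declaration in front of the __pcpu_unique_##name definition. The definition is what makes a duplicate per-cpu name fail at link time; the preceding declaration, as far as the macro itself shows, exists so static checkers such as sparse see the symbol declared before it is defined and stop warning about it. For a hypothetical DEFINE_PER_CPU(int, foo) the emitted shape is roughly:

	extern char __pcpu_unique_foo;	/* declaration added here */
	char __pcpu_unique_foo;		/* strong definition: a second
					 * DEFINE_PER_CPU(..., foo) anywhere
					 * else breaks the link */
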
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 878836ca999c..cf5efbcf716c 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -34,8 +34,6 @@
34 34
35#ifdef CONFIG_SMP 35#ifdef CONFIG_SMP
36 36
37#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
38
39/* minimum unit size, also is the maximum supported allocation size */ 37/* minimum unit size, also is the maximum supported allocation size */
40#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) 38#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
41 39
@@ -130,30 +128,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
130#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) 128#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
131 129
132extern void *__alloc_reserved_percpu(size_t size, size_t align); 130extern void *__alloc_reserved_percpu(size_t size, size_t align);
133
134#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
135
136struct percpu_data {
137 void *ptrs[1];
138};
139
140/* pointer disguising messes up the kmemleak objects tracking */
141#ifndef CONFIG_DEBUG_KMEMLEAK
142#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
143#else
144#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
145#endif
146
147#define per_cpu_ptr(ptr, cpu) \
148({ \
149 struct percpu_data *__p = __percpu_disguise(ptr); \
150 (__typeof__(ptr))__p->ptrs[(cpu)]; \
151})
152
153#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
154
155extern void *__alloc_percpu(size_t size, size_t align); 131extern void *__alloc_percpu(size_t size, size_t align);
156extern void free_percpu(void *__pdata); 132extern void free_percpu(void *__pdata);
133extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
157 134
158#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 135#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
159extern void __init setup_per_cpu_areas(void); 136extern void __init setup_per_cpu_areas(void);
@@ -179,6 +156,11 @@ static inline void free_percpu(void *p)
179 kfree(p); 156 kfree(p);
180} 157}
181 158
159static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
160{
161 return __pa(addr);
162}
163
182static inline void __init setup_per_cpu_areas(void) { } 164static inline void __init setup_per_cpu_areas(void) { }
183 165
184static inline void *pcpu_lpage_remapped(void *kaddr) 166static inline void *pcpu_lpage_remapped(void *kaddr)
@@ -188,8 +170,8 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
188 170
189#endif /* CONFIG_SMP */ 171#endif /* CONFIG_SMP */
190 172
191#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \ 173#define alloc_percpu(type) \
192 __alignof__(type)) 174 (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
193 175
194/* 176/*
195 * Optional methods for optimized non-lvalue per-cpu variable access. 177 * Optional methods for optimized non-lvalue per-cpu variable access.
@@ -243,4 +225,404 @@ do { \
243# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=) 225# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
244#endif 226#endif
245 227
228/*
229 * Branching macros that split an operation into a set of functions
230 * called for the different scalar sizes of the objects handled.
231 */
232
233extern void __bad_size_call_parameter(void);
234
235#define __pcpu_size_call_return(stem, variable) \
236({ typeof(variable) pscr_ret__; \
237 switch(sizeof(variable)) { \
238 case 1: pscr_ret__ = stem##1(variable);break; \
239 case 2: pscr_ret__ = stem##2(variable);break; \
240 case 4: pscr_ret__ = stem##4(variable);break; \
241 case 8: pscr_ret__ = stem##8(variable);break; \
242 default: \
243 __bad_size_call_parameter();break; \
244 } \
245 pscr_ret__; \
246})
247
248#define __pcpu_size_call(stem, variable, ...) \
249do { \
250 switch(sizeof(variable)) { \
251 case 1: stem##1(variable, __VA_ARGS__);break; \
252 case 2: stem##2(variable, __VA_ARGS__);break; \
253 case 4: stem##4(variable, __VA_ARGS__);break; \
254 case 8: stem##8(variable, __VA_ARGS__);break; \
255 default: \
256 __bad_size_call_parameter();break; \
257 } \
258} while (0)
259
260/*
261 * Optimized manipulation for memory allocated through the per cpu
262 * allocator or for addresses of per cpu variables (can be determined
263 * using per_cpu_var(xx)).
264 *
265 * These operations guarantee exclusivity of access against other operations
266 * on the *same* processor. The assumption is that per cpu data is only
267 * accessed by a single processor instance (the current one).
268 *
269 * The first group is used for accesses that must be done in a
270 * preemption-safe way, since the context is not known to be preempt
271 * safe. Interrupts may still occur. If an interrupt modifies the variable
272 * too then RMW actions will not be reliable.
273 *
274 * The arch code can provide optimized functions in two ways:
275 *
276 * 1. Override the function completely. F.e. define this_cpu_add().
277 * The arch must then ensure that the various scalar formats passed
278 * are handled correctly.
279 *
280 * 2. Provide functions for certain scalar sizes. F.e. provide
281 * this_cpu_add_2() to provide per cpu atomic operations for 2 byte
282 * sized RMW actions. If arch code does not provide operations for
283 * a scalar size then the fallback in the generic code will be
284 * used.
285 */
286
287#define _this_cpu_generic_read(pcp) \
288({ typeof(pcp) ret__; \
289 preempt_disable(); \
290 ret__ = *this_cpu_ptr(&(pcp)); \
291 preempt_enable(); \
292 ret__; \
293})
294
295#ifndef this_cpu_read
296# ifndef this_cpu_read_1
297# define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp)
298# endif
299# ifndef this_cpu_read_2
300# define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp)
301# endif
302# ifndef this_cpu_read_4
303# define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp)
304# endif
305# ifndef this_cpu_read_8
306# define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp)
307# endif
308# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp))
309#endif
310
311#define _this_cpu_generic_to_op(pcp, val, op) \
312do { \
313 preempt_disable(); \
314 *__this_cpu_ptr(&pcp) op val; \
315 preempt_enable(); \
316} while (0)
317
318#ifndef this_cpu_write
319# ifndef this_cpu_write_1
320# define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
321# endif
322# ifndef this_cpu_write_2
323# define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
324# endif
325# ifndef this_cpu_write_4
326# define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
327# endif
328# ifndef this_cpu_write_8
329# define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
330# endif
331# define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val))
332#endif
333
334#ifndef this_cpu_add
335# ifndef this_cpu_add_1
336# define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
337# endif
338# ifndef this_cpu_add_2
339# define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
340# endif
341# ifndef this_cpu_add_4
342# define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
343# endif
344# ifndef this_cpu_add_8
345# define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
346# endif
347# define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val))
348#endif
349
350#ifndef this_cpu_sub
351# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val))
352#endif
353
354#ifndef this_cpu_inc
355# define this_cpu_inc(pcp) this_cpu_add((pcp), 1)
356#endif
357
358#ifndef this_cpu_dec
359# define this_cpu_dec(pcp) this_cpu_sub((pcp), 1)
360#endif
361
362#ifndef this_cpu_and
363# ifndef this_cpu_and_1
364# define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
365# endif
366# ifndef this_cpu_and_2
367# define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
368# endif
369# ifndef this_cpu_and_4
370# define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
371# endif
372# ifndef this_cpu_and_8
373# define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
374# endif
375# define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val))
376#endif
377
378#ifndef this_cpu_or
379# ifndef this_cpu_or_1
380# define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
381# endif
382# ifndef this_cpu_or_2
383# define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
384# endif
385# ifndef this_cpu_or_4
386# define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
387# endif
388# ifndef this_cpu_or_8
389# define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
390# endif
391# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
392#endif
393
394#ifndef this_cpu_xor
395# ifndef this_cpu_xor_1
396# define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
397# endif
398# ifndef this_cpu_xor_2
399# define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
400# endif
401# ifndef this_cpu_xor_4
402# define this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
403# endif
404# ifndef this_cpu_xor_8
405# define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
406# endif
407 # define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_xor_, (pcp), (val))
408#endif
409
410/*
411 * Generic percpu operations that do not require preemption handling.
412 * Either we do not care about races or the caller has the
413 * responsibility of handling preemption issues. Arch code can still
414 * override these instructions since the arch per cpu code may be more
415 * efficient and may actually get race freeness for free (that is the
416 * case for x86 for example).
417 *
418 * If there is no other protection through preempt disable and/or
419 * disabling interrupts then one of these RMW operations can show unexpected
420 * behavior because the execution thread was rescheduled on another processor
421 * or an interrupt occurred and the same percpu variable was modified from
422 * the interrupt context.
423 */
424#ifndef __this_cpu_read
425# ifndef __this_cpu_read_1
426# define __this_cpu_read_1(pcp) (*__this_cpu_ptr(&(pcp)))
427# endif
428# ifndef __this_cpu_read_2
429# define __this_cpu_read_2(pcp) (*__this_cpu_ptr(&(pcp)))
430# endif
431# ifndef __this_cpu_read_4
432# define __this_cpu_read_4(pcp) (*__this_cpu_ptr(&(pcp)))
433# endif
434# ifndef __this_cpu_read_8
435# define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp)))
436# endif
437# define __this_cpu_read(pcp) __pcpu_size_call_return(__this_cpu_read_, (pcp))
438#endif
439
440#define __this_cpu_generic_to_op(pcp, val, op) \
441do { \
442 *__this_cpu_ptr(&(pcp)) op val; \
443} while (0)
444
445#ifndef __this_cpu_write
446# ifndef __this_cpu_write_1
447# define __this_cpu_write_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
448# endif
449# ifndef __this_cpu_write_2
450# define __this_cpu_write_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
451# endif
452# ifndef __this_cpu_write_4
453# define __this_cpu_write_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
454# endif
455# ifndef __this_cpu_write_8
456# define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
457# endif
458# define __this_cpu_write(pcp, val) __pcpu_size_call(__this_cpu_write_, (pcp), (val))
459#endif
460
461#ifndef __this_cpu_add
462# ifndef __this_cpu_add_1
463# define __this_cpu_add_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
464# endif
465# ifndef __this_cpu_add_2
466# define __this_cpu_add_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
467# endif
468# ifndef __this_cpu_add_4
469# define __this_cpu_add_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
470# endif
471# ifndef __this_cpu_add_8
472# define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
473# endif
474# define __this_cpu_add(pcp, val) __pcpu_size_call(__this_cpu_add_, (pcp), (val))
475#endif
476
477#ifndef __this_cpu_sub
478# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val))
479#endif
480
481#ifndef __this_cpu_inc
482# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1)
483#endif
484
485#ifndef __this_cpu_dec
486# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1)
487#endif
488
489#ifndef __this_cpu_and
490# ifndef __this_cpu_and_1
491# define __this_cpu_and_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
492# endif
493# ifndef __this_cpu_and_2
494# define __this_cpu_and_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
495# endif
496# ifndef __this_cpu_and_4
497# define __this_cpu_and_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
498# endif
499# ifndef __this_cpu_and_8
500# define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
501# endif
502# define __this_cpu_and(pcp, val) __pcpu_size_call(__this_cpu_and_, (pcp), (val))
503#endif
504
505#ifndef __this_cpu_or
506# ifndef __this_cpu_or_1
507# define __this_cpu_or_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
508# endif
509# ifndef __this_cpu_or_2
510# define __this_cpu_or_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
511# endif
512# ifndef __this_cpu_or_4
513# define __this_cpu_or_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
514# endif
515# ifndef __this_cpu_or_8
516# define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
517# endif
518# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val))
519#endif
520
521#ifndef __this_cpu_xor
522# ifndef __this_cpu_xor_1
523# define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
524# endif
525# ifndef __this_cpu_xor_2
526# define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
527# endif
528# ifndef __this_cpu_xor_4
529# define __this_cpu_xor_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
530# endif
531# ifndef __this_cpu_xor_8
532# define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
533# endif
534# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
535#endif
536
537/*
538 * IRQ safe versions of the per cpu RMW operations. Note that these operations
539 * are *not* safe against modification of the same variable from another
540 * processor (which one gets when using regular atomic operations).
541 * They are guaranteed to be atomic vs. local interrupts and
542 * preemption only.
543 */
544#define irqsafe_cpu_generic_to_op(pcp, val, op) \
545do { \
546 unsigned long flags; \
547 local_irq_save(flags); \
548 *__this_cpu_ptr(&(pcp)) op val; \
549 local_irq_restore(flags); \
550} while (0)
551
552#ifndef irqsafe_cpu_add
553# ifndef irqsafe_cpu_add_1
554# define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
555# endif
556# ifndef irqsafe_cpu_add_2
557# define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
558# endif
559# ifndef irqsafe_cpu_add_4
560# define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
561# endif
562# ifndef irqsafe_cpu_add_8
563# define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
564# endif
565# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
566#endif
567
568#ifndef irqsafe_cpu_sub
569# define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val))
570#endif
571
572#ifndef irqsafe_cpu_inc
573# define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1)
574#endif
575
576#ifndef irqsafe_cpu_dec
577# define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1)
578#endif
579
580#ifndef irqsafe_cpu_and
581# ifndef irqsafe_cpu_and_1
582# define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
583# endif
584# ifndef irqsafe_cpu_and_2
585# define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
586# endif
587# ifndef irqsafe_cpu_and_4
588# define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
589# endif
590# ifndef irqsafe_cpu_and_8
591# define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
592# endif
593 # define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
594#endif
595
596#ifndef irqsafe_cpu_or
597# ifndef irqsafe_cpu_or_1
598# define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
599# endif
600# ifndef irqsafe_cpu_or_2
601# define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
602# endif
603# ifndef irqsafe_cpu_or_4
604# define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
605# endif
606# ifndef irqsafe_cpu_or_8
607# define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
608# endif
609 # define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
610#endif
611
612#ifndef irqsafe_cpu_xor
613# ifndef irqsafe_cpu_xor_1
614# define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
615# endif
616# ifndef irqsafe_cpu_xor_2
617# define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
618# endif
619# ifndef irqsafe_cpu_xor_4
620# define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
621# endif
622# ifndef irqsafe_cpu_xor_8
623# define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
624# endif
625 # define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
626#endif
627
246#endif /* __LINUX_PERCPU_H */ 628#endif /* __LINUX_PERCPU_H */
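
Taken together, the rewritten percpu.h offers three families with the same shape and different guarantees, all funneled through the sizeof() dispatch above so an arch can override per size (this_cpu_add_4() and friends) or per operation (this_cpu_add()). A usage sketch against a statically defined counter; the names are illustrative:

	DEFINE_PER_CPU(unsigned long, nr_foo);

	void foo_any_context(void)
	{
		/* Preemptible callers: the generic fallback brackets the
		 * RMW in preempt_disable()/preempt_enable(); x86 can use
		 * a single memory-operand instruction instead.
		 */
		this_cpu_inc(per_cpu_var(nr_foo));
	}

	void foo_already_pinned(void)
	{
		/* Caller is already non-preemptible (or tolerates the
		 * race): no preemption handling at all.
		 */
		__this_cpu_inc(per_cpu_var(nr_foo));
	}

	void foo_vs_irq(void)
	{
		/* The same variable is also modified from interrupt
		 * context on this CPU: the irqsafe_ variant brackets the
		 * RMW in local_irq_save()/local_irq_restore().
		 */
		irqsafe_cpu_add(per_cpu_var(nr_foo), 1);
	}

None of these are atomic against other CPUs; the contract is that each per-cpu instance is only ever touched by its own processor.
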
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 401097781fc0..1906782ec86b 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -130,12 +130,14 @@ struct rpc_task_setup {
130#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ 130#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
131#define RPC_TASK_KILLED 0x0100 /* task was killed */ 131#define RPC_TASK_KILLED 0x0100 /* task was killed */
132#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */ 132#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
133#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
133 134
134#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) 135#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
135#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) 136#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
136#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS) 137#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
137#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED) 138#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
138#define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT) 139#define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT)
140#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
139 141
140#define RPC_TASK_RUNNING 0 142#define RPC_TASK_RUNNING 0
141#define RPC_TASK_QUEUED 1 143#define RPC_TASK_QUEUED 1
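
RPC_TASK_SOFTCONN sits alongside RPC_TASK_SOFT: judging by the /* Fail if can't connect */ comment, SOFT makes the whole request obey soft timeouts while SOFTCONN makes only the connection attempt fail fast. A hypothetical caller, not taken from this patch, would test it the same way as the other flag predicates:

	if (RPC_IS_SOFTCONN(task)) {
		/* don't keep retrying the connect */
		rpc_exit(task, -ENOTCONN);
		return;
	}
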
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 2d0f222388a8..d85889710f9b 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -76,24 +76,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
76 76
77static inline void __count_vm_event(enum vm_event_item item) 77static inline void __count_vm_event(enum vm_event_item item)
78{ 78{
79 __get_cpu_var(vm_event_states).event[item]++; 79 __this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
80} 80}
81 81
82static inline void count_vm_event(enum vm_event_item item) 82static inline void count_vm_event(enum vm_event_item item)
83{ 83{
84 get_cpu_var(vm_event_states).event[item]++; 84 this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
85 put_cpu();
86} 85}
87 86
88static inline void __count_vm_events(enum vm_event_item item, long delta) 87static inline void __count_vm_events(enum vm_event_item item, long delta)
89{ 88{
90 __get_cpu_var(vm_event_states).event[item] += delta; 89 __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
91} 90}
92 91
93static inline void count_vm_events(enum vm_event_item item, long delta) 92static inline void count_vm_events(enum vm_event_item item, long delta)
94{ 93{
95 get_cpu_var(vm_event_states).event[item] += delta; 94 this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
96 put_cpu();
97} 95}
98 96
99extern void all_vm_events(unsigned long *); 97extern void all_vm_events(unsigned long *);
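
The vmstat conversion shows why dropping put_cpu() is safe: preemption handling moved inside the macro. The generic this_cpu_add() fallback brackets the RMW in preempt_disable()/preempt_enable() itself, so the call site shrinks from:

	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();

to the single dispatching line now in the header:

	this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
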
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 0302f31a2fb7..b0173202cad9 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -88,12 +88,7 @@ struct neigh_statistics {
88 unsigned long unres_discards; /* number of unresolved drops */ 88 unsigned long unres_discards; /* number of unresolved drops */
89}; 89};
90 90
91#define NEIGH_CACHE_STAT_INC(tbl, field) \ 91#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
92 do { \
93 preempt_disable(); \
94 (per_cpu_ptr((tbl)->stats, smp_processor_id())->field)++; \
95 preempt_enable(); \
96 } while (0)
97 92
98struct neighbour { 93struct neighbour {
99 struct neighbour *next; 94 struct neighbour *next;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 5cf7270e3ffc..a0904adfb8f7 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -293,11 +293,11 @@ extern unsigned int nf_conntrack_htable_size;
293extern unsigned int nf_conntrack_max; 293extern unsigned int nf_conntrack_max;
294 294
295#define NF_CT_STAT_INC(net, count) \ 295#define NF_CT_STAT_INC(net, count) \
296 (per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++) 296 __this_cpu_inc((net)->ct.stat->count)
297#define NF_CT_STAT_INC_ATOMIC(net, count) \ 297#define NF_CT_STAT_INC_ATOMIC(net, count) \
298do { \ 298do { \
299 local_bh_disable(); \ 299 local_bh_disable(); \
300 per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++; \ 300 __this_cpu_inc((net)->ct.stat->count); \
301 local_bh_enable(); \ 301 local_bh_enable(); \
302} while (0) 302} while (0)
303 303
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 8c842e06bec8..f0d756f2ac99 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -136,45 +136,31 @@ struct linux_xfrm_mib {
136#define SNMP_STAT_BHPTR(name) (name[0]) 136#define SNMP_STAT_BHPTR(name) (name[0])
137#define SNMP_STAT_USRPTR(name) (name[1]) 137#define SNMP_STAT_USRPTR(name) (name[1])
138 138
139#define SNMP_INC_STATS_BH(mib, field) \ 139#define SNMP_INC_STATS_BH(mib, field) \
140 (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++) 140 __this_cpu_inc(mib[0]->mibs[field])
141#define SNMP_INC_STATS_USER(mib, field) \ 141#define SNMP_INC_STATS_USER(mib, field) \
142 do { \ 142 this_cpu_inc(mib[1]->mibs[field])
143 per_cpu_ptr(mib[1], get_cpu())->mibs[field]++; \ 143#define SNMP_INC_STATS(mib, field) \
144 put_cpu(); \ 144 this_cpu_inc(mib[!in_softirq()]->mibs[field])
145 } while (0) 145#define SNMP_DEC_STATS(mib, field) \
146#define SNMP_INC_STATS(mib, field) \ 146 this_cpu_dec(mib[!in_softirq()]->mibs[field])
147 do { \ 147#define SNMP_ADD_STATS_BH(mib, field, addend) \
148 per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]++; \ 148 __this_cpu_add(mib[0]->mibs[field], addend)
149 put_cpu(); \ 149#define SNMP_ADD_STATS_USER(mib, field, addend) \
150 } while (0) 150 this_cpu_add(mib[1]->mibs[field], addend)
151#define SNMP_DEC_STATS(mib, field) \
152 do { \
153 per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]--; \
154 put_cpu(); \
155 } while (0)
156#define SNMP_ADD_STATS(mib, field, addend) \
157 do { \
158 per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field] += addend; \
159 put_cpu(); \
160 } while (0)
161#define SNMP_ADD_STATS_BH(mib, field, addend) \
162 (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend)
163#define SNMP_ADD_STATS_USER(mib, field, addend) \
164 do { \
165 per_cpu_ptr(mib[1], get_cpu())->mibs[field] += addend; \
166 put_cpu(); \
167 } while (0)
168#define SNMP_UPD_PO_STATS(mib, basefield, addend) \ 151#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
169 do { \ 152 do { \
170 __typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], get_cpu());\ 153 __typeof__(mib[0]) ptr; \
154 preempt_disable(); \
155 ptr = this_cpu_ptr((mib)[!in_softirq()]); \
171 ptr->mibs[basefield##PKTS]++; \ 156 ptr->mibs[basefield##PKTS]++; \
172 ptr->mibs[basefield##OCTETS] += addend;\ 157 ptr->mibs[basefield##OCTETS] += addend;\
173 put_cpu(); \ 158 preempt_enable(); \
174 } while (0) 159 } while (0)
175#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ 160#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
176 do { \ 161 do { \
177 __typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id());\ 162 __typeof__(mib[0]) ptr = \
163 __this_cpu_ptr((mib)[!in_softirq()]); \
178 ptr->mibs[basefield##PKTS]++; \ 164 ptr->mibs[basefield##PKTS]++; \
179 ptr->mibs[basefield##OCTETS] += addend;\ 165 ptr->mibs[basefield##OCTETS] += addend;\
180 } while (0) 166 } while (0)
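
The SNMP macros keep their two-pool layout: mib[0] is updated from BH context, mib[1] from process context, and the !in_softirq() index selects between them. With this_cpu_inc()/this_cpu_add() handling preemption internally, the get_cpu()/put_cpu() boilerplate folds away. A hypothetical process-context call:

	/* !in_softirq() == 1 here, selecting the user-context pool;
	 * preemption is handled inside this_cpu_inc() itself
	 */
	SNMP_INC_STATS(net->mib.ip_statistics, IPSTATS_MIB_OUTREQUESTS);

SNMP_UPD_PO_STATS is the exception that still needs an explicit preempt_disable() pair: it makes two updates through one pointer, and both must land on the same CPU's instance.
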
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 4f8df01dbe51..429540c70d3f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -140,7 +140,8 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
140} 140}
141 141
142#ifdef CONFIG_LOCK_STAT 142#ifdef CONFIG_LOCK_STAT
143static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); 143static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
144 cpu_lock_stats);
144 145
145static inline u64 lockstat_clock(void) 146static inline u64 lockstat_clock(void)
146{ 147{
@@ -198,7 +199,7 @@ struct lock_class_stats lock_stats(struct lock_class *class)
198 memset(&stats, 0, sizeof(struct lock_class_stats)); 199 memset(&stats, 0, sizeof(struct lock_class_stats));
199 for_each_possible_cpu(cpu) { 200 for_each_possible_cpu(cpu) {
200 struct lock_class_stats *pcs = 201 struct lock_class_stats *pcs =
201 &per_cpu(lock_stats, cpu)[class - lock_classes]; 202 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
202 203
203 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++) 204 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
204 stats.contention_point[i] += pcs->contention_point[i]; 205 stats.contention_point[i] += pcs->contention_point[i];
@@ -225,7 +226,7 @@ void clear_lock_stats(struct lock_class *class)
225 226
226 for_each_possible_cpu(cpu) { 227 for_each_possible_cpu(cpu) {
227 struct lock_class_stats *cpu_stats = 228 struct lock_class_stats *cpu_stats =
228 &per_cpu(lock_stats, cpu)[class - lock_classes]; 229 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
229 230
230 memset(cpu_stats, 0, sizeof(struct lock_class_stats)); 231 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
231 } 232 }
@@ -235,12 +236,12 @@ void clear_lock_stats(struct lock_class *class)
235 236
236static struct lock_class_stats *get_lock_stats(struct lock_class *class) 237static struct lock_class_stats *get_lock_stats(struct lock_class *class)
237{ 238{
238 return &get_cpu_var(lock_stats)[class - lock_classes]; 239 return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
239} 240}
240 241
241static void put_lock_stats(struct lock_class_stats *stats) 242static void put_lock_stats(struct lock_class_stats *stats)
242{ 243{
243 put_cpu_var(lock_stats); 244 put_cpu_var(cpu_lock_stats);
244} 245}
245 246
246static void lock_release_holdtime(struct held_lock *hlock) 247static void lock_release_holdtime(struct held_lock *hlock)
diff --git a/kernel/module.c b/kernel/module.c
index 5842a71cf052..12afc5a3ddd3 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -370,8 +370,6 @@ EXPORT_SYMBOL_GPL(find_module);
370 370
371#ifdef CONFIG_SMP 371#ifdef CONFIG_SMP
372 372
373#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
374
375static void *percpu_modalloc(unsigned long size, unsigned long align, 373static void *percpu_modalloc(unsigned long size, unsigned long align,
376 const char *name) 374 const char *name)
377{ 375{
@@ -395,154 +393,6 @@ static void percpu_modfree(void *freeme)
395 free_percpu(freeme); 393 free_percpu(freeme);
396} 394}
397 395
398#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
399
400/* Number of blocks used and allocated. */
401static unsigned int pcpu_num_used, pcpu_num_allocated;
402/* Size of each block. -ve means used. */
403static int *pcpu_size;
404
405static int split_block(unsigned int i, unsigned short size)
406{
407 /* Reallocation required? */
408 if (pcpu_num_used + 1 > pcpu_num_allocated) {
409 int *new;
410
411 new = krealloc(pcpu_size, sizeof(new[0])*pcpu_num_allocated*2,
412 GFP_KERNEL);
413 if (!new)
414 return 0;
415
416 pcpu_num_allocated *= 2;
417 pcpu_size = new;
418 }
419
420 /* Insert a new subblock */
421 memmove(&pcpu_size[i+1], &pcpu_size[i],
422 sizeof(pcpu_size[0]) * (pcpu_num_used - i));
423 pcpu_num_used++;
424
425 pcpu_size[i+1] -= size;
426 pcpu_size[i] = size;
427 return 1;
428}
429
430static inline unsigned int block_size(int val)
431{
432 if (val < 0)
433 return -val;
434 return val;
435}
436
437static void *percpu_modalloc(unsigned long size, unsigned long align,
438 const char *name)
439{
440 unsigned long extra;
441 unsigned int i;
442 void *ptr;
443 int cpu;
444
445 if (align > PAGE_SIZE) {
446 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
447 name, align, PAGE_SIZE);
448 align = PAGE_SIZE;
449 }
450
451 ptr = __per_cpu_start;
452 for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
453 /* Extra for alignment requirement. */
454 extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr;
455 BUG_ON(i == 0 && extra != 0);
456
457 if (pcpu_size[i] < 0 || pcpu_size[i] < extra + size)
458 continue;
459
460 /* Transfer extra to previous block. */
461 if (pcpu_size[i-1] < 0)
462 pcpu_size[i-1] -= extra;
463 else
464 pcpu_size[i-1] += extra;
465 pcpu_size[i] -= extra;
466 ptr += extra;
467
468 /* Split block if warranted */
469 if (pcpu_size[i] - size > sizeof(unsigned long))
470 if (!split_block(i, size))
471 return NULL;
472
473 /* add the per-cpu scanning areas */
474 for_each_possible_cpu(cpu)
475 kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
476 GFP_KERNEL);
477
478 /* Mark allocated */
479 pcpu_size[i] = -pcpu_size[i];
480 return ptr;
481 }
482
483 printk(KERN_WARNING "Could not allocate %lu bytes percpu data\n",
484 size);
485 return NULL;
486}
487
488static void percpu_modfree(void *freeme)
489{
490 unsigned int i;
491 void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
492 int cpu;
493
494 /* First entry is core kernel percpu data. */
495 for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
496 if (ptr == freeme) {
497 pcpu_size[i] = -pcpu_size[i];
498 goto free;
499 }
500 }
501 BUG();
502
503 free:
504 /* remove the per-cpu scanning areas */
505 for_each_possible_cpu(cpu)
506 kmemleak_free(freeme + per_cpu_offset(cpu));
507
508 /* Merge with previous? */
509 if (pcpu_size[i-1] >= 0) {
510 pcpu_size[i-1] += pcpu_size[i];
511 pcpu_num_used--;
512 memmove(&pcpu_size[i], &pcpu_size[i+1],
513 (pcpu_num_used - i) * sizeof(pcpu_size[0]));
514 i--;
515 }
516 /* Merge with next? */
517 if (i+1 < pcpu_num_used && pcpu_size[i+1] >= 0) {
518 pcpu_size[i] += pcpu_size[i+1];
519 pcpu_num_used--;
520 memmove(&pcpu_size[i+1], &pcpu_size[i+2],
521 (pcpu_num_used - (i+1)) * sizeof(pcpu_size[0]));
522 }
523}
524
525static int percpu_modinit(void)
526{
527 pcpu_num_used = 2;
528 pcpu_num_allocated = 2;
529 pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated,
530 GFP_KERNEL);
531 /* Static in-kernel percpu data (used). */
532 pcpu_size[0] = -(__per_cpu_end-__per_cpu_start);
533 /* Free room. */
534 pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];
535 if (pcpu_size[1] < 0) {
536 printk(KERN_ERR "No per-cpu room for modules.\n");
537 pcpu_num_used = 1;
538 }
539
540 return 0;
541}
542__initcall(percpu_modinit);
543
544#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
545
546static unsigned int find_pcpusec(Elf_Ehdr *hdr, 396static unsigned int find_pcpusec(Elf_Ehdr *hdr,
547 Elf_Shdr *sechdrs, 397 Elf_Shdr *sechdrs,
548 const char *secstrings) 398 const char *secstrings)
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index a621a67ef4e3..9bb52177af02 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -763,13 +763,13 @@ static void rcu_torture_timer(unsigned long unused)
763 /* Should not happen, but... */ 763 /* Should not happen, but... */
764 pipe_count = RCU_TORTURE_PIPE_LEN; 764 pipe_count = RCU_TORTURE_PIPE_LEN;
765 } 765 }
766 ++__get_cpu_var(rcu_torture_count)[pipe_count]; 766 __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
767 completed = cur_ops->completed() - completed; 767 completed = cur_ops->completed() - completed;
768 if (completed > RCU_TORTURE_PIPE_LEN) { 768 if (completed > RCU_TORTURE_PIPE_LEN) {
769 /* Should not happen, but... */ 769 /* Should not happen, but... */
770 completed = RCU_TORTURE_PIPE_LEN; 770 completed = RCU_TORTURE_PIPE_LEN;
771 } 771 }
772 ++__get_cpu_var(rcu_torture_batch)[completed]; 772 __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
773 preempt_enable(); 773 preempt_enable();
774 cur_ops->readunlock(idx); 774 cur_ops->readunlock(idx);
775} 775}
@@ -818,13 +818,13 @@ rcu_torture_reader(void *arg)
818 /* Should not happen, but... */ 818 /* Should not happen, but... */
819 pipe_count = RCU_TORTURE_PIPE_LEN; 819 pipe_count = RCU_TORTURE_PIPE_LEN;
820 } 820 }
821 ++__get_cpu_var(rcu_torture_count)[pipe_count]; 821 __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
822 completed = cur_ops->completed() - completed; 822 completed = cur_ops->completed() - completed;
823 if (completed > RCU_TORTURE_PIPE_LEN) { 823 if (completed > RCU_TORTURE_PIPE_LEN) {
824 /* Should not happen, but... */ 824 /* Should not happen, but... */
825 completed = RCU_TORTURE_PIPE_LEN; 825 completed = RCU_TORTURE_PIPE_LEN;
826 } 826 }
827 ++__get_cpu_var(rcu_torture_batch)[completed]; 827 __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
828 preempt_enable(); 828 preempt_enable();
829 cur_ops->readunlock(idx); 829 cur_ops->readunlock(idx);
830 schedule(); 830 schedule();
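
Worth noting in the rcutorture conversion: the new macros take any scalar lvalue, including an element of a per-cpu array, because __pcpu_size_call() dispatches purely on sizeof() of the expression. The old ++__get_cpu_var(rcu_torture_count)[pipe_count] therefore maps directly to:

	__this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);

The callers keep their surrounding preempt_disable()/preempt_enable(), which is why the raw __this_cpu variant, with no internal preemption handling, is the right choice here.
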
diff --git a/kernel/sched.c b/kernel/sched.c
index ff39cadf621e..fd05861b2111 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -298,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
298 298
299#ifdef CONFIG_RT_GROUP_SCHED 299#ifdef CONFIG_RT_GROUP_SCHED
300static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); 300static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
301static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq); 301static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
302#endif /* CONFIG_RT_GROUP_SCHED */ 302#endif /* CONFIG_RT_GROUP_SCHED */
303#else /* !CONFIG_USER_SCHED */ 303#else /* !CONFIG_USER_SCHED */
304#define root_task_group init_task_group 304#define root_task_group init_task_group
@@ -8286,14 +8286,14 @@ enum s_alloc {
8286 */ 8286 */
8287#ifdef CONFIG_SCHED_SMT 8287#ifdef CONFIG_SCHED_SMT
8288static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); 8288static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
8289static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); 8289static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
8290 8290
8291static int 8291static int
8292cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, 8292cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
8293 struct sched_group **sg, struct cpumask *unused) 8293 struct sched_group **sg, struct cpumask *unused)
8294{ 8294{
8295 if (sg) 8295 if (sg)
8296 *sg = &per_cpu(sched_group_cpus, cpu).sg; 8296 *sg = &per_cpu(sched_groups, cpu).sg;
8297 return cpu; 8297 return cpu;
8298} 8298}
8299#endif /* CONFIG_SCHED_SMT */ 8299#endif /* CONFIG_SCHED_SMT */
@@ -9583,7 +9583,7 @@ void __init sched_init(void)
9583#elif defined CONFIG_USER_SCHED 9583#elif defined CONFIG_USER_SCHED
9584 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL); 9584 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
9585 init_tg_rt_entry(&init_task_group, 9585 init_tg_rt_entry(&init_task_group,
9586 &per_cpu(init_rt_rq, i), 9586 &per_cpu(init_rt_rq_var, i),
9587 &per_cpu(init_sched_rt_entity, i), i, 1, 9587 &per_cpu(init_sched_rt_entity, i), i, 1,
9588 root_task_group.rt_se[i]); 9588 root_task_group.rt_se[i]);
9589#endif 9589#endif
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 21939d9e830e..a09502e2ef75 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -697,7 +697,7 @@ void __init softirq_init(void)
697 open_softirq(HI_SOFTIRQ, tasklet_hi_action); 697 open_softirq(HI_SOFTIRQ, tasklet_hi_action);
698} 698}
699 699
700static int ksoftirqd(void * __bind_cpu) 700static int run_ksoftirqd(void * __bind_cpu)
701{ 701{
702 set_current_state(TASK_INTERRUPTIBLE); 702 set_current_state(TASK_INTERRUPTIBLE);
703 703
@@ -810,7 +810,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
810 switch (action) { 810 switch (action) {
811 case CPU_UP_PREPARE: 811 case CPU_UP_PREPARE:
812 case CPU_UP_PREPARE_FROZEN: 812 case CPU_UP_PREPARE_FROZEN:
813 p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu); 813 p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
814 if (IS_ERR(p)) { 814 if (IS_ERR(p)) {
815 printk("ksoftirqd for %i failed\n", hotcpu); 815 printk("ksoftirqd for %i failed\n", hotcpu);
816 return NOTIFY_BAD; 816 return NOTIFY_BAD;
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 81324d12eb35..d22579087e27 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -22,9 +22,9 @@
22 22
23static DEFINE_SPINLOCK(print_lock); 23static DEFINE_SPINLOCK(print_lock);
24 24
25static DEFINE_PER_CPU(unsigned long, touch_timestamp); 25static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
26static DEFINE_PER_CPU(unsigned long, print_timestamp); 26static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
27static DEFINE_PER_CPU(struct task_struct *, watchdog_task); 27static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
28 28
29static int __read_mostly did_panic; 29static int __read_mostly did_panic;
30int __read_mostly softlockup_thresh = 60; 30int __read_mostly softlockup_thresh = 60;
@@ -70,12 +70,12 @@ static void __touch_softlockup_watchdog(void)
70{ 70{
71 int this_cpu = raw_smp_processor_id(); 71 int this_cpu = raw_smp_processor_id();
72 72
73 __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu); 73 __raw_get_cpu_var(softlockup_touch_ts) = get_timestamp(this_cpu);
74} 74}
75 75
76void touch_softlockup_watchdog(void) 76void touch_softlockup_watchdog(void)
77{ 77{
78 __raw_get_cpu_var(touch_timestamp) = 0; 78 __raw_get_cpu_var(softlockup_touch_ts) = 0;
79} 79}
80EXPORT_SYMBOL(touch_softlockup_watchdog); 80EXPORT_SYMBOL(touch_softlockup_watchdog);
81 81
@@ -85,7 +85,7 @@ void touch_all_softlockup_watchdogs(void)
85 85
86 /* Cause each CPU to re-update its timestamp rather than complain */ 86 /* Cause each CPU to re-update its timestamp rather than complain */
87 for_each_online_cpu(cpu) 87 for_each_online_cpu(cpu)
88 per_cpu(touch_timestamp, cpu) = 0; 88 per_cpu(softlockup_touch_ts, cpu) = 0;
89} 89}
90EXPORT_SYMBOL(touch_all_softlockup_watchdogs); 90EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
91 91
@@ -104,28 +104,28 @@ int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
104void softlockup_tick(void) 104void softlockup_tick(void)
105{ 105{
106 int this_cpu = smp_processor_id(); 106 int this_cpu = smp_processor_id();
107 unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu); 107 unsigned long touch_ts = per_cpu(softlockup_touch_ts, this_cpu);
108 unsigned long print_timestamp; 108 unsigned long print_ts;
109 struct pt_regs *regs = get_irq_regs(); 109 struct pt_regs *regs = get_irq_regs();
110 unsigned long now; 110 unsigned long now;
111 111
112 /* Is detection switched off? */ 112 /* Is detection switched off? */
113 if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) { 113 if (!per_cpu(softlockup_watchdog, this_cpu) || softlockup_thresh <= 0) {
114 /* Be sure we don't false trigger if switched back on */ 114 /* Be sure we don't false trigger if switched back on */
115 if (touch_timestamp) 115 if (touch_ts)
116 per_cpu(touch_timestamp, this_cpu) = 0; 116 per_cpu(softlockup_touch_ts, this_cpu) = 0;
117 return; 117 return;
118 } 118 }
119 119
120 if (touch_timestamp == 0) { 120 if (touch_ts == 0) {
121 __touch_softlockup_watchdog(); 121 __touch_softlockup_watchdog();
122 return; 122 return;
123 } 123 }
124 124
125 print_timestamp = per_cpu(print_timestamp, this_cpu); 125 print_ts = per_cpu(softlockup_print_ts, this_cpu);
126 126
127 /* report at most once a second */ 127 /* report at most once a second */
128 if (print_timestamp == touch_timestamp || did_panic) 128 if (print_ts == touch_ts || did_panic)
129 return; 129 return;
130 130
131 /* do not print during early bootup: */ 131 /* do not print during early bootup: */
@@ -140,18 +140,18 @@ void softlockup_tick(void)
140 * Wake up the high-prio watchdog task twice per 140 * Wake up the high-prio watchdog task twice per
141 * threshold timespan. 141 * threshold timespan.
142 */ 142 */
143 if (now > touch_timestamp + softlockup_thresh/2) 143 if (now > touch_ts + softlockup_thresh/2)
144 wake_up_process(per_cpu(watchdog_task, this_cpu)); 144 wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
145 145
146 /* Warn about unreasonable delays: */ 146 /* Warn about unreasonable delays: */
147 if (now <= (touch_timestamp + softlockup_thresh)) 147 if (now <= (touch_ts + softlockup_thresh))
148 return; 148 return;
149 149
150 per_cpu(print_timestamp, this_cpu) = touch_timestamp; 150 per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
151 151
152 spin_lock(&print_lock); 152 spin_lock(&print_lock);
153 printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n", 153 printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
154 this_cpu, now - touch_timestamp, 154 this_cpu, now - touch_ts,
155 current->comm, task_pid_nr(current)); 155 current->comm, task_pid_nr(current));
156 print_modules(); 156 print_modules();
157 print_irqtrace_events(current); 157 print_irqtrace_events(current);
@@ -209,32 +209,32 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
209 switch (action) { 209 switch (action) {
210 case CPU_UP_PREPARE: 210 case CPU_UP_PREPARE:
211 case CPU_UP_PREPARE_FROZEN: 211 case CPU_UP_PREPARE_FROZEN:
212 BUG_ON(per_cpu(watchdog_task, hotcpu)); 212 BUG_ON(per_cpu(softlockup_watchdog, hotcpu));
213 p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu); 213 p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
214 if (IS_ERR(p)) { 214 if (IS_ERR(p)) {
215 printk(KERN_ERR "watchdog for %i failed\n", hotcpu); 215 printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
216 return NOTIFY_BAD; 216 return NOTIFY_BAD;
217 } 217 }
218 per_cpu(touch_timestamp, hotcpu) = 0; 218 per_cpu(softlockup_touch_ts, hotcpu) = 0;
219 per_cpu(watchdog_task, hotcpu) = p; 219 per_cpu(softlockup_watchdog, hotcpu) = p;
220 kthread_bind(p, hotcpu); 220 kthread_bind(p, hotcpu);
221 break; 221 break;
222 case CPU_ONLINE: 222 case CPU_ONLINE:
223 case CPU_ONLINE_FROZEN: 223 case CPU_ONLINE_FROZEN:
224 wake_up_process(per_cpu(watchdog_task, hotcpu)); 224 wake_up_process(per_cpu(softlockup_watchdog, hotcpu));
225 break; 225 break;
226#ifdef CONFIG_HOTPLUG_CPU 226#ifdef CONFIG_HOTPLUG_CPU
227 case CPU_UP_CANCELED: 227 case CPU_UP_CANCELED:
228 case CPU_UP_CANCELED_FROZEN: 228 case CPU_UP_CANCELED_FROZEN:
229 if (!per_cpu(watchdog_task, hotcpu)) 229 if (!per_cpu(softlockup_watchdog, hotcpu))
230 break; 230 break;
231 /* Unbind so it can run. Fall thru. */ 231 /* Unbind so it can run. Fall thru. */
232 kthread_bind(per_cpu(watchdog_task, hotcpu), 232 kthread_bind(per_cpu(softlockup_watchdog, hotcpu),
233 cpumask_any(cpu_online_mask)); 233 cpumask_any(cpu_online_mask));
234 case CPU_DEAD: 234 case CPU_DEAD:
235 case CPU_DEAD_FROZEN: 235 case CPU_DEAD_FROZEN:
236 p = per_cpu(watchdog_task, hotcpu); 236 p = per_cpu(softlockup_watchdog, hotcpu);
237 per_cpu(watchdog_task, hotcpu) = NULL; 237 per_cpu(softlockup_watchdog, hotcpu) = NULL;
238 kthread_stop(p); 238 kthread_stop(p);
239 break; 239 break;
240#endif /* CONFIG_HOTPLUG_CPU */ 240#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index ee5681f8d7ec..63b117e9eba1 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock);
86/* 86/*
87 * Per-CPU lookup locks for fast hash lookup: 87 * Per-CPU lookup locks for fast hash lookup:
88 */ 88 */
89static DEFINE_PER_CPU(spinlock_t, lookup_lock); 89static DEFINE_PER_CPU(spinlock_t, tstats_lookup_lock);
90 90
91/* 91/*
92 * Mutex to serialize state changes with show-stats activities: 92 * Mutex to serialize state changes with show-stats activities:
@@ -245,7 +245,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
245 if (likely(!timer_stats_active)) 245 if (likely(!timer_stats_active))
246 return; 246 return;
247 247
248 lock = &per_cpu(lookup_lock, raw_smp_processor_id()); 248 lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id());
249 249
250 input.timer = timer; 250 input.timer = timer;
251 input.start_func = startf; 251 input.start_func = startf;
@@ -348,9 +348,10 @@ static void sync_access(void)
348 int cpu; 348 int cpu;
349 349
350 for_each_online_cpu(cpu) { 350 for_each_online_cpu(cpu) {
351 spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags); 351 spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu);
352 spin_lock_irqsave(lock, flags);
352 /* nothing */ 353 /* nothing */
353 spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags); 354 spin_unlock_irqrestore(lock, flags);
354 } 355 }
355} 356}
356 357
@@ -408,7 +409,7 @@ void __init init_timer_stats(void)
408 int cpu; 409 int cpu;
409 410
410 for_each_possible_cpu(cpu) 411 for_each_possible_cpu(cpu)
411 spin_lock_init(&per_cpu(lookup_lock, cpu)); 412 spin_lock_init(&per_cpu(tstats_lookup_lock, cpu));
412} 413}
413 414
414static int __init init_tstats_procfs(void) 415static int __init init_tstats_procfs(void)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 88bd9ae2a9ed..c82dfd92fdfd 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
86 */ 86 */
87static int tracing_disabled = 1; 87static int tracing_disabled = 1;
88 88
89DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); 89DEFINE_PER_CPU(int, ftrace_cpu_disabled);
90 90
91static inline void ftrace_disable_cpu(void) 91static inline void ftrace_disable_cpu(void)
92{ 92{
93 preempt_disable(); 93 preempt_disable();
94 local_inc(&__get_cpu_var(ftrace_cpu_disabled)); 94 __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
95} 95}
96 96
97static inline void ftrace_enable_cpu(void) 97static inline void ftrace_enable_cpu(void)
98{ 98{
99 local_dec(&__get_cpu_var(ftrace_cpu_disabled)); 99 __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
100 preempt_enable(); 100 preempt_enable();
101} 101}
102 102
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
203 */ 203 */
204static struct trace_array max_tr; 204static struct trace_array max_tr;
205 205
206static DEFINE_PER_CPU(struct trace_array_cpu, max_data); 206static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
207 207
208/* tracer_enabled is used to toggle activation of a tracer */ 208/* tracer_enabled is used to toggle activation of a tracer */
209static int tracer_enabled = 1; 209static int tracer_enabled = 1;
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
1085 struct ftrace_entry *entry; 1085 struct ftrace_entry *entry;
1086 1086
1087 /* If we are reading the ring buffer, don't trace */ 1087 /* If we are reading the ring buffer, don't trace */
1088 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) 1088 if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
1089 return; 1089 return;
1090 1090
1091 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), 1091 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -4454,7 +4454,7 @@ __init static int tracer_alloc_buffers(void)
4454 /* Allocate the first page for all buffers */ 4454 /* Allocate the first page for all buffers */
4455 for_each_tracing_cpu(i) { 4455 for_each_tracing_cpu(i) {
4456 global_trace.data[i] = &per_cpu(global_trace_cpu, i); 4456 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
4457 max_tr.data[i] = &per_cpu(max_data, i); 4457 max_tr.data[i] = &per_cpu(max_tr_data, i);
4458 } 4458 }
4459 4459
4460 trace_init_cmdlines(); 4460 trace_init_cmdlines();
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 7fa33cab6962..a52bed2eedd8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -443,7 +443,7 @@ extern int DYN_FTRACE_TEST_NAME(void);
443 443
444extern int ring_buffer_expanded; 444extern int ring_buffer_expanded;
445extern bool tracing_selftest_disabled; 445extern bool tracing_selftest_disabled;
446DECLARE_PER_CPU(local_t, ftrace_cpu_disabled); 446DECLARE_PER_CPU(int, ftrace_cpu_disabled);
447 447
448#ifdef CONFIG_FTRACE_STARTUP_TEST 448#ifdef CONFIG_FTRACE_STARTUP_TEST
449extern int trace_selftest_startup_function(struct tracer *trace, 449extern int trace_selftest_startup_function(struct tracer *trace,
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index a43d009c561a..b1342c5d37cf 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -187,7 +187,7 @@ static int __trace_graph_entry(struct trace_array *tr,
187 struct ring_buffer *buffer = tr->buffer; 187 struct ring_buffer *buffer = tr->buffer;
188 struct ftrace_graph_ent_entry *entry; 188 struct ftrace_graph_ent_entry *entry;
189 189
190 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) 190 if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
191 return 0; 191 return 0;
192 192
193 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, 193 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -251,7 +251,7 @@ static void __trace_graph_return(struct trace_array *tr,
251 struct ring_buffer *buffer = tr->buffer; 251 struct ring_buffer *buffer = tr->buffer;
252 struct ftrace_graph_ret_entry *entry; 252 struct ftrace_graph_ret_entry *entry;
253 253
254 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) 254 if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
255 return; 255 return;
256 256
257 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, 257 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 69543a905cd5..7b97000745f5 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -20,10 +20,10 @@
20 20
21#define BTS_BUFFER_SIZE (1 << 13) 21#define BTS_BUFFER_SIZE (1 << 13)
22 22
23static DEFINE_PER_CPU(struct bts_tracer *, tracer); 23static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
24static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer); 24static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
25 25
26#define this_tracer per_cpu(tracer, smp_processor_id()) 26#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
27 27
28static int trace_hw_branches_enabled __read_mostly; 28static int trace_hw_branches_enabled __read_mostly;
29static int trace_hw_branches_suspended __read_mostly; 29static int trace_hw_branches_suspended __read_mostly;
@@ -32,12 +32,13 @@ static struct trace_array *hw_branch_trace __read_mostly;
32 32
33static void bts_trace_init_cpu(int cpu) 33static void bts_trace_init_cpu(int cpu)
34{ 34{
35 per_cpu(tracer, cpu) = 35 per_cpu(hwb_tracer, cpu) =
36 ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE, 36 ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
37 NULL, (size_t)-1, BTS_KERNEL); 37 BTS_BUFFER_SIZE, NULL, (size_t)-1,
38 BTS_KERNEL);
38 39
39 if (IS_ERR(per_cpu(tracer, cpu))) 40 if (IS_ERR(per_cpu(hwb_tracer, cpu)))
40 per_cpu(tracer, cpu) = NULL; 41 per_cpu(hwb_tracer, cpu) = NULL;
41} 42}
42 43
43static int bts_trace_init(struct trace_array *tr) 44static int bts_trace_init(struct trace_array *tr)
@@ -51,7 +52,7 @@ static int bts_trace_init(struct trace_array *tr)
51 for_each_online_cpu(cpu) { 52 for_each_online_cpu(cpu) {
52 bts_trace_init_cpu(cpu); 53 bts_trace_init_cpu(cpu);
53 54
54 if (likely(per_cpu(tracer, cpu))) 55 if (likely(per_cpu(hwb_tracer, cpu)))
55 trace_hw_branches_enabled = 1; 56 trace_hw_branches_enabled = 1;
56 } 57 }
57 trace_hw_branches_suspended = 0; 58 trace_hw_branches_suspended = 0;
@@ -67,9 +68,9 @@ static void bts_trace_reset(struct trace_array *tr)
67 68
68 get_online_cpus(); 69 get_online_cpus();
69 for_each_online_cpu(cpu) { 70 for_each_online_cpu(cpu) {
70 if (likely(per_cpu(tracer, cpu))) { 71 if (likely(per_cpu(hwb_tracer, cpu))) {
71 ds_release_bts(per_cpu(tracer, cpu)); 72 ds_release_bts(per_cpu(hwb_tracer, cpu));
72 per_cpu(tracer, cpu) = NULL; 73 per_cpu(hwb_tracer, cpu) = NULL;
73 } 74 }
74 } 75 }
75 trace_hw_branches_enabled = 0; 76 trace_hw_branches_enabled = 0;
@@ -83,8 +84,8 @@ static void bts_trace_start(struct trace_array *tr)
83 84
84 get_online_cpus(); 85 get_online_cpus();
85 for_each_online_cpu(cpu) 86 for_each_online_cpu(cpu)
86 if (likely(per_cpu(tracer, cpu))) 87 if (likely(per_cpu(hwb_tracer, cpu)))
87 ds_resume_bts(per_cpu(tracer, cpu)); 88 ds_resume_bts(per_cpu(hwb_tracer, cpu));
88 trace_hw_branches_suspended = 0; 89 trace_hw_branches_suspended = 0;
89 put_online_cpus(); 90 put_online_cpus();
90} 91}
@@ -95,8 +96,8 @@ static void bts_trace_stop(struct trace_array *tr)
95 96
96 get_online_cpus(); 97 get_online_cpus();
97 for_each_online_cpu(cpu) 98 for_each_online_cpu(cpu)
98 if (likely(per_cpu(tracer, cpu))) 99 if (likely(per_cpu(hwb_tracer, cpu)))
99 ds_suspend_bts(per_cpu(tracer, cpu)); 100 ds_suspend_bts(per_cpu(hwb_tracer, cpu));
100 trace_hw_branches_suspended = 1; 101 trace_hw_branches_suspended = 1;
101 put_online_cpus(); 102 put_online_cpus();
102} 103}
@@ -114,16 +115,16 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
114 bts_trace_init_cpu(cpu); 115 bts_trace_init_cpu(cpu);
115 116
116 if (trace_hw_branches_suspended && 117 if (trace_hw_branches_suspended &&
117 likely(per_cpu(tracer, cpu))) 118 likely(per_cpu(hwb_tracer, cpu)))
118 ds_suspend_bts(per_cpu(tracer, cpu)); 119 ds_suspend_bts(per_cpu(hwb_tracer, cpu));
119 } 120 }
120 break; 121 break;
121 122
122 case CPU_DOWN_PREPARE: 123 case CPU_DOWN_PREPARE:
123 /* The notification is sent with interrupts enabled. */ 124 /* The notification is sent with interrupts enabled. */
124 if (likely(per_cpu(tracer, cpu))) { 125 if (likely(per_cpu(hwb_tracer, cpu))) {
125 ds_release_bts(per_cpu(tracer, cpu)); 126 ds_release_bts(per_cpu(hwb_tracer, cpu));
126 per_cpu(tracer, cpu) = NULL; 127 per_cpu(hwb_tracer, cpu) = NULL;
127 } 128 }
128 } 129 }
129 130
@@ -258,8 +259,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
258 259
259 get_online_cpus(); 260 get_online_cpus();
260 for_each_online_cpu(cpu) 261 for_each_online_cpu(cpu)
261 if (likely(per_cpu(tracer, cpu))) 262 if (likely(per_cpu(hwb_tracer, cpu)))
262 ds_suspend_bts(per_cpu(tracer, cpu)); 263 ds_suspend_bts(per_cpu(hwb_tracer, cpu));
263 /* 264 /*
264 * We need to collect the trace on the respective cpu since ftrace 265 * We need to collect the trace on the respective cpu since ftrace
265 * implicitly adds the record for the current cpu. 266 * implicitly adds the record for the current cpu.
@@ -268,8 +269,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
268 on_each_cpu(trace_bts_cpu, iter->tr, 1); 269 on_each_cpu(trace_bts_cpu, iter->tr, 1);
269 270
270 for_each_online_cpu(cpu) 271 for_each_online_cpu(cpu)
271 if (likely(per_cpu(tracer, cpu))) 272 if (likely(per_cpu(hwb_tracer, cpu)))
272 ds_resume_bts(per_cpu(tracer, cpu)); 273 ds_resume_bts(per_cpu(hwb_tracer, cpu));
273 put_online_cpus(); 274 put_online_cpus();
274} 275}
275 276
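The hwb_ renames above are namespace hygiene: once the per_cpu__ symbol prefix is dropped, a DEFINE_PER_CPU() name is an ordinary C symbol, and generic names like tracer or buffer collide with locals wherever the accessor macros expand. A contrived sketch of the clash (helper names hypothetical):

    static DEFINE_PER_CPU(struct bts_tracer *, tracer);

    static void dump_one(int cpu)
    {
            struct trace_iterator *tracer = current_iter();  /* local */

            /*
             * With the prefix gone, per_cpu() expands to a reference to
             * the symbol "tracer", which scoping resolves to the local
             * above rather than the per-CPU pointer.
             */
            consume(per_cpu(tracer, cpu));
    }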
diff --git a/mm/Makefile b/mm/Makefile
index ebf849042ed3..82131d0f8d85 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -34,11 +34,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
34obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o 34obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
35obj-$(CONFIG_FS_XIP) += filemap_xip.o 35obj-$(CONFIG_FS_XIP) += filemap_xip.o
36obj-$(CONFIG_MIGRATION) += migrate.o 36obj-$(CONFIG_MIGRATION) += migrate.o
37ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
38obj-$(CONFIG_SMP) += percpu.o 37obj-$(CONFIG_SMP) += percpu.o
39else
40obj-$(CONFIG_SMP) += allocpercpu.o
41endif
42obj-$(CONFIG_QUICKLIST) += quicklist.o 38obj-$(CONFIG_QUICKLIST) += quicklist.o
43obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o 39obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
44obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o 40obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
deleted file mode 100644
index df34ceae0c67..000000000000
--- a/mm/allocpercpu.c
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * linux/mm/allocpercpu.c
3 *
4 * Separated from slab.c August 11, 2006 Christoph Lameter
5 */
6#include <linux/mm.h>
7#include <linux/module.h>
8#include <linux/bootmem.h>
9#include <asm/sections.h>
10
11#ifndef cache_line_size
12#define cache_line_size() L1_CACHE_BYTES
13#endif
14
15/**
16 * percpu_depopulate - depopulate per-cpu data for given cpu
17 * @__pdata: per-cpu data to depopulate
18 * @cpu: depopulate per-cpu data for this cpu
19 *
20 * Depopulating per-cpu data for a cpu going offline would be a typical
21 * use case. You need to register a cpu hotplug handler for that purpose.
22 */
23static void percpu_depopulate(void *__pdata, int cpu)
24{
25 struct percpu_data *pdata = __percpu_disguise(__pdata);
26
27 kfree(pdata->ptrs[cpu]);
28 pdata->ptrs[cpu] = NULL;
29}
30
31/**
32 * percpu_depopulate_mask - depopulate per-cpu data for some cpus
33 * @__pdata: per-cpu data to depopulate
34 * @mask: depopulate per-cpu data for cpus selected through mask bits
35 */
36static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
37{
38 int cpu;
39 for_each_cpu_mask_nr(cpu, *mask)
40 percpu_depopulate(__pdata, cpu);
41}
42
43#define percpu_depopulate_mask(__pdata, mask) \
44 __percpu_depopulate_mask((__pdata), &(mask))
45
46/**
47 * percpu_populate - populate per-cpu data for given cpu
48 * @__pdata: per-cpu data to populate further
49 * @size: size of per-cpu object
50 * @gfp: may sleep or not etc.
51 * @cpu: populate per-cpu data for this cpu
52 *
53 * Populating per-cpu data for a cpu coming online would be a typical
54 * use case. You need to register a cpu hotplug handler for that purpose.
55 * Per-cpu object is populated with zeroed buffer.
56 */
57static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
58{
59 struct percpu_data *pdata = __percpu_disguise(__pdata);
60 int node = cpu_to_node(cpu);
61
62 /*
63 * We should make sure each CPU gets private memory.
64 */
65 size = roundup(size, cache_line_size());
66
67 BUG_ON(pdata->ptrs[cpu]);
68 if (node_online(node))
69 pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
70 else
71 pdata->ptrs[cpu] = kzalloc(size, gfp);
72 return pdata->ptrs[cpu];
73}
74
75/**
76 * percpu_populate_mask - populate per-cpu data for more cpus
77 * @__pdata: per-cpu data to populate further
78 * @size: size of per-cpu object
79 * @gfp: may sleep or not etc.
80 * @mask: populate per-cpu data for cpus selected through mask bits
81 *
82 * Per-cpu objects are populated with zeroed buffers.
83 */
84static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
85 cpumask_t *mask)
86{
87 cpumask_t populated;
88 int cpu;
89
90 cpus_clear(populated);
91 for_each_cpu_mask_nr(cpu, *mask)
92 if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
93 __percpu_depopulate_mask(__pdata, &populated);
94 return -ENOMEM;
95 } else
96 cpu_set(cpu, populated);
97 return 0;
98}
99
100#define percpu_populate_mask(__pdata, size, gfp, mask) \
101 __percpu_populate_mask((__pdata), (size), (gfp), &(mask))
102
103/**
104 * alloc_percpu - initial setup of per-cpu data
105 * @size: size of per-cpu object
106 * @align: alignment
107 *
108 * Allocate dynamic percpu area. Percpu objects are populated with
109 * zeroed buffers.
110 */
111void *__alloc_percpu(size_t size, size_t align)
112{
113 /*
114 * We allocate whole cache lines to avoid false sharing
115 */
116 size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
117 void *pdata = kzalloc(sz, GFP_KERNEL);
118 void *__pdata = __percpu_disguise(pdata);
119
120 /*
121 * Can't easily make larger alignment work with kmalloc. WARN
122 * on it. Larger alignment should only be used for module
123 * percpu sections on SMP for which this path isn't used.
124 */
125 WARN_ON_ONCE(align > SMP_CACHE_BYTES);
126
127 if (unlikely(!pdata))
128 return NULL;
129 if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
130 &cpu_possible_map)))
131 return __pdata;
132 kfree(pdata);
133 return NULL;
134}
135EXPORT_SYMBOL_GPL(__alloc_percpu);
136
137/**
138 * free_percpu - final cleanup of per-cpu data
139 * @__pdata: object to clean up
140 *
141 * We simply clean up any per-cpu object left. No need for the client to
142 * track and specify through a bit mask which per-cpu objects are to be freed.
143 */
144void free_percpu(void *__pdata)
145{
146 if (unlikely(!__pdata))
147 return;
148 __percpu_depopulate_mask(__pdata, cpu_possible_mask);
149 kfree(__percpu_disguise(__pdata));
150}
151EXPORT_SYMBOL_GPL(free_percpu);
152
153/*
154 * Generic percpu area setup.
155 */
156#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
157unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
158
159EXPORT_SYMBOL(__per_cpu_offset);
160
161void __init setup_per_cpu_areas(void)
162{
163 unsigned long size, i;
164 char *ptr;
165 unsigned long nr_possible_cpus = num_possible_cpus();
166
167 /* Copy section for each CPU (we discard the original) */
168 size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
169 ptr = alloc_bootmem_pages(size * nr_possible_cpus);
170
171 for_each_possible_cpu(i) {
172 __per_cpu_offset[i] = ptr - __per_cpu_start;
173 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
174 ptr += size;
175 }
176}
177#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
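For contrast with the unified allocator that remains, the deleted allocpercpu.c scheme was an array of nr_cpu_ids separately kmalloc'd objects, reached through an XOR-"disguised" cookie so that directly dereferencing the returned pointer faults. The legacy layout, reduced to a sketch (struct and macro as they existed in the old percpu headers):

    struct percpu_data {
            void *ptrs[1];                  /* really nr_cpu_ids slots */
    };

    /* Flip every address bit so the cookie is never dereferenceable. */
    #define __percpu_disguise(pdata) \
            ((struct percpu_data *)~(unsigned long)(pdata))

    static void *legacy_per_cpu_ptr(void *cookie, int cpu)
    {
            struct percpu_data *pdata = __percpu_disguise(cookie);

            return pdata->ptrs[cpu];        /* one object per CPU */
    }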
diff --git a/mm/percpu.c b/mm/percpu.c
index 5adfc268b408..442010cc91c6 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -46,8 +46,6 @@
46 * 46 *
47 * To use this allocator, arch code should do the following. 47 * To use this allocator, arch code should do the following.
48 * 48 *
49 * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
50 *
51 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate 49 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
52 * regular address to percpu pointer and back if they need to be 50 * regular address to percpu pointer and back if they need to be
53 * different from the default 51 * different from the default
@@ -74,6 +72,7 @@
74#include <asm/cacheflush.h> 72#include <asm/cacheflush.h>
75#include <asm/sections.h> 73#include <asm/sections.h>
76#include <asm/tlbflush.h> 74#include <asm/tlbflush.h>
75#include <asm/io.h>
77 76
78#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ 77#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
79#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ 78#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
@@ -1302,6 +1301,27 @@ void free_percpu(void *ptr)
1302} 1301}
1303EXPORT_SYMBOL_GPL(free_percpu); 1302EXPORT_SYMBOL_GPL(free_percpu);
1304 1303
1304/**
1305 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
1306 * @addr: the address to be converted to physical address
1307 *
1308 * Given @addr which is a dereferenceable address obtained via one of
1309 * percpu access macros, this function translates it into its physical
1310 * address. The caller is responsible for ensuring @addr stays valid
1311 * until this function finishes.
1312 *
1313 * RETURNS:
1314 * The physical address for @addr.
1315 */
1316phys_addr_t per_cpu_ptr_to_phys(void *addr)
1317{
1318 if ((unsigned long)addr < VMALLOC_START ||
1319 (unsigned long)addr >= VMALLOC_END)
1320 return __pa(addr);
1321 else
1322 return page_to_phys(vmalloc_to_page(addr));
1323}
1324
1305static inline size_t pcpu_calc_fc_sizes(size_t static_size, 1325static inline size_t pcpu_calc_fc_sizes(size_t static_size,
1306 size_t reserved_size, 1326 size_t reserved_size,
1307 ssize_t *dyn_sizep) 1327 ssize_t *dyn_sizep)
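A hedged usage sketch for the new per_cpu_ptr_to_phys() helper: exporting a per-CPU object's physical address to firmware or a device, with the helper hiding whether the chunk sits in the linear map or in vmalloc space (type and names hypothetical):

    struct my_mca_buf { char data[64]; };
    static DEFINE_PER_CPU(struct my_mca_buf, my_mca_buf);

    static phys_addr_t my_mca_buf_phys(int cpu)
    {
            /* &per_cpu() yields the dereferenceable address that
             * per_cpu_ptr_to_phys() documents as its input. */
            return per_cpu_ptr_to_phys(&per_cpu(my_mca_buf, cpu));
    }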
diff --git a/mm/slab.c b/mm/slab.c
index a6c9166996a9..29b09599af7c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -697,7 +697,7 @@ static inline void init_lock_keys(void)
697static DEFINE_MUTEX(cache_chain_mutex); 697static DEFINE_MUTEX(cache_chain_mutex);
698static struct list_head cache_chain; 698static struct list_head cache_chain;
699 699
700static DEFINE_PER_CPU(struct delayed_work, reap_work); 700static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
701 701
702static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) 702static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
703{ 703{
@@ -838,7 +838,7 @@ __setup("noaliencache", noaliencache_setup);
838 * objects freed on different nodes from which they were allocated) and the 838 * objects freed on different nodes from which they were allocated) and the
839 * flushing of remote pcps by calling drain_node_pages. 839 * flushing of remote pcps by calling drain_node_pages.
840 */ 840 */
841static DEFINE_PER_CPU(unsigned long, reap_node); 841static DEFINE_PER_CPU(unsigned long, slab_reap_node);
842 842
843static void init_reap_node(int cpu) 843static void init_reap_node(int cpu)
844{ 844{
@@ -848,17 +848,17 @@ static void init_reap_node(int cpu)
848 if (node == MAX_NUMNODES) 848 if (node == MAX_NUMNODES)
849 node = first_node(node_online_map); 849 node = first_node(node_online_map);
850 850
851 per_cpu(reap_node, cpu) = node; 851 per_cpu(slab_reap_node, cpu) = node;
852} 852}
853 853
854static void next_reap_node(void) 854static void next_reap_node(void)
855{ 855{
856 int node = __get_cpu_var(reap_node); 856 int node = __get_cpu_var(slab_reap_node);
857 857
858 node = next_node(node, node_online_map); 858 node = next_node(node, node_online_map);
859 if (unlikely(node >= MAX_NUMNODES)) 859 if (unlikely(node >= MAX_NUMNODES))
860 node = first_node(node_online_map); 860 node = first_node(node_online_map);
861 __get_cpu_var(reap_node) = node; 861 __get_cpu_var(slab_reap_node) = node;
862} 862}
863 863
864#else 864#else
@@ -875,7 +875,7 @@ static void next_reap_node(void)
875 */ 875 */
876static void __cpuinit start_cpu_timer(int cpu) 876static void __cpuinit start_cpu_timer(int cpu)
877{ 877{
878 struct delayed_work *reap_work = &per_cpu(reap_work, cpu); 878 struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
879 879
880 /* 880 /*
881 * When this gets called from do_initcalls via cpucache_init(), 881 * When this gets called from do_initcalls via cpucache_init(),
@@ -1039,7 +1039,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
1039 */ 1039 */
1040static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) 1040static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
1041{ 1041{
1042 int node = __get_cpu_var(reap_node); 1042 int node = __get_cpu_var(slab_reap_node);
1043 1043
1044 if (l3->alien) { 1044 if (l3->alien) {
1045 struct array_cache *ac = l3->alien[node]; 1045 struct array_cache *ac = l3->alien[node];
@@ -1300,9 +1300,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1300 * anything expensive but will only modify reap_work 1300 * anything expensive but will only modify reap_work
1301 * and reschedule the timer. 1301 * and reschedule the timer.
1302 */ 1302 */
1303 cancel_rearming_delayed_work(&per_cpu(reap_work, cpu)); 1303 cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
1304 /* Now the cache_reaper is guaranteed to be not running. */ 1304 /* Now the cache_reaper is guaranteed to be not running. */
1305 per_cpu(reap_work, cpu).work.func = NULL; 1305 per_cpu(slab_reap_work, cpu).work.func = NULL;
1306 break; 1306 break;
1307 case CPU_DOWN_FAILED: 1307 case CPU_DOWN_FAILED:
1308 case CPU_DOWN_FAILED_FROZEN: 1308 case CPU_DOWN_FAILED_FROZEN:
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0f551a4a44cd..9b08d790df6f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -761,7 +761,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
761 spin_lock(&vbq->lock); 761 spin_lock(&vbq->lock);
762 list_add(&vb->free_list, &vbq->free); 762 list_add(&vb->free_list, &vbq->free);
763 spin_unlock(&vbq->lock); 763 spin_unlock(&vbq->lock);
764 put_cpu_var(vmap_cpu_blocks); 764 put_cpu_var(vmap_block_queue);
765 765
766 return vb; 766 return vb;
767} 767}
@@ -826,7 +826,7 @@ again:
826 } 826 }
827 spin_unlock(&vb->lock); 827 spin_unlock(&vb->lock);
828 } 828 }
829 put_cpu_var(vmap_cpu_blocks); 829 put_cpu_var(vmap_block_queue);
830 rcu_read_unlock(); 830 rcu_read_unlock();
831 831
832 if (!addr) { 832 if (!addr) {
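Both vmalloc.c hunks repair a get/put pairing: put_cpu_var() must name the variable taken with get_cpu_var(). The mismatch survived because put_cpu_var() historically ignored its argument and expanded to a bare preempt_enable(); once the macro references the argument, a wrong name stops compiling. The pairing as a sketch (hypothetical type and variable):

    struct my_queue { spinlock_t lock; struct list_head free; };
    static DEFINE_PER_CPU(struct my_queue, my_vbq);

    static void touch_queue(void)
    {
            /* Disables preemption and returns this CPU's instance. */
            struct my_queue *q = &get_cpu_var(my_vbq);

            /* ... use q ... */

            put_cpu_var(my_vbq);    /* same name; re-enables preemption */
    }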
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c81321f9feec..dad2327e4580 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -883,11 +883,10 @@ static void vmstat_update(struct work_struct *w)
883 883
884static void __cpuinit start_cpu_timer(int cpu) 884static void __cpuinit start_cpu_timer(int cpu)
885{ 885{
886 struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu); 886 struct delayed_work *work = &per_cpu(vmstat_work, cpu);
887 887
888 INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update); 888 INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
889 schedule_delayed_work_on(cpu, vmstat_work, 889 schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
890 __round_jiffies_relative(HZ, cpu));
891} 890}
892 891
893/* 892/*
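The vmstat.c rename guards against the same prefix-removal hazard as the tracer renames earlier: per_cpu(name, cpu) comes to reference the symbol name directly, so a local variable of the same name shadows the per-CPU one at the point of use. The bug pattern, reduced (names hypothetical):

    static DEFINE_PER_CPU(struct delayed_work, my_work);

    static void start_one(int cpu)
    {
            /*
             * Once per_cpu() takes the plain symbol, "my_work" in the
             * initializer names the local being declared, not the
             * per-CPU variable -- a type error at best.
             */
            struct delayed_work *my_work = &per_cpu(my_work, cpu);
    }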
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index c7450c8f0a7c..6dcdd2517819 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -55,16 +55,8 @@ static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
55 55
56 /* 56 /*
57 * RFC 4291, Section 2.2.1 57 * RFC 4291, Section 2.2.1
58 *
59 * To keep the result as short as possible, especially
60 * since we don't shorthand, we don't want leading zeros
61 * in each halfword, so avoid %pI6.
62 */ 58 */
63 return snprintf(buf, buflen, "%x:%x:%x:%x:%x:%x:%x:%x", 59 return snprintf(buf, buflen, "%pI6c", addr);
64 ntohs(addr->s6_addr16[0]), ntohs(addr->s6_addr16[1]),
65 ntohs(addr->s6_addr16[2]), ntohs(addr->s6_addr16[3]),
66 ntohs(addr->s6_addr16[4]), ntohs(addr->s6_addr16[5]),
67 ntohs(addr->s6_addr16[6]), ntohs(addr->s6_addr16[7]));
68} 60}
69 61
70static size_t rpc_ntop6(const struct sockaddr *sap, 62static size_t rpc_ntop6(const struct sockaddr *sap,
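The rewrite leans on the kernel's printf extensions for IPv6 addresses: "%pI6" emits all eight halfwords, while "%pI6c" emits the compressed form this function previously built by hand. A fragment-level sketch, kernel context assumed:

    static size_t ntop6_compressed(char *buf, size_t buflen,
                                   const struct sockaddr_in6 *sin6)
    {
            /* "::1" rather than "0:0:0:0:0:0:0:1" */
            return snprintf(buf, buflen, "%pI6c", &sin6->sin6_addr);
    }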
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 7535a7bed2fa..f394fc190a49 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -123,16 +123,19 @@ rpcauth_unhash_cred_locked(struct rpc_cred *cred)
123 clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); 123 clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
124} 124}
125 125
126static void 126static int
127rpcauth_unhash_cred(struct rpc_cred *cred) 127rpcauth_unhash_cred(struct rpc_cred *cred)
128{ 128{
129 spinlock_t *cache_lock; 129 spinlock_t *cache_lock;
130 int ret;
130 131
131 cache_lock = &cred->cr_auth->au_credcache->lock; 132 cache_lock = &cred->cr_auth->au_credcache->lock;
132 spin_lock(cache_lock); 133 spin_lock(cache_lock);
133 if (atomic_read(&cred->cr_count) == 0) 134 ret = atomic_read(&cred->cr_count) == 0;
135 if (ret)
134 rpcauth_unhash_cred_locked(cred); 136 rpcauth_unhash_cred_locked(cred);
135 spin_unlock(cache_lock); 137 spin_unlock(cache_lock);
138 return ret;
136} 139}
137 140
138/* 141/*
@@ -446,31 +449,35 @@ void
446put_rpccred(struct rpc_cred *cred) 449put_rpccred(struct rpc_cred *cred)
447{ 450{
448 /* Fast path for unhashed credentials */ 451 /* Fast path for unhashed credentials */
449 if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) 452 if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) == 0) {
450 goto need_lock; 453 if (atomic_dec_and_test(&cred->cr_count))
451 454 cred->cr_ops->crdestroy(cred);
452 if (!atomic_dec_and_test(&cred->cr_count))
453 return; 455 return;
454 goto out_destroy; 456 }
455need_lock: 457
456 if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock)) 458 if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock))
457 return; 459 return;
458 if (!list_empty(&cred->cr_lru)) { 460 if (!list_empty(&cred->cr_lru)) {
459 number_cred_unused--; 461 number_cred_unused--;
460 list_del_init(&cred->cr_lru); 462 list_del_init(&cred->cr_lru);
461 } 463 }
462 if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
463 rpcauth_unhash_cred(cred);
464 if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) { 464 if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) {
465 cred->cr_expire = jiffies; 465 if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0) {
466 list_add_tail(&cred->cr_lru, &cred_unused); 466 cred->cr_expire = jiffies;
467 number_cred_unused++; 467 list_add_tail(&cred->cr_lru, &cred_unused);
468 spin_unlock(&rpc_credcache_lock); 468 number_cred_unused++;
469 return; 469 goto out_nodestroy;
470 }
471 if (!rpcauth_unhash_cred(cred)) {
472 /* We were hashed and someone looked us up... */
473 goto out_nodestroy;
474 }
470 } 475 }
471 spin_unlock(&rpc_credcache_lock); 476 spin_unlock(&rpc_credcache_lock);
472out_destroy:
473 cred->cr_ops->crdestroy(cred); 477 cred->cr_ops->crdestroy(cred);
478 return;
479out_nodestroy:
480 spin_unlock(&rpc_credcache_lock);
474} 481}
475EXPORT_SYMBOL_GPL(put_rpccred); 482EXPORT_SYMBOL_GPL(put_rpccred);
476 483
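The heart of the auth.c change: rpcauth_unhash_cred() now reports whether it saw cr_count at zero under the cache lock, so put_rpccred() can distinguish "unhashed a dead credential" from "a concurrent lookup revived it". The idiom in isolation (types hypothetical):

    static int try_unhash(struct cached_obj *obj, spinlock_t *lock)
    {
            int dead;

            spin_lock(lock);
            dead = atomic_read(&obj->refcount) == 0;
            if (dead)
                    unhash_locked(obj);     /* checked under the lock */
            spin_unlock(lock);

            return dead;    /* 0 => revived by a lookup; do not free */
    }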
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index fc6a43ccd950..3c3c50f38a1c 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -304,7 +304,7 @@ __gss_find_upcall(struct rpc_inode *rpci, uid_t uid)
304 * to that upcall instead of adding the new upcall. 304 * to that upcall instead of adding the new upcall.
305 */ 305 */
306static inline struct gss_upcall_msg * 306static inline struct gss_upcall_msg *
307gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg) 307gss_add_msg(struct gss_upcall_msg *gss_msg)
308{ 308{
309 struct rpc_inode *rpci = gss_msg->inode; 309 struct rpc_inode *rpci = gss_msg->inode;
310 struct inode *inode = &rpci->vfs_inode; 310 struct inode *inode = &rpci->vfs_inode;
@@ -445,7 +445,7 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
445 gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred); 445 gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred);
446 if (IS_ERR(gss_new)) 446 if (IS_ERR(gss_new))
447 return gss_new; 447 return gss_new;
448 gss_msg = gss_add_msg(gss_auth, gss_new); 448 gss_msg = gss_add_msg(gss_new);
449 if (gss_msg == gss_new) { 449 if (gss_msg == gss_new) {
450 struct inode *inode = &gss_new->inode->vfs_inode; 450 struct inode *inode = &gss_new->inode->vfs_inode;
451 int res = rpc_queue_upcall(inode, &gss_new->msg); 451 int res = rpc_queue_upcall(inode, &gss_new->msg);
@@ -485,7 +485,7 @@ gss_refresh_upcall(struct rpc_task *task)
485 dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, 485 dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
486 cred->cr_uid); 486 cred->cr_uid);
487 gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); 487 gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
488 if (IS_ERR(gss_msg) == -EAGAIN) { 488 if (PTR_ERR(gss_msg) == -EAGAIN) {
489 /* XXX: warning on the first, under the assumption we 489 /* XXX: warning on the first, under the assumption we
490 * shouldn't normally hit this case on a refresh. */ 490 * shouldn't normally hit this case on a refresh. */
491 warn_gssd(); 491 warn_gssd();
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 38829e20500b..154034b675bd 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -79,7 +79,7 @@ static void call_connect_status(struct rpc_task *task);
79 79
80static __be32 *rpc_encode_header(struct rpc_task *task); 80static __be32 *rpc_encode_header(struct rpc_task *task);
81static __be32 *rpc_verify_header(struct rpc_task *task); 81static __be32 *rpc_verify_header(struct rpc_task *task);
82static int rpc_ping(struct rpc_clnt *clnt, int flags); 82static int rpc_ping(struct rpc_clnt *clnt);
83 83
84static void rpc_register_client(struct rpc_clnt *clnt) 84static void rpc_register_client(struct rpc_clnt *clnt)
85{ 85{
@@ -340,7 +340,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
340 return clnt; 340 return clnt;
341 341
342 if (!(args->flags & RPC_CLNT_CREATE_NOPING)) { 342 if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
343 int err = rpc_ping(clnt, RPC_TASK_SOFT); 343 int err = rpc_ping(clnt);
344 if (err != 0) { 344 if (err != 0) {
345 rpc_shutdown_client(clnt); 345 rpc_shutdown_client(clnt);
346 return ERR_PTR(err); 346 return ERR_PTR(err);
@@ -528,7 +528,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
528 clnt->cl_prog = program->number; 528 clnt->cl_prog = program->number;
529 clnt->cl_vers = version->number; 529 clnt->cl_vers = version->number;
530 clnt->cl_stats = program->stats; 530 clnt->cl_stats = program->stats;
531 err = rpc_ping(clnt, RPC_TASK_SOFT); 531 err = rpc_ping(clnt);
532 if (err != 0) { 532 if (err != 0) {
533 rpc_shutdown_client(clnt); 533 rpc_shutdown_client(clnt);
534 clnt = ERR_PTR(err); 534 clnt = ERR_PTR(err);
@@ -1060,7 +1060,7 @@ call_bind_status(struct rpc_task *task)
1060 goto retry_timeout; 1060 goto retry_timeout;
1061 case -EPFNOSUPPORT: 1061 case -EPFNOSUPPORT:
1062 /* server doesn't support any rpcbind version we know of */ 1062 /* server doesn't support any rpcbind version we know of */
1063 dprintk("RPC: %5u remote rpcbind service unavailable\n", 1063 dprintk("RPC: %5u unrecognized remote rpcbind service\n",
1064 task->tk_pid); 1064 task->tk_pid);
1065 break; 1065 break;
1066 case -EPROTONOSUPPORT: 1066 case -EPROTONOSUPPORT:
@@ -1069,6 +1069,21 @@ call_bind_status(struct rpc_task *task)
1069 task->tk_status = 0; 1069 task->tk_status = 0;
1070 task->tk_action = call_bind; 1070 task->tk_action = call_bind;
1071 return; 1071 return;
1072 case -ECONNREFUSED: /* connection problems */
1073 case -ECONNRESET:
1074 case -ENOTCONN:
1075 case -EHOSTDOWN:
1076 case -EHOSTUNREACH:
1077 case -ENETUNREACH:
1078 case -EPIPE:
1079 dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
1080 task->tk_pid, task->tk_status);
1081 if (!RPC_IS_SOFTCONN(task)) {
1082 rpc_delay(task, 5*HZ);
1083 goto retry_timeout;
1084 }
1085 status = task->tk_status;
1086 break;
1072 default: 1087 default:
1073 dprintk("RPC: %5u unrecognized rpcbind error (%d)\n", 1088 dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
1074 task->tk_pid, -task->tk_status); 1089 task->tk_pid, -task->tk_status);
@@ -1180,11 +1195,25 @@ static void
1180call_transmit_status(struct rpc_task *task) 1195call_transmit_status(struct rpc_task *task)
1181{ 1196{
1182 task->tk_action = call_status; 1197 task->tk_action = call_status;
1198
1199 /*
1200 * Common case: success. Force the compiler to put this
1201 * test first.
1202 */
1203 if (task->tk_status == 0) {
1204 xprt_end_transmit(task);
1205 rpc_task_force_reencode(task);
1206 return;
1207 }
1208
1183 switch (task->tk_status) { 1209 switch (task->tk_status) {
1184 case -EAGAIN: 1210 case -EAGAIN:
1185 break; 1211 break;
1186 default: 1212 default:
1213 dprint_status(task);
1187 xprt_end_transmit(task); 1214 xprt_end_transmit(task);
1215 rpc_task_force_reencode(task);
1216 break;
1188 /* 1217 /*
1189 * Special cases: if we've been waiting on the 1218 * Special cases: if we've been waiting on the
1190 * socket's write_space() callback, or if the 1219 * socket's write_space() callback, or if the
@@ -1192,11 +1221,16 @@ call_transmit_status(struct rpc_task *task)
1192 * then hold onto the transport lock. 1221 * then hold onto the transport lock.
1193 */ 1222 */
1194 case -ECONNREFUSED: 1223 case -ECONNREFUSED:
1195 case -ECONNRESET:
1196 case -ENOTCONN:
1197 case -EHOSTDOWN: 1224 case -EHOSTDOWN:
1198 case -EHOSTUNREACH: 1225 case -EHOSTUNREACH:
1199 case -ENETUNREACH: 1226 case -ENETUNREACH:
1227 if (RPC_IS_SOFTCONN(task)) {
1228 xprt_end_transmit(task);
1229 rpc_exit(task, task->tk_status);
1230 break;
1231 }
1232 case -ECONNRESET:
1233 case -ENOTCONN:
1200 case -EPIPE: 1234 case -EPIPE:
1201 rpc_task_force_reencode(task); 1235 rpc_task_force_reencode(task);
1202 } 1236 }
@@ -1346,6 +1380,10 @@ call_timeout(struct rpc_task *task)
1346 dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); 1380 dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
1347 task->tk_timeouts++; 1381 task->tk_timeouts++;
1348 1382
1383 if (RPC_IS_SOFTCONN(task)) {
1384 rpc_exit(task, -ETIMEDOUT);
1385 return;
1386 }
1349 if (RPC_IS_SOFT(task)) { 1387 if (RPC_IS_SOFT(task)) {
1350 if (clnt->cl_chatty) 1388 if (clnt->cl_chatty)
1351 printk(KERN_NOTICE "%s: server %s not responding, timed out\n", 1389 printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
@@ -1675,14 +1713,14 @@ static struct rpc_procinfo rpcproc_null = {
1675 .p_decode = rpcproc_decode_null, 1713 .p_decode = rpcproc_decode_null,
1676}; 1714};
1677 1715
1678static int rpc_ping(struct rpc_clnt *clnt, int flags) 1716static int rpc_ping(struct rpc_clnt *clnt)
1679{ 1717{
1680 struct rpc_message msg = { 1718 struct rpc_message msg = {
1681 .rpc_proc = &rpcproc_null, 1719 .rpc_proc = &rpcproc_null,
1682 }; 1720 };
1683 int err; 1721 int err;
1684 msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0); 1722 msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
1685 err = rpc_call_sync(clnt, &msg, flags); 1723 err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
1686 put_rpccred(msg.rpc_cred); 1724 put_rpccred(msg.rpc_cred);
1687 return err; 1725 return err;
1688} 1726}
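With rpc_ping() hardwiring RPC_TASK_SOFT | RPC_TASK_SOFTCONN, a NULL ping now obeys its timeouts and fails fast instead of retrying an unreachable server indefinitely. A caller-side sketch of the same fail-fast flags on a synchronous call (setup mirrors rpc_ping() above; clnt is assumed to exist):

    struct rpc_message msg = {
            .rpc_proc = &rpcproc_null,
    };
    int err;

    /* SOFT: obey timeouts rather than retrying forever.
     * SOFTCONN: fail at once if a connection cannot be established. */
    err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);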
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 830faf4d9997..3e3772d8eb92 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -20,6 +20,7 @@
20#include <linux/in6.h> 20#include <linux/in6.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/mutex.h>
23#include <net/ipv6.h> 24#include <net/ipv6.h>
24 25
25#include <linux/sunrpc/clnt.h> 26#include <linux/sunrpc/clnt.h>
@@ -110,6 +111,9 @@ static void rpcb_getport_done(struct rpc_task *, void *);
110static void rpcb_map_release(void *data); 111static void rpcb_map_release(void *data);
111static struct rpc_program rpcb_program; 112static struct rpc_program rpcb_program;
112 113
114static struct rpc_clnt * rpcb_local_clnt;
115static struct rpc_clnt * rpcb_local_clnt4;
116
113struct rpcbind_args { 117struct rpcbind_args {
114 struct rpc_xprt * r_xprt; 118 struct rpc_xprt * r_xprt;
115 119
@@ -163,21 +167,60 @@ static const struct sockaddr_in rpcb_inaddr_loopback = {
163 .sin_port = htons(RPCBIND_PORT), 167 .sin_port = htons(RPCBIND_PORT),
164}; 168};
165 169
166static struct rpc_clnt *rpcb_create_local(struct sockaddr *addr, 170static DEFINE_MUTEX(rpcb_create_local_mutex);
167 size_t addrlen, u32 version) 171
172/*
173 * Returns zero on success, otherwise a negative errno value
174 * is returned.
175 */
176static int rpcb_create_local(void)
168{ 177{
169 struct rpc_create_args args = { 178 struct rpc_create_args args = {
170 .protocol = XPRT_TRANSPORT_UDP, 179 .protocol = XPRT_TRANSPORT_TCP,
171 .address = addr, 180 .address = (struct sockaddr *)&rpcb_inaddr_loopback,
172 .addrsize = addrlen, 181 .addrsize = sizeof(rpcb_inaddr_loopback),
173 .servername = "localhost", 182 .servername = "localhost",
174 .program = &rpcb_program, 183 .program = &rpcb_program,
175 .version = version, 184 .version = RPCBVERS_2,
176 .authflavor = RPC_AUTH_UNIX, 185 .authflavor = RPC_AUTH_UNIX,
177 .flags = RPC_CLNT_CREATE_NOPING, 186 .flags = RPC_CLNT_CREATE_NOPING,
178 }; 187 };
188 struct rpc_clnt *clnt, *clnt4;
189 int result = 0;
190
191 if (rpcb_local_clnt)
192 return result;
193
194 mutex_lock(&rpcb_create_local_mutex);
195 if (rpcb_local_clnt)
196 goto out;
197
198 clnt = rpc_create(&args);
199 if (IS_ERR(clnt)) {
200 dprintk("RPC: failed to create local rpcbind "
201 "client (errno %ld).\n", PTR_ERR(clnt));
202 result = PTR_ERR(clnt);
203 goto out;
204 }
179 205
180 return rpc_create(&args); 206 /*
207 * This results in an RPC ping. On systems running portmapper,
208 * the v4 ping will fail. Proceed anyway, but disallow rpcb
209 * v4 upcalls.
210 */
211 clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4);
212 if (IS_ERR(clnt4)) {
213 dprintk("RPC: failed to create local rpcbind v4 "
214 "cleint (errno %ld).\n", PTR_ERR(clnt4));
215 clnt4 = NULL;
216 }
217
218 rpcb_local_clnt = clnt;
219 rpcb_local_clnt4 = clnt4;
220
221out:
222 mutex_unlock(&rpcb_create_local_mutex);
223 return result;
181} 224}
182 225
183static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr, 226static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
@@ -209,22 +252,13 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
209 return rpc_create(&args); 252 return rpc_create(&args);
210} 253}
211 254
212static int rpcb_register_call(const u32 version, struct rpc_message *msg) 255static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
213{ 256{
214 struct sockaddr *addr = (struct sockaddr *)&rpcb_inaddr_loopback;
215 size_t addrlen = sizeof(rpcb_inaddr_loopback);
216 struct rpc_clnt *rpcb_clnt;
217 int result, error = 0; 257 int result, error = 0;
218 258
219 msg->rpc_resp = &result; 259 msg->rpc_resp = &result;
220 260
221 rpcb_clnt = rpcb_create_local(addr, addrlen, version); 261 error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN);
222 if (!IS_ERR(rpcb_clnt)) {
223 error = rpc_call_sync(rpcb_clnt, msg, 0);
224 rpc_shutdown_client(rpcb_clnt);
225 } else
226 error = PTR_ERR(rpcb_clnt);
227
228 if (error < 0) { 262 if (error < 0) {
229 dprintk("RPC: failed to contact local rpcbind " 263 dprintk("RPC: failed to contact local rpcbind "
230 "server (errno %d).\n", -error); 264 "server (errno %d).\n", -error);
@@ -279,6 +313,11 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port)
279 struct rpc_message msg = { 313 struct rpc_message msg = {
280 .rpc_argp = &map, 314 .rpc_argp = &map,
281 }; 315 };
316 int error;
317
318 error = rpcb_create_local();
319 if (error)
320 return error;
282 321
283 dprintk("RPC: %sregistering (%u, %u, %d, %u) with local " 322 dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
284 "rpcbind\n", (port ? "" : "un"), 323 "rpcbind\n", (port ? "" : "un"),
@@ -288,7 +327,7 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port)
288 if (port) 327 if (port)
289 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET]; 328 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
290 329
291 return rpcb_register_call(RPCBVERS_2, &msg); 330 return rpcb_register_call(rpcb_local_clnt, &msg);
292} 331}
293 332
294/* 333/*
@@ -313,7 +352,7 @@ static int rpcb_register_inet4(const struct sockaddr *sap,
313 if (port) 352 if (port)
314 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; 353 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
315 354
316 result = rpcb_register_call(RPCBVERS_4, msg); 355 result = rpcb_register_call(rpcb_local_clnt4, msg);
317 kfree(map->r_addr); 356 kfree(map->r_addr);
318 return result; 357 return result;
319} 358}
@@ -340,7 +379,7 @@ static int rpcb_register_inet6(const struct sockaddr *sap,
340 if (port) 379 if (port)
341 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; 380 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
342 381
343 result = rpcb_register_call(RPCBVERS_4, msg); 382 result = rpcb_register_call(rpcb_local_clnt4, msg);
344 kfree(map->r_addr); 383 kfree(map->r_addr);
345 return result; 384 return result;
346} 385}
@@ -356,7 +395,7 @@ static int rpcb_unregister_all_protofamilies(struct rpc_message *msg)
356 map->r_addr = ""; 395 map->r_addr = "";
357 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; 396 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
358 397
359 return rpcb_register_call(RPCBVERS_4, msg); 398 return rpcb_register_call(rpcb_local_clnt4, msg);
360} 399}
361 400
362/** 401/**
@@ -414,6 +453,13 @@ int rpcb_v4_register(const u32 program, const u32 version,
414 struct rpc_message msg = { 453 struct rpc_message msg = {
415 .rpc_argp = &map, 454 .rpc_argp = &map,
416 }; 455 };
456 int error;
457
458 error = rpcb_create_local();
459 if (error)
460 return error;
461 if (rpcb_local_clnt4 == NULL)
462 return -EPROTONOSUPPORT;
417 463
418 if (address == NULL) 464 if (address == NULL)
419 return rpcb_unregister_all_protofamilies(&msg); 465 return rpcb_unregister_all_protofamilies(&msg);
@@ -491,7 +537,7 @@ static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbi
491 .rpc_message = &msg, 537 .rpc_message = &msg,
492 .callback_ops = &rpcb_getport_ops, 538 .callback_ops = &rpcb_getport_ops,
493 .callback_data = map, 539 .callback_data = map,
494 .flags = RPC_TASK_ASYNC, 540 .flags = RPC_TASK_ASYNC | RPC_TASK_SOFTCONN,
495 }; 541 };
496 542
497 return rpc_run_task(&task_setup_data); 543 return rpc_run_task(&task_setup_data);
@@ -1027,3 +1073,15 @@ static struct rpc_program rpcb_program = {
1027 .version = rpcb_version, 1073 .version = rpcb_version,
1028 .stats = &rpcb_stats, 1074 .stats = &rpcb_stats,
1029}; 1075};
1076
1077/**
1078 * cleanup_rpcb_clnt - shut down the cached local rpcbind clients
1079 *
1080 */
1081void cleanup_rpcb_clnt(void)
1082{
1083 if (rpcb_local_clnt4)
1084 rpc_shutdown_client(rpcb_local_clnt4);
1085 if (rpcb_local_clnt)
1086 rpc_shutdown_client(rpcb_local_clnt);
1087}
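rpcb_create_local() above is double-checked initialization: the unlocked test serves the common already-created case, and the test repeats under the mutex so racing callers cannot both create clients. The shape of the pattern (make_res() is a hypothetical stand-in):

    struct my_res;
    static struct my_res *make_res(void);

    static struct my_res *cached_res;
    static DEFINE_MUTEX(create_mutex);

    static int get_cached_res(void)
    {
            int result = 0;

            if (cached_res)                 /* fast path, no lock */
                    return result;

            mutex_lock(&create_mutex);
            if (cached_res)                 /* re-check under the lock */
                    goto out;

            cached_res = make_res();
            if (!cached_res)
                    result = -ENOMEM;
    out:
            mutex_unlock(&create_mutex);
            return result;
    }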
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 8cce92189019..f438347d817b 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -24,6 +24,8 @@
24 24
25extern struct cache_detail ip_map_cache, unix_gid_cache; 25extern struct cache_detail ip_map_cache, unix_gid_cache;
26 26
27extern void cleanup_rpcb_clnt(void);
28
27static int __init 29static int __init
28init_sunrpc(void) 30init_sunrpc(void)
29{ 31{
@@ -53,6 +55,7 @@ out:
53static void __exit 55static void __exit
54cleanup_sunrpc(void) 56cleanup_sunrpc(void)
55{ 57{
58 cleanup_rpcb_clnt();
56 rpcauth_remove_module(); 59 rpcauth_remove_module();
57 cleanup_socket_xprt(); 60 cleanup_socket_xprt();
58 svc_cleanup_xprt_sock(); 61 svc_cleanup_xprt_sock();
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index fd46d42afa89..469de292c23c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -700,6 +700,10 @@ void xprt_connect(struct rpc_task *task)
700 } 700 }
701 if (!xprt_lock_write(xprt, task)) 701 if (!xprt_lock_write(xprt, task))
702 return; 702 return;
703
704 if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
705 xprt->ops->close(xprt);
706
703 if (xprt_connected(xprt)) 707 if (xprt_connected(xprt))
704 xprt_release_write(xprt, task); 708 xprt_release_write(xprt, task);
705 else { 709 else {
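The xprt_connect() hunk relies on the atomic read-and-clear idiom: test_and_clear_bit() returns the old bit value atomically, so exactly one of any racing callers observes CLOSE_WAIT set and performs the close. In isolation (flag and helper hypothetical):

    static void maybe_close(unsigned long *state)
    {
            /* Atomically clears the bit and reports whether it was set;
             * only that one caller invokes do_close(). */
            if (test_and_clear_bit(MY_CLOSE_WAIT, state))
                    do_close();
    }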
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 04732d09013e..3d739e5d15d8 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2019,7 +2019,7 @@ static void xs_connect(struct rpc_task *task)
2019 if (xprt_test_and_set_connecting(xprt)) 2019 if (xprt_test_and_set_connecting(xprt))
2020 return; 2020 return;
2021 2021
2022 if (transport->sock != NULL) { 2022 if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
2023 dprintk("RPC: xs_connect delayed xprt %p for %lu " 2023 dprintk("RPC: xs_connect delayed xprt %p for %lu "
2024 "seconds\n", 2024 "seconds\n",
2025 xprt, xprt->reestablish_timeout / HZ); 2025 xprt, xprt->reestablish_timeout / HZ);