 arch/blackfin/mach-common/entry.S       |  4
 arch/cris/arch-v10/kernel/entry.S       |  2
 arch/cris/arch-v32/mm/mmu.S             |  2
 arch/ia64/include/asm/percpu.h          |  4
 arch/ia64/kernel/ia64_ksyms.c           |  4
 arch/ia64/mm/discontig.c                |  2
 arch/microblaze/include/asm/entry.h     |  2
 arch/parisc/lib/fixup.S                 |  8
 arch/powerpc/platforms/pseries/hvCall.S |  2
 arch/sparc/kernel/nmi.c                 |  6
 arch/sparc/kernel/rtrap_64.S            |  8
 arch/x86/include/asm/percpu.h           | 37
 arch/x86/include/asm/system.h           |  8
 arch/x86/kernel/apic/nmi.c              |  6
 arch/x86/kernel/head_32.S               |  6
 arch/x86/kernel/vmlinux.lds.S           |  4
 arch/x86/xen/xen-asm_32.S               |  4
 include/asm-generic/percpu.h            | 12
 include/linux/percpu-defs.h             | 18
 include/linux/percpu.h                  |  5
 include/linux/vmstat.h                  |  8
 kernel/rcutorture.c                     |  8
 kernel/trace/trace.c                    |  6
 kernel/trace/trace_functions_graph.c    |  4
 24 files changed, 80 insertions(+), 90 deletions(-)
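
Every hunk that follows makes the same substitution: the per_cpu__ (in assembly, _per_cpu__) symbol prefix is dropped, so a per-cpu variable's source-level name and its linker symbol coincide, and the accessor macros stop pasting the prefix back on. A minimal before/after sketch, using an illustrative variable foo that is not part of this patch:

    /* Before: DEFINE_PER_CPU(int, foo) emitted the symbol per_cpu__foo,
     * and every accessor re-derived that name via token pasting. */
    DEFINE_PER_CPU(int, foo);
    int v = __get_cpu_var(foo);   /* resolved against per_cpu__foo */

    /* After: the definition emits the plain symbol foo, so C, assembly,
     * and linker scripts can all reference the same name directly. */
    DEFINE_PER_CPU(int, foo);
    int v = __get_cpu_var(foo);   /* resolved against foo */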
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 1e7cac23e25f..a3ea7e9fe43b 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -835,8 +835,8 @@ ENDPROC(_resume)
 
 ENTRY(_ret_from_exception)
 #ifdef CONFIG_IPIPE
-	p2.l = _per_cpu__ipipe_percpu_domain;
-	p2.h = _per_cpu__ipipe_percpu_domain;
+	p2.l = _ipipe_percpu_domain;
+	p2.h = _ipipe_percpu_domain;
 	r0.l = _ipipe_root;
 	r0.h = _ipipe_root;
 	r2 = [p2];
diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S
index 2c18d08cd913..c52bef39e250 100644
--- a/arch/cris/arch-v10/kernel/entry.S
+++ b/arch/cris/arch-v10/kernel/entry.S
@@ -358,7 +358,7 @@ mmu_bus_fault:
 1:	btstq	12, $r1		; Refill?
 	bpl	2f
 	lsrq	24, $r1		; Get PGD index (bit 24-31)
-	move.d	[per_cpu__current_pgd], $r0	; PGD for the current process
+	move.d	[current_pgd], $r0	; PGD for the current process
 	move.d	[$r0+$r1.d], $r0	; Get PMD
 	beq	2f
 	nop
diff --git a/arch/cris/arch-v32/mm/mmu.S b/arch/cris/arch-v32/mm/mmu.S
index 2238d154bde3..f125d912e140 100644
--- a/arch/cris/arch-v32/mm/mmu.S
+++ b/arch/cris/arch-v32/mm/mmu.S
@@ -115,7 +115,7 @@
 #ifdef CONFIG_SMP
 	move	$s7, $acr	; PGD
 #else
-	move.d	per_cpu__current_pgd, $acr	; PGD
+	move.d	current_pgd, $acr	; PGD
 #endif
 	; Look up PMD in PGD
 	lsrq	24, $r0		; Get PMD index into PGD (bit 24-31)
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index 30cf46534dd2..f7c00a5e0e2b 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -9,7 +9,7 @@
 #define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE
 
 #ifdef __ASSEMBLY__
-# define THIS_CPU(var)	(per_cpu__##var)  /* use this to mark accesses to per-CPU variables... */
+# define THIS_CPU(var)	(var)  /* use this to mark accesses to per-CPU variables... */
 #else /* !__ASSEMBLY__ */
 
 
@@ -39,7 +39,7 @@ extern void *per_cpu_init(void);
  * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
  * more efficient.
  */
-#define __ia64_per_cpu_var(var)	per_cpu__##var
+#define __ia64_per_cpu_var(var)	var
 
 #include <asm-generic/percpu.h>
 
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 461b99902bf6..7f4a0ed24152 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,9 +30,9 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic
 #endif
 
 #include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
+EXPORT_SYMBOL(ia64_cpu_info);
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
+EXPORT_SYMBOL(local_per_cpu_offset);
 #endif
 
 #include <asm/uaccess.h>
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 19c4b2195dce..8d586d1e2515 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -459,7 +459,7 @@ static void __init initialize_pernode_data(void)
 		cpu = 0;
 		node = node_cpuid[cpu].nid;
 		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
-			((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
+			((char *)&ia64_cpu_info - __per_cpu_start));
 		cpu0_cpu_info->node_data = mem_data[node].node_data;
 	}
 #endif /* CONFIG_SMP */
diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h
index 61abbd232640..ec89f2ad0fe1 100644
--- a/arch/microblaze/include/asm/entry.h
+++ b/arch/microblaze/include/asm/entry.h
@@ -21,7 +21,7 @@
  * places
  */
 
-#define PER_CPU(var) per_cpu__##var
+#define PER_CPU(var) var
 
 # ifndef __ASSEMBLY__
 DECLARE_PER_CPU(unsigned int, KSP);   /* Saved kernel stack pointer */
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
index d172d4245cdc..f8c45cc2947d 100644
--- a/arch/parisc/lib/fixup.S
+++ b/arch/parisc/lib/fixup.S
@@ -36,8 +36,8 @@
 #endif
 	/* t2 = &__per_cpu_offset[smp_processor_id()]; */
 	LDREGX \t2(\t1),\t2
-	addil LT%per_cpu__exception_data,%r27
-	LDREG RT%per_cpu__exception_data(%r1),\t1
+	addil LT%exception_data,%r27
+	LDREG RT%exception_data(%r1),\t1
 	/* t1 = &__get_cpu_var(exception_data) */
 	add,l \t1,\t2,\t1
 	/* t1 = t1->fault_ip */
@@ -46,8 +46,8 @@
 #else
 	.macro  get_fault_ip t1 t2
 	/* t1 = &__get_cpu_var(exception_data) */
-	addil LT%per_cpu__exception_data,%r27
-	LDREG RT%per_cpu__exception_data(%r1),\t2
+	addil LT%exception_data,%r27
+	LDREG RT%exception_data(%r1),\t2
 	/* t1 = t2->fault_ip */
 	LDREG EXCDATA_IP(\t2), \t1
 	.endm
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index c1427b3634ec..580f789cae7f 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -55,7 +55,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_PURR); \
 	/* calculate address of stat structure r4 = opcode */	\
 	srdi	r4,r4,2;		/* index into array */	\
 	mulli	r4,r4,HCALL_STAT_SIZE;				\
-	LOAD_REG_ADDR(r7, per_cpu__hcall_stats);		\
+	LOAD_REG_ADDR(r7, hcall_stats);				\
 	add	r4,r4,r7;					\
 	ld	r7,PACA_DATA_OFFSET(r13); /* per cpu offset */	\
 	add	r4,r4,r7;					\
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index f30f4a1ead23..2ad288ff99a4 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -112,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 		touched = 1;
 	}
 	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
-		__this_cpu_inc(per_cpu_var(alert_counter));
-		if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz)
+		__this_cpu_inc(alert_counter);
+		if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
 			die_nmi("BUG: NMI Watchdog detected LOCKUP",
 				regs, panic_on_timeout);
 	} else {
 		__get_cpu_var(last_irq_sum) = sum;
-		__this_cpu_write(per_cpu_var(alert_counter), 0);
+		__this_cpu_write(alert_counter, 0);
 	}
 	if (__get_cpu_var(wd_enabled)) {
 		write_pic(picl_value(nmi_hz));
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index fd3cee4d117c..1ddec403f512 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -149,11 +149,11 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
 rtrap_irq:
 rtrap:
 #ifndef CONFIG_SMP
-		sethi			%hi(per_cpu____cpu_data), %l0
-		lduw			[%l0 + %lo(per_cpu____cpu_data)], %l1
+		sethi			%hi(__cpu_data), %l0
+		lduw			[%l0 + %lo(__cpu_data)], %l1
 #else
-		sethi			%hi(per_cpu____cpu_data), %l0
-		or			%l0, %lo(per_cpu____cpu_data), %l0
+		sethi			%hi(__cpu_data), %l0
+		or			%l0, %lo(__cpu_data), %l0
 		lduw			[%l0 + %g5], %l1
 #endif
 		cmp			%l1, 0
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0c44196b78ac..4c170ccc72ed 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -25,19 +25,18 @@
  */
 #ifdef CONFIG_SMP
 #define PER_CPU(var, reg)						\
-	__percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg;	\
-	lea per_cpu__##var(reg), reg
-#define PER_CPU_VAR(var)	%__percpu_seg:per_cpu__##var
+	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
+	lea var(reg), reg
+#define PER_CPU_VAR(var)	%__percpu_seg:var
 #else /* ! SMP */
-#define PER_CPU(var, reg)						\
-	__percpu_mov_op $per_cpu__##var, reg
-#define PER_CPU_VAR(var)	per_cpu__##var
+#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
+#define PER_CPU_VAR(var)	var
 #endif	/* SMP */
 
 #ifdef CONFIG_X86_64_SMP
 #define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
 #else
-#define INIT_PER_CPU_VAR(var)  per_cpu__##var
+#define INIT_PER_CPU_VAR(var)  var
 #endif
 
 #else /* ...!ASSEMBLY */
@@ -60,12 +59,12 @@
  * There also must be an entry in vmlinux_64.lds.S
  */
 #define DECLARE_INIT_PER_CPU(var) \
-	extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
+	extern typeof(var) init_per_cpu_var(var)
 
 #ifdef CONFIG_X86_64_SMP
 #define init_per_cpu_var(var)  init_per_cpu__##var
 #else
-#define init_per_cpu_var(var)  per_cpu_var(var)
+#define init_per_cpu_var(var)  var
 #endif
 
 /* For arch-specific code, we can use direct single-insn ops (they
@@ -142,16 +141,14 @@ do { \
  * per-thread variables implemented as per-cpu variables and thus
  * stable for the duration of the respective task.
  */
-#define percpu_read(var)	percpu_from_op("mov", per_cpu__##var,	\
-					       "m" (per_cpu__##var))
-#define percpu_read_stable(var)	percpu_from_op("mov", per_cpu__##var,	\
-					       "p" (&per_cpu__##var))
-#define percpu_write(var, val)	percpu_to_op("mov", per_cpu__##var, val)
-#define percpu_add(var, val)	percpu_to_op("add", per_cpu__##var, val)
-#define percpu_sub(var, val)	percpu_to_op("sub", per_cpu__##var, val)
-#define percpu_and(var, val)	percpu_to_op("and", per_cpu__##var, val)
-#define percpu_or(var, val)	percpu_to_op("or", per_cpu__##var, val)
-#define percpu_xor(var, val)	percpu_to_op("xor", per_cpu__##var, val)
+#define percpu_read(var)		percpu_from_op("mov", var, "m" (var))
+#define percpu_read_stable(var)		percpu_from_op("mov", var, "p" (&(var)))
+#define percpu_write(var, val)		percpu_to_op("mov", var, val)
+#define percpu_add(var, val)		percpu_to_op("add", var, val)
+#define percpu_sub(var, val)		percpu_to_op("sub", var, val)
+#define percpu_and(var, val)		percpu_to_op("and", var, val)
+#define percpu_or(var, val)		percpu_to_op("or", var, val)
+#define percpu_xor(var, val)		percpu_to_op("xor", var, val)
 
 #define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
@@ -236,7 +233,7 @@ do { \
 ({									\
 	int old__;							\
 	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
-		     : "=r" (old__), "+m" (per_cpu__##var)		\
+		     : "=r" (old__), "+m" (var)				\
 		     : "dIr" (bit));					\
 	old__;								\
 })
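
With the prefix gone, the x86 fast-path accessors above hand the plain variable straight to percpu_from_op()/percpu_to_op(), which address it through the per-CPU segment register. A rough sketch of the expansion, assuming a DEFINE_PER_CPU(int, foo) (foo is illustrative):

    /* percpu_read(foo) becomes percpu_from_op("mov", foo, "m" (foo)),
     * which on SMP emits roughly:
     *
     *     movl %fs:foo, %eax      (32-bit; 64-bit uses %gs)
     *
     * the unprefixed symbol foo serving directly as the segment offset. */
    int v = percpu_read(foo);
    percpu_add(foo, 3);           /* roughly: addl $3, %fs:foo */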
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index f08f97374892..de10c19d9558 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -31,7 +31,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
31 "movl %P[task_canary](%[next]), %%ebx\n\t" \ 31 "movl %P[task_canary](%[next]), %%ebx\n\t" \
32 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" 32 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
33#define __switch_canary_oparam \ 33#define __switch_canary_oparam \
34 , [stack_canary] "=m" (per_cpu_var(stack_canary.canary)) 34 , [stack_canary] "=m" (stack_canary.canary)
35#define __switch_canary_iparam \ 35#define __switch_canary_iparam \
36 , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) 36 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
37#else /* CC_STACKPROTECTOR */ 37#else /* CC_STACKPROTECTOR */
@@ -113,7 +113,7 @@ do { \
113 "movq %P[task_canary](%%rsi),%%r8\n\t" \ 113 "movq %P[task_canary](%%rsi),%%r8\n\t" \
114 "movq %%r8,"__percpu_arg([gs_canary])"\n\t" 114 "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
115#define __switch_canary_oparam \ 115#define __switch_canary_oparam \
116 , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary)) 116 , [gs_canary] "=m" (irq_stack_union.stack_canary)
117#define __switch_canary_iparam \ 117#define __switch_canary_iparam \
118 , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) 118 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
119#else /* CC_STACKPROTECTOR */ 119#else /* CC_STACKPROTECTOR */
@@ -134,7 +134,7 @@ do { \
 	     __switch_canary						  \
 	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
 	     "movq %%rax,%%rdi\n\t"					  \
-	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
+	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
 	     "jnz   ret_from_fork\n\t"					  \
 	     RESTORE_CONTEXT						  \
 	     : "=a" (last)						  \
@@ -144,7 +144,7 @@ do { \
 	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
 	       [_tif_fork] "i" (_TIF_FORK),				  \
 	       [thread_info] "i" (offsetof(struct task_struct, stack)),  \
-	       [current_task] "m" (per_cpu_var(current_task))		  \
+	       [current_task] "m" (current_task)			  \
 	       __switch_canary_iparam					  \
 	     : "memory", "cc" __EXTRA_CLOBBER)
 #endif
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index e631cc4416f7..45404379d173 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -437,8 +437,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		 * Ayiee, looks like this CPU is stuck ...
 		 * wait a few IRQs (5 seconds) before doing the oops ...
 		 */
-		__this_cpu_inc(per_cpu_var(alert_counter));
-		if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
+		__this_cpu_inc(alert_counter);
+		if (__this_cpu_read(alert_counter) == 5 * nmi_hz)
 			/*
 			 * die_nmi will return ONLY if NOTIFY_STOP happens..
 			 */
@@ -446,7 +446,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 				regs, panic_on_timeout);
 	} else {
 		__get_cpu_var(last_irq_sum) = sum;
-		__this_cpu_write(per_cpu_var(alert_counter), 0);
+		__this_cpu_write(alert_counter, 0);
 	}
 
 	/* see if the nmi watchdog went off */
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 050c278481b1..fd39eaf83b84 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -438,8 +438,8 @@ is386: movl $2,%ecx # set MP
 	 */
 	cmpb $0,ready
 	jne 1f
-	movl $per_cpu__gdt_page,%eax
-	movl $per_cpu__stack_canary,%ecx
+	movl $gdt_page,%eax
+	movl $stack_canary,%ecx
 	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
 	shrl $16, %ecx
 	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
@@ -702,7 +702,7 @@ idt_descr:
 	.word 0				# 32 bit align gdt_desc.address
 ENTRY(early_gdt_descr)
 	.word GDT_ENTRIES*8-1
-	.long per_cpu__gdt_page		/* Overwritten for secondary CPUs */
+	.long gdt_page			/* Overwritten for secondary CPUs */
 
 /*
  * The boot_gdt must mirror the equivalent in setup.S and is
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 92929fb3f9fa..ecb92717c412 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -312,7 +312,7 @@ SECTIONS
  * Per-cpu symbols which need to be offset from __per_cpu_load
  * for the boot processor.
  */
-#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
+#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
 INIT_PER_CPU(gdt_page);
 INIT_PER_CPU(irq_stack_union);
 
@@ -323,7 +323,7 @@ INIT_PER_CPU(irq_stack_union);
323 "kernel image bigger than KERNEL_IMAGE_SIZE"); 323 "kernel image bigger than KERNEL_IMAGE_SIZE");
324 324
325#ifdef CONFIG_SMP 325#ifdef CONFIG_SMP
326. = ASSERT((per_cpu__irq_stack_union == 0), 326. = ASSERT((irq_stack_union == 0),
327 "irq_stack_union is not at start of per-cpu area"); 327 "irq_stack_union is not at start of per-cpu area");
328#endif 328#endif
329 329
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 88e15deb8b82..22a2093b5862 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -90,9 +90,9 @@ ENTRY(xen_iret)
 	GET_THREAD_INFO(%eax)
 	movl TI_cpu(%eax), %eax
 	movl __per_cpu_offset(,%eax,4), %eax
-	mov per_cpu__xen_vcpu(%eax), %eax
+	mov xen_vcpu(%eax), %eax
 #else
-	movl per_cpu__xen_vcpu, %eax
+	movl xen_vcpu, %eax
 #endif
 
 	/* check IF state we're restoring */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 8087b90d4673..ca6f0491412b 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -50,11 +50,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
  * offset.
  */
 #define per_cpu(var, cpu) \
-	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
+	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
 #define __get_cpu_var(var) \
-	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
+	(*SHIFT_PERCPU_PTR(&(var), my_cpu_offset))
 #define __raw_get_cpu_var(var) \
-	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+	(*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset))
 
 #define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
 #define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
@@ -66,9 +66,9 @@ extern void setup_per_cpu_areas(void);
 
 #else /* ! SMP */
 
-#define per_cpu(var, cpu)	(*((void)(cpu), &per_cpu_var(var)))
-#define __get_cpu_var(var)	per_cpu_var(var)
-#define __raw_get_cpu_var(var)	per_cpu_var(var)
+#define per_cpu(var, cpu)	(*((void)(cpu), &(var)))
+#define __get_cpu_var(var)	(var)
+#define __raw_get_cpu_var(var)	(var)
 #define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
 #define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
 
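
In the generic SMP implementation above, an access now takes the address of the plain symbol and shifts it by a CPU's offset. A hedged sketch of the resulting arithmetic (foo is illustrative):

    /* per_cpu(foo, cpu) expands to *SHIFT_PERCPU_PTR(&foo, per_cpu_offset(cpu)):
     * the link-time address of foo in the per-cpu template section, plus the
     * chosen CPU's offset, dereferenced. */
    DEFINE_PER_CPU(int, foo);
    int theirs = per_cpu(foo, 2);        /* CPU 2's copy */
    int mine   = __get_cpu_var(foo);     /* this CPU's copy */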
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 5a5d6ce4bd55..ee99f6c2cdcd 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -2,12 +2,6 @@
 #define _LINUX_PERCPU_DEFS_H
 
 /*
- * Determine the real variable name from the name visible in the
- * kernel sources.
- */
-#define per_cpu_var(var) per_cpu__##var
-
-/*
  * Base implementations of per-CPU variable declarations and definitions, where
  * the section in which the variable is to be placed is provided by the
  * 'sec' argument. This may be used to affect the parameters governing the
@@ -56,24 +50,24 @@
  */
 #define DECLARE_PER_CPU_SECTION(type, name, sec)			\
 	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
-	extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+	extern __PCPU_ATTRS(sec) __typeof__(type) name
 
 #define DEFINE_PER_CPU_SECTION(type, name, sec)				\
 	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
 	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
 	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
 	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak			\
-	__typeof__(type) per_cpu__##name
+	__typeof__(type) name
 #else
 /*
  * Normal declaration and definition macros.
  */
 #define DECLARE_PER_CPU_SECTION(type, name, sec)			\
-	extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+	extern __PCPU_ATTRS(sec) __typeof__(type) name
 
 #define DEFINE_PER_CPU_SECTION(type, name, sec)				\
 	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES			\
-	__typeof__(type) per_cpu__##name
+	__typeof__(type) name
 #endif
 
 /*
@@ -137,8 +131,8 @@
 /*
  * Intermodule exports for per-CPU variables.
  */
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
 
 
 #endif /* _LINUX_PERCPU_DEFS_H */
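
The net effect in percpu-defs.h is that declaration, definition, and export now all name one and the same symbol. A minimal sketch (foo is illustrative):

    DECLARE_PER_CPU(unsigned long, foo);   /* in a header */
    DEFINE_PER_CPU(unsigned long, foo);    /* in one .c file: the symbol 'foo'
                                            * is placed in the per-cpu section */
    EXPORT_PER_CPU_SYMBOL(foo);            /* now literally EXPORT_SYMBOL(foo) */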
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 522f421ec213..e12410e55e05 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -182,7 +182,7 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
 #ifndef percpu_read
 # define percpu_read(var)						\
   ({									\
-	typeof(per_cpu_var(var)) __tmp_var__;				\
+	typeof(var) __tmp_var__;					\
 	__tmp_var__ = get_cpu_var(var);					\
 	put_cpu_var(var);						\
 	__tmp_var__;							\
@@ -253,8 +253,7 @@ do { \
 
 /*
  * Optimized manipulation for memory allocated through the per cpu
- * allocator or for addresses of per cpu variables (can be determined
- * using per_cpu_var(xx).
+ * allocator or for addresses of per cpu variables.
  *
  * These operation guarantee exclusivity of access for other operations
  * on the *same* processor. The assumption is that per cpu data is only
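
Only the typeof() operand changes in the generic percpu_read() fallback above; its structure stays the same. Spelled out for an illustrative foo, it is equivalent to:

    typeof(foo) v;
    v = get_cpu_var(foo);   /* preempt_disable(), then read this CPU's foo */
    put_cpu_var(foo);       /* preempt_enable() */
    /* v holds a snapshot taken without risk of migrating mid-read */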
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index d85889710f9b..3e489fda11a1 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -76,22 +76,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
 
 static inline void __count_vm_event(enum vm_event_item item)
 {
-	__this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
+	__this_cpu_inc(vm_event_states.event[item]);
 }
 
 static inline void count_vm_event(enum vm_event_item item)
 {
-	this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
+	this_cpu_inc(vm_event_states.event[item]);
 }
 
 static inline void __count_vm_events(enum vm_event_item item, long delta)
 {
-	__this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
+	__this_cpu_add(vm_event_states.event[item], delta);
 }
 
 static inline void count_vm_events(enum vm_event_item item, long delta)
 {
-	this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
+	this_cpu_add(vm_event_states.event[item], delta);
 }
 
 extern void all_vm_events(unsigned long *);
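
With vm_event_states referenced directly, each counter update is a single this_cpu operation on the plain symbol. A typical call site, using the real event id PGFAULT (the surrounding context is illustrative):

    count_vm_event(PGFAULT);    /* this_cpu_inc(vm_event_states.event[PGFAULT]) */
    /* The __ variants use __this_cpu_add() and assume the caller already
     * prevents migration, e.g. runs with preemption or IRQs disabled: */
    __count_vm_events(PGMAJFAULT, 1);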
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 178967b6434e..e339ab349121 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -731,13 +731,13 @@ static void rcu_torture_timer(unsigned long unused)
 		/* Should not happen, but... */
 		pipe_count = RCU_TORTURE_PIPE_LEN;
 	}
-	__this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+	__this_cpu_inc(rcu_torture_count[pipe_count]);
 	completed = cur_ops->completed() - completed;
 	if (completed > RCU_TORTURE_PIPE_LEN) {
 		/* Should not happen, but... */
 		completed = RCU_TORTURE_PIPE_LEN;
 	}
-	__this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+	__this_cpu_inc(rcu_torture_batch[completed]);
 	preempt_enable();
 	cur_ops->readunlock(idx);
 }
@@ -786,13 +786,13 @@ rcu_torture_reader(void *arg)
 		/* Should not happen, but... */
 		pipe_count = RCU_TORTURE_PIPE_LEN;
 	}
-	__this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+	__this_cpu_inc(rcu_torture_count[pipe_count]);
 	completed = cur_ops->completed() - completed;
 	if (completed > RCU_TORTURE_PIPE_LEN) {
 		/* Should not happen, but... */
 		completed = RCU_TORTURE_PIPE_LEN;
 	}
-	__this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+	__this_cpu_inc(rcu_torture_batch[completed]);
 	preempt_enable();
 	cur_ops->readunlock(idx);
 	schedule();
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 85a5ed70b5b2..b808177af816 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -91,12 +91,12 @@ DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(ftrace_cpu_disabled);
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(ftrace_cpu_disabled);
 	preempt_enable();
 }
 
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 90a6daa10962..8614e3241ff8 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -176,7 +176,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ent_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return 0;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -240,7 +240,7 @@ static void __trace_graph_return(struct trace_array *tr,
 	struct ring_buffer *buffer = tr->buffer;
 	struct ftrace_graph_ret_entry *entry;
 
-	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,