author     Linus Torvalds <torvalds@linux-foundation.org>  2011-01-07 20:02:58 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-01-07 20:02:58 -0500
commit     72eb6a791459c87a0340318840bb3bd9252b627b (patch)
tree       3bfb8ad99f9c7e511f37f72d57b56a2cea06d753
parent     23d69b09b78c4876e134f104a3814c30747c53f1 (diff)
parent     55ee4ef30241a62b700f79517e6d5ef2ddbefa67 (diff)
Merge branch 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (30 commits)
gameport: use this_cpu_read instead of lookup
x86: udelay: Use this_cpu_read to avoid address calculation
x86: Use this_cpu_inc_return for nmi counter
x86: Replace uses of current_cpu_data with this_cpu ops
x86: Use this_cpu_ops to optimize code
vmstat: User per cpu atomics to avoid interrupt disable / enable
irq_work: Use per cpu atomics instead of regular atomics
cpuops: Use cmpxchg for xchg to avoid lock semantics
x86: this_cpu_cmpxchg and this_cpu_xchg operations
percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg support
percpu,x86: relocate this_cpu_add_return() and friends
connector: Use this_cpu operations
xen: Use this_cpu_inc_return
taskstats: Use this_cpu_ops
random: Use this_cpu_inc_return
fs: Use this_cpu_inc_return in buffer.c
highmem: Use this_cpu_xx_return() operations
vmstat: Use this_cpu_inc_return for vm statistics
x86: Support for this_cpu_add, sub, dec, inc_return
percpu: Generic support for this_cpu_add, sub, dec, inc_return
...
Fixed up conflicts in arch/x86/kernel/{apic/nmi.c, apic/x2apic_uv_x.c, process.c}
as per Tejun.
62 files changed, 703 insertions(+), 275 deletions(-)
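The series above replaces open-coded __get_cpu_var() accesses with the this_cpu_*() operations, which the x86 implementation lowers to a single segment-prefixed instruction instead of an address calculation plus a separate memory access. A minimal before/after sketch of that conversion pattern; the per-cpu variable here is hypothetical and not taken from any of the patches:

/*
 * Illustration only (not part of this merge): the conversion pattern
 * applied throughout the series.  "demo_count" is a made-up variable.
 */
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_count);

static unsigned long demo_read_old(void)
{
        /* old style: compute the per-cpu address, then load from it */
        return __get_cpu_var(demo_count);
}

static unsigned long demo_read_new(void)
{
        /* new style: one segment-prefixed mov on x86, preemption-safe */
        return __this_cpu_read(demo_count);
}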
diff --git a/MAINTAINERS b/MAINTAINERS
index 78d317c545b9..23d04363a195 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4653,6 +4653,16 @@ S: Maintained | |||
4653 | F: crypto/pcrypt.c | 4653 | F: crypto/pcrypt.c |
4654 | F: include/crypto/pcrypt.h | 4654 | F: include/crypto/pcrypt.h |
4655 | 4655 | ||
4656 | PER-CPU MEMORY ALLOCATOR | ||
4657 | M: Tejun Heo <tj@kernel.org> | ||
4658 | M: Christoph Lameter <cl@linux-foundation.org> | ||
4659 | L: linux-kernel@vger.kernel.org | ||
4660 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git | ||
4661 | S: Maintained | ||
4662 | F: include/linux/percpu*.h | ||
4663 | F: mm/percpu*.c | ||
4664 | F: arch/*/include/asm/percpu.h | ||
4665 | |||
4656 | PER-TASK DELAY ACCOUNTING | 4666 | PER-TASK DELAY ACCOUNTING |
4657 | M: Balbir Singh <balbir@linux.vnet.ibm.com> | 4667 | M: Balbir Singh <balbir@linux.vnet.ibm.com> |
4658 | S: Maintained | 4668 | S: Maintained |
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 2ac9069890cd..15588a0ef466 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -310,6 +310,9 @@ config X86_INTERNODE_CACHE_SHIFT | |||
310 | config X86_CMPXCHG | 310 | config X86_CMPXCHG |
311 | def_bool X86_64 || (X86_32 && !M386) | 311 | def_bool X86_64 || (X86_32 && !M386) |
312 | 312 | ||
313 | config CMPXCHG_LOCAL | ||
314 | def_bool X86_64 || (X86_32 && !M386) | ||
315 | |||
313 | config X86_L1_CACHE_SHIFT | 316 | config X86_L1_CACHE_SHIFT |
314 | int | 317 | int |
315 | default "7" if MPENTIUM4 || MPSC | 318 | default "7" if MPENTIUM4 || MPSC |
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index b81002f23614..078ad0caefc6 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -94,7 +94,7 @@ static inline void hw_breakpoint_disable(void) | |||
94 | 94 | ||
95 | static inline int hw_breakpoint_active(void) | 95 | static inline int hw_breakpoint_active(void) |
96 | { | 96 | { |
97 | return __get_cpu_var(cpu_dr7) & DR_GLOBAL_ENABLE_MASK; | 97 | return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK; |
98 | } | 98 | } |
99 | 99 | ||
100 | extern void aout_dump_debugregs(struct user *dump); | 100 | extern void aout_dump_debugregs(struct user *dump); |
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index f899e01a8ac9..8ee45167e817 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -230,6 +230,125 @@ do { \ | |||
230 | }) | 230 | }) |
231 | 231 | ||
232 | /* | 232 | /* |
233 | * Add return operation | ||
234 | */ | ||
235 | #define percpu_add_return_op(var, val) \ | ||
236 | ({ \ | ||
237 | typeof(var) paro_ret__ = val; \ | ||
238 | switch (sizeof(var)) { \ | ||
239 | case 1: \ | ||
240 | asm("xaddb %0, "__percpu_arg(1) \ | ||
241 | : "+q" (paro_ret__), "+m" (var) \ | ||
242 | : : "memory"); \ | ||
243 | break; \ | ||
244 | case 2: \ | ||
245 | asm("xaddw %0, "__percpu_arg(1) \ | ||
246 | : "+r" (paro_ret__), "+m" (var) \ | ||
247 | : : "memory"); \ | ||
248 | break; \ | ||
249 | case 4: \ | ||
250 | asm("xaddl %0, "__percpu_arg(1) \ | ||
251 | : "+r" (paro_ret__), "+m" (var) \ | ||
252 | : : "memory"); \ | ||
253 | break; \ | ||
254 | case 8: \ | ||
255 | asm("xaddq %0, "__percpu_arg(1) \ | ||
256 | : "+re" (paro_ret__), "+m" (var) \ | ||
257 | : : "memory"); \ | ||
258 | break; \ | ||
259 | default: __bad_percpu_size(); \ | ||
260 | } \ | ||
261 | paro_ret__ += val; \ | ||
262 | paro_ret__; \ | ||
263 | }) | ||
264 | |||
265 | /* | ||
266 | * xchg is implemented using cmpxchg without a lock prefix. xchg is | ||
267 | * expensive due to the implied lock prefix. The processor cannot prefetch | ||
268 | * cachelines if xchg is used. | ||
269 | */ | ||
270 | #define percpu_xchg_op(var, nval) \ | ||
271 | ({ \ | ||
272 | typeof(var) pxo_ret__; \ | ||
273 | typeof(var) pxo_new__ = (nval); \ | ||
274 | switch (sizeof(var)) { \ | ||
275 | case 1: \ | ||
276 | asm("\n1:mov "__percpu_arg(1)",%%al" \ | ||
277 | "\n\tcmpxchgb %2, "__percpu_arg(1) \ | ||
278 | "\n\tjnz 1b" \ | ||
279 | : "=a" (pxo_ret__), "+m" (var) \ | ||
280 | : "q" (pxo_new__) \ | ||
281 | : "memory"); \ | ||
282 | break; \ | ||
283 | case 2: \ | ||
284 | asm("\n1:mov "__percpu_arg(1)",%%ax" \ | ||
285 | "\n\tcmpxchgw %2, "__percpu_arg(1) \ | ||
286 | "\n\tjnz 1b" \ | ||
287 | : "=a" (pxo_ret__), "+m" (var) \ | ||
288 | : "r" (pxo_new__) \ | ||
289 | : "memory"); \ | ||
290 | break; \ | ||
291 | case 4: \ | ||
292 | asm("\n1:mov "__percpu_arg(1)",%%eax" \ | ||
293 | "\n\tcmpxchgl %2, "__percpu_arg(1) \ | ||
294 | "\n\tjnz 1b" \ | ||
295 | : "=a" (pxo_ret__), "+m" (var) \ | ||
296 | : "r" (pxo_new__) \ | ||
297 | : "memory"); \ | ||
298 | break; \ | ||
299 | case 8: \ | ||
300 | asm("\n1:mov "__percpu_arg(1)",%%rax" \ | ||
301 | "\n\tcmpxchgq %2, "__percpu_arg(1) \ | ||
302 | "\n\tjnz 1b" \ | ||
303 | : "=a" (pxo_ret__), "+m" (var) \ | ||
304 | : "r" (pxo_new__) \ | ||
305 | : "memory"); \ | ||
306 | break; \ | ||
307 | default: __bad_percpu_size(); \ | ||
308 | } \ | ||
309 | pxo_ret__; \ | ||
310 | }) | ||
311 | |||
312 | /* | ||
313 | * cmpxchg has no such implied lock semantics as a result it is much | ||
314 | * more efficient for cpu local operations. | ||
315 | */ | ||
316 | #define percpu_cmpxchg_op(var, oval, nval) \ | ||
317 | ({ \ | ||
318 | typeof(var) pco_ret__; \ | ||
319 | typeof(var) pco_old__ = (oval); \ | ||
320 | typeof(var) pco_new__ = (nval); \ | ||
321 | switch (sizeof(var)) { \ | ||
322 | case 1: \ | ||
323 | asm("cmpxchgb %2, "__percpu_arg(1) \ | ||
324 | : "=a" (pco_ret__), "+m" (var) \ | ||
325 | : "q" (pco_new__), "0" (pco_old__) \ | ||
326 | : "memory"); \ | ||
327 | break; \ | ||
328 | case 2: \ | ||
329 | asm("cmpxchgw %2, "__percpu_arg(1) \ | ||
330 | : "=a" (pco_ret__), "+m" (var) \ | ||
331 | : "r" (pco_new__), "0" (pco_old__) \ | ||
332 | : "memory"); \ | ||
333 | break; \ | ||
334 | case 4: \ | ||
335 | asm("cmpxchgl %2, "__percpu_arg(1) \ | ||
336 | : "=a" (pco_ret__), "+m" (var) \ | ||
337 | : "r" (pco_new__), "0" (pco_old__) \ | ||
338 | : "memory"); \ | ||
339 | break; \ | ||
340 | case 8: \ | ||
341 | asm("cmpxchgq %2, "__percpu_arg(1) \ | ||
342 | : "=a" (pco_ret__), "+m" (var) \ | ||
343 | : "r" (pco_new__), "0" (pco_old__) \ | ||
344 | : "memory"); \ | ||
345 | break; \ | ||
346 | default: __bad_percpu_size(); \ | ||
347 | } \ | ||
348 | pco_ret__; \ | ||
349 | }) | ||
350 | |||
351 | /* | ||
233 | * percpu_read() makes gcc load the percpu variable every time it is | 352 | * percpu_read() makes gcc load the percpu variable every time it is |
234 | * accessed while percpu_read_stable() allows the value to be cached. | 353 | * accessed while percpu_read_stable() allows the value to be cached. |
235 | * percpu_read_stable() is more efficient and can be used if its value | 354 | * percpu_read_stable() is more efficient and can be used if its value |
@@ -267,6 +386,12 @@ do { \ | |||
267 | #define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) | 386 | #define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) |
268 | #define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) | 387 | #define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) |
269 | #define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) | 388 | #define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) |
389 | /* | ||
390 | * Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much | ||
391 | * faster than an xchg with forced lock semantics. | ||
392 | */ | ||
393 | #define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) | ||
394 | #define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
270 | 395 | ||
271 | #define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | 396 | #define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) |
272 | #define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | 397 | #define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) |
@@ -286,6 +411,11 @@ do { \ | |||
286 | #define this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) | 411 | #define this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) |
287 | #define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) | 412 | #define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) |
288 | #define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) | 413 | #define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) |
414 | #define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval) | ||
415 | #define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval) | ||
416 | #define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval) | ||
417 | #define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) | ||
418 | #define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
289 | 419 | ||
290 | #define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val) | 420 | #define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val) |
291 | #define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val) | 421 | #define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val) |
@@ -299,6 +429,31 @@ do { \ | |||
299 | #define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) | 429 | #define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) |
300 | #define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) | 430 | #define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) |
301 | #define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) | 431 | #define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) |
432 | #define irqsafe_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval) | ||
433 | #define irqsafe_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval) | ||
434 | #define irqsafe_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval) | ||
435 | #define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) | ||
436 | #define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
437 | |||
438 | #ifndef CONFIG_M386 | ||
439 | #define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val) | ||
440 | #define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val) | ||
441 | #define __this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val) | ||
442 | #define __this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
443 | #define __this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
444 | #define __this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
445 | |||
446 | #define this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val) | ||
447 | #define this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val) | ||
448 | #define this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val) | ||
449 | #define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
450 | #define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
451 | #define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
452 | |||
453 | #define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
454 | #define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
455 | #define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
456 | #endif /* !CONFIG_M386 */ | ||
302 | 457 | ||
303 | /* | 458 | /* |
304 | * Per cpu atomic 64 bit operations are only available under 64 bit. | 459 | * Per cpu atomic 64 bit operations are only available under 64 bit. |
@@ -311,6 +466,7 @@ do { \ | |||
311 | #define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) | 466 | #define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) |
312 | #define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) | 467 | #define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) |
313 | #define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | 468 | #define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) |
469 | #define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val) | ||
314 | 470 | ||
315 | #define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | 471 | #define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) |
316 | #define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) | 472 | #define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) |
@@ -318,12 +474,12 @@ do { \ | |||
318 | #define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) | 474 | #define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) |
319 | #define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) | 475 | #define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) |
320 | #define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | 476 | #define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) |
477 | #define this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val) | ||
321 | 478 | ||
322 | #define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val) | 479 | #define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val) |
323 | #define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) | 480 | #define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) |
324 | #define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) | 481 | #define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) |
325 | #define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | 482 | #define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) |
326 | |||
327 | #endif | 483 | #endif |
328 | 484 | ||
329 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ | 485 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ |
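The hunks above add xadd-based add-return and lock-prefix-free xchg/cmpxchg fast paths for per-cpu data. A brief usage sketch of the this_cpu_add_return() and this_cpu_cmpxchg() family whose x86 implementations are introduced here; the per-cpu variables and helpers below are hypothetical, shown only to illustrate the intended calling pattern:

/*
 * Sketch only (assumed usage, not code from this merge).
 */
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_seq);
static DEFINE_PER_CPU(unsigned long, demo_owner);

static int demo_next_seq(void)
{
        /* xadd under the hood: add and return the new value, no need to
         * disable preemption around a separate read-modify-write */
        return this_cpu_add_return(demo_seq, 1);
}

static int demo_try_claim(unsigned long token)
{
        /* cmpxchg without the lock prefix: cheap because it only has to be
         * atomic with respect to this CPU; returns the previous value */
        return this_cpu_cmpxchg(demo_owner, 0, token) == 0;
}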
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index cae9c3cb95cf..c6efecf85a6a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -141,10 +141,9 @@ extern __u32 cpu_caps_set[NCAPINTS]; | |||
141 | #ifdef CONFIG_SMP | 141 | #ifdef CONFIG_SMP |
142 | DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); | 142 | DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); |
143 | #define cpu_data(cpu) per_cpu(cpu_info, cpu) | 143 | #define cpu_data(cpu) per_cpu(cpu_info, cpu) |
144 | #define current_cpu_data __get_cpu_var(cpu_info) | ||
145 | #else | 144 | #else |
145 | #define cpu_info boot_cpu_data | ||
146 | #define cpu_data(cpu) boot_cpu_data | 146 | #define cpu_data(cpu) boot_cpu_data |
147 | #define current_cpu_data boot_cpu_data | ||
148 | #endif | 147 | #endif |
149 | 148 | ||
150 | extern const struct seq_operations cpuinfo_op; | 149 | extern const struct seq_operations cpuinfo_op; |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index ce65d449b750..79e6baa8aa0a 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -516,7 +516,7 @@ static void __cpuinit setup_APIC_timer(void) | |||
516 | { | 516 | { |
517 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); | 517 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); |
518 | 518 | ||
519 | if (cpu_has(¤t_cpu_data, X86_FEATURE_ARAT)) { | 519 | if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_ARAT)) { |
520 | lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; | 520 | lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; |
521 | /* Make LAPIC timer preferrable over percpu HPET */ | 521 | /* Make LAPIC timer preferrable over percpu HPET */ |
522 | lapic_clockevent.rating = 150; | 522 | lapic_clockevent.rating = 150; |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 52735a710c30..697dc34b7b87 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2329,7 +2329,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
2329 | unsigned int irr; | 2329 | unsigned int irr; |
2330 | struct irq_desc *desc; | 2330 | struct irq_desc *desc; |
2331 | struct irq_cfg *cfg; | 2331 | struct irq_cfg *cfg; |
2332 | irq = __get_cpu_var(vector_irq)[vector]; | 2332 | irq = __this_cpu_read(vector_irq[vector]); |
2333 | 2333 | ||
2334 | if (irq == -1) | 2334 | if (irq == -1) |
2335 | continue; | 2335 | continue; |
@@ -2363,7 +2363,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
2363 | apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); | 2363 | apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); |
2364 | goto unlock; | 2364 | goto unlock; |
2365 | } | 2365 | } |
2366 | __get_cpu_var(vector_irq)[vector] = -1; | 2366 | __this_cpu_write(vector_irq[vector], -1); |
2367 | unlock: | 2367 | unlock: |
2368 | raw_spin_unlock(&desc->lock); | 2368 | raw_spin_unlock(&desc->lock); |
2369 | } | 2369 | } |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 2a3f2a7db243..ecca5f41ad2c 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -120,8 +120,8 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
120 | else if (!strcmp(oem_table_id, "UVX")) | 120 | else if (!strcmp(oem_table_id, "UVX")) |
121 | uv_system_type = UV_X2APIC; | 121 | uv_system_type = UV_X2APIC; |
122 | else if (!strcmp(oem_table_id, "UVH")) { | 122 | else if (!strcmp(oem_table_id, "UVH")) { |
123 | __get_cpu_var(x2apic_extra_bits) = | 123 | __this_cpu_write(x2apic_extra_bits, |
124 | pnodeid << uvh_apicid.s.pnode_shift; | 124 | pnodeid << uvh_apicid.s.pnode_shift); |
125 | uv_system_type = UV_NON_UNIQUE_APIC; | 125 | uv_system_type = UV_NON_UNIQUE_APIC; |
126 | uv_set_apicid_hibit(); | 126 | uv_set_apicid_hibit(); |
127 | return 1; | 127 | return 1; |
@@ -286,7 +286,7 @@ static unsigned int x2apic_get_apic_id(unsigned long x) | |||
286 | unsigned int id; | 286 | unsigned int id; |
287 | 287 | ||
288 | WARN_ON(preemptible() && num_online_cpus() > 1); | 288 | WARN_ON(preemptible() && num_online_cpus() > 1); |
289 | id = x | __get_cpu_var(x2apic_extra_bits); | 289 | id = x | __this_cpu_read(x2apic_extra_bits); |
290 | 290 | ||
291 | return id; | 291 | return id; |
292 | } | 292 | } |
@@ -378,7 +378,7 @@ struct apic __refdata apic_x2apic_uv_x = { | |||
378 | 378 | ||
379 | static __cpuinit void set_x2apic_extra_bits(int pnode) | 379 | static __cpuinit void set_x2apic_extra_bits(int pnode) |
380 | { | 380 | { |
381 | __get_cpu_var(x2apic_extra_bits) = (pnode << 6); | 381 | __this_cpu_write(x2apic_extra_bits, (pnode << 6)); |
382 | } | 382 | } |
383 | 383 | ||
384 | /* | 384 | /* |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 9e093f8fe78c..7c7bedb83c5a 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -668,7 +668,7 @@ EXPORT_SYMBOL_GPL(amd_erratum_383); | |||
668 | 668 | ||
669 | bool cpu_has_amd_erratum(const int *erratum) | 669 | bool cpu_has_amd_erratum(const int *erratum) |
670 | { | 670 | { |
671 | struct cpuinfo_x86 *cpu = ¤t_cpu_data; | 671 | struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info); |
672 | int osvw_id = *erratum++; | 672 | int osvw_id = *erratum++; |
673 | u32 range; | 673 | u32 range; |
674 | u32 ms; | 674 | u32 ms; |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 491977baf6c0..35c7e65e59be 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -521,7 +521,7 @@ static void check_supported_cpu(void *_rc) | |||
521 | 521 | ||
522 | *rc = -ENODEV; | 522 | *rc = -ENODEV; |
523 | 523 | ||
524 | if (current_cpu_data.x86_vendor != X86_VENDOR_AMD) | 524 | if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD) |
525 | return; | 525 | return; |
526 | 526 | ||
527 | eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); | 527 | eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); |
@@ -1377,7 +1377,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) | |||
1377 | static void query_values_on_cpu(void *_err) | 1377 | static void query_values_on_cpu(void *_err) |
1378 | { | 1378 | { |
1379 | int *err = _err; | 1379 | int *err = _err; |
1380 | struct powernow_k8_data *data = __get_cpu_var(powernow_data); | 1380 | struct powernow_k8_data *data = __this_cpu_read(powernow_data); |
1381 | 1381 | ||
1382 | *err = query_current_values_with_pending_wait(data); | 1382 | *err = query_current_values_with_pending_wait(data); |
1383 | } | 1383 | } |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 9ecf81f9b90f..7283e98deaae 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -265,7 +265,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
265 | line_size = l2.line_size; | 265 | line_size = l2.line_size; |
266 | lines_per_tag = l2.lines_per_tag; | 266 | lines_per_tag = l2.lines_per_tag; |
267 | /* cpu_data has errata corrections for K7 applied */ | 267 | /* cpu_data has errata corrections for K7 applied */ |
268 | size_in_kb = current_cpu_data.x86_cache_size; | 268 | size_in_kb = __this_cpu_read(cpu_info.x86_cache_size); |
269 | break; | 269 | break; |
270 | case 3: | 270 | case 3: |
271 | if (!l3.val) | 271 | if (!l3.val) |
@@ -287,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
287 | eax->split.type = types[leaf]; | 287 | eax->split.type = types[leaf]; |
288 | eax->split.level = levels[leaf]; | 288 | eax->split.level = levels[leaf]; |
289 | eax->split.num_threads_sharing = 0; | 289 | eax->split.num_threads_sharing = 0; |
290 | eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1; | 290 | eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1; |
291 | 291 | ||
292 | 292 | ||
293 | if (assoc == 0xffff) | 293 | if (assoc == 0xffff) |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 7a35b72d7c03..d916183b7f9c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -326,7 +326,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) | |||
326 | 326 | ||
327 | static int msr_to_offset(u32 msr) | 327 | static int msr_to_offset(u32 msr) |
328 | { | 328 | { |
329 | unsigned bank = __get_cpu_var(injectm.bank); | 329 | unsigned bank = __this_cpu_read(injectm.bank); |
330 | 330 | ||
331 | if (msr == rip_msr) | 331 | if (msr == rip_msr) |
332 | return offsetof(struct mce, ip); | 332 | return offsetof(struct mce, ip); |
@@ -346,7 +346,7 @@ static u64 mce_rdmsrl(u32 msr) | |||
346 | { | 346 | { |
347 | u64 v; | 347 | u64 v; |
348 | 348 | ||
349 | if (__get_cpu_var(injectm).finished) { | 349 | if (__this_cpu_read(injectm.finished)) { |
350 | int offset = msr_to_offset(msr); | 350 | int offset = msr_to_offset(msr); |
351 | 351 | ||
352 | if (offset < 0) | 352 | if (offset < 0) |
@@ -369,7 +369,7 @@ static u64 mce_rdmsrl(u32 msr) | |||
369 | 369 | ||
370 | static void mce_wrmsrl(u32 msr, u64 v) | 370 | static void mce_wrmsrl(u32 msr, u64 v) |
371 | { | 371 | { |
372 | if (__get_cpu_var(injectm).finished) { | 372 | if (__this_cpu_read(injectm.finished)) { |
373 | int offset = msr_to_offset(msr); | 373 | int offset = msr_to_offset(msr); |
374 | 374 | ||
375 | if (offset >= 0) | 375 | if (offset >= 0) |
@@ -1159,7 +1159,7 @@ static void mce_start_timer(unsigned long data) | |||
1159 | 1159 | ||
1160 | WARN_ON(smp_processor_id() != data); | 1160 | WARN_ON(smp_processor_id() != data); |
1161 | 1161 | ||
1162 | if (mce_available(¤t_cpu_data)) { | 1162 | if (mce_available(__this_cpu_ptr(&cpu_info))) { |
1163 | machine_check_poll(MCP_TIMESTAMP, | 1163 | machine_check_poll(MCP_TIMESTAMP, |
1164 | &__get_cpu_var(mce_poll_banks)); | 1164 | &__get_cpu_var(mce_poll_banks)); |
1165 | } | 1165 | } |
@@ -1767,7 +1767,7 @@ static int mce_shutdown(struct sys_device *dev) | |||
1767 | static int mce_resume(struct sys_device *dev) | 1767 | static int mce_resume(struct sys_device *dev) |
1768 | { | 1768 | { |
1769 | __mcheck_cpu_init_generic(); | 1769 | __mcheck_cpu_init_generic(); |
1770 | __mcheck_cpu_init_vendor(¤t_cpu_data); | 1770 | __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info)); |
1771 | 1771 | ||
1772 | return 0; | 1772 | return 0; |
1773 | } | 1773 | } |
@@ -1775,7 +1775,7 @@ static int mce_resume(struct sys_device *dev) | |||
1775 | static void mce_cpu_restart(void *data) | 1775 | static void mce_cpu_restart(void *data) |
1776 | { | 1776 | { |
1777 | del_timer_sync(&__get_cpu_var(mce_timer)); | 1777 | del_timer_sync(&__get_cpu_var(mce_timer)); |
1778 | if (!mce_available(¤t_cpu_data)) | 1778 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
1779 | return; | 1779 | return; |
1780 | __mcheck_cpu_init_generic(); | 1780 | __mcheck_cpu_init_generic(); |
1781 | __mcheck_cpu_init_timer(); | 1781 | __mcheck_cpu_init_timer(); |
@@ -1790,7 +1790,7 @@ static void mce_restart(void) | |||
1790 | /* Toggle features for corrected errors */ | 1790 | /* Toggle features for corrected errors */ |
1791 | static void mce_disable_ce(void *all) | 1791 | static void mce_disable_ce(void *all) |
1792 | { | 1792 | { |
1793 | if (!mce_available(¤t_cpu_data)) | 1793 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
1794 | return; | 1794 | return; |
1795 | if (all) | 1795 | if (all) |
1796 | del_timer_sync(&__get_cpu_var(mce_timer)); | 1796 | del_timer_sync(&__get_cpu_var(mce_timer)); |
@@ -1799,7 +1799,7 @@ static void mce_disable_ce(void *all) | |||
1799 | 1799 | ||
1800 | static void mce_enable_ce(void *all) | 1800 | static void mce_enable_ce(void *all) |
1801 | { | 1801 | { |
1802 | if (!mce_available(¤t_cpu_data)) | 1802 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
1803 | return; | 1803 | return; |
1804 | cmci_reenable(); | 1804 | cmci_reenable(); |
1805 | cmci_recheck(); | 1805 | cmci_recheck(); |
@@ -2022,7 +2022,7 @@ static void __cpuinit mce_disable_cpu(void *h) | |||
2022 | unsigned long action = *(unsigned long *)h; | 2022 | unsigned long action = *(unsigned long *)h; |
2023 | int i; | 2023 | int i; |
2024 | 2024 | ||
2025 | if (!mce_available(¤t_cpu_data)) | 2025 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
2026 | return; | 2026 | return; |
2027 | 2027 | ||
2028 | if (!(action & CPU_TASKS_FROZEN)) | 2028 | if (!(action & CPU_TASKS_FROZEN)) |
@@ -2040,7 +2040,7 @@ static void __cpuinit mce_reenable_cpu(void *h) | |||
2040 | unsigned long action = *(unsigned long *)h; | 2040 | unsigned long action = *(unsigned long *)h; |
2041 | int i; | 2041 | int i; |
2042 | 2042 | ||
2043 | if (!mce_available(¤t_cpu_data)) | 2043 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
2044 | return; | 2044 | return; |
2045 | 2045 | ||
2046 | if (!(action & CPU_TASKS_FROZEN)) | 2046 | if (!(action & CPU_TASKS_FROZEN)) |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 6fcd0936194f..8694ef56459d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -130,7 +130,7 @@ void cmci_recheck(void) | |||
130 | unsigned long flags; | 130 | unsigned long flags; |
131 | int banks; | 131 | int banks; |
132 | 132 | ||
133 | if (!mce_available(¤t_cpu_data) || !cmci_supported(&banks)) | 133 | if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) |
134 | return; | 134 | return; |
135 | local_irq_save(flags); | 135 | local_irq_save(flags); |
136 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); | 136 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0a360d146596..04921017abe0 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -997,8 +997,7 @@ x86_perf_event_set_period(struct perf_event *event) | |||
997 | 997 | ||
998 | static void x86_pmu_enable_event(struct perf_event *event) | 998 | static void x86_pmu_enable_event(struct perf_event *event) |
999 | { | 999 | { |
1000 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1000 | if (__this_cpu_read(cpu_hw_events.enabled)) |
1001 | if (cpuc->enabled) | ||
1002 | __x86_pmu_enable_event(&event->hw, | 1001 | __x86_pmu_enable_event(&event->hw, |
1003 | ARCH_PERFMON_EVENTSEL_ENABLE); | 1002 | ARCH_PERFMON_EVENTSEL_ENABLE); |
1004 | } | 1003 | } |
@@ -1272,7 +1271,7 @@ perf_event_nmi_handler(struct notifier_block *self, | |||
1272 | break; | 1271 | break; |
1273 | case DIE_NMIUNKNOWN: | 1272 | case DIE_NMIUNKNOWN: |
1274 | this_nmi = percpu_read(irq_stat.__nmi_count); | 1273 | this_nmi = percpu_read(irq_stat.__nmi_count); |
1275 | if (this_nmi != __get_cpu_var(pmu_nmi).marked) | 1274 | if (this_nmi != __this_cpu_read(pmu_nmi.marked)) |
1276 | /* let the kernel handle the unknown nmi */ | 1275 | /* let the kernel handle the unknown nmi */ |
1277 | return NOTIFY_DONE; | 1276 | return NOTIFY_DONE; |
1278 | /* | 1277 | /* |
@@ -1296,8 +1295,8 @@ perf_event_nmi_handler(struct notifier_block *self, | |||
1296 | this_nmi = percpu_read(irq_stat.__nmi_count); | 1295 | this_nmi = percpu_read(irq_stat.__nmi_count); |
1297 | if ((handled > 1) || | 1296 | if ((handled > 1) || |
1298 | /* the next nmi could be a back-to-back nmi */ | 1297 | /* the next nmi could be a back-to-back nmi */ |
1299 | ((__get_cpu_var(pmu_nmi).marked == this_nmi) && | 1298 | ((__this_cpu_read(pmu_nmi.marked) == this_nmi) && |
1300 | (__get_cpu_var(pmu_nmi).handled > 1))) { | 1299 | (__this_cpu_read(pmu_nmi.handled) > 1))) { |
1301 | /* | 1300 | /* |
1302 | * We could have two subsequent back-to-back nmis: The | 1301 | * We could have two subsequent back-to-back nmis: The |
1303 | * first handles more than one counter, the 2nd | 1302 | * first handles more than one counter, the 2nd |
@@ -1308,8 +1307,8 @@ perf_event_nmi_handler(struct notifier_block *self, | |||
1308 | * handling more than one counter. We will mark the | 1307 | * handling more than one counter. We will mark the |
1309 | * next (3rd) and then drop it if unhandled. | 1308 | * next (3rd) and then drop it if unhandled. |
1310 | */ | 1309 | */ |
1311 | __get_cpu_var(pmu_nmi).marked = this_nmi + 1; | 1310 | __this_cpu_write(pmu_nmi.marked, this_nmi + 1); |
1312 | __get_cpu_var(pmu_nmi).handled = handled; | 1311 | __this_cpu_write(pmu_nmi.handled, handled); |
1313 | } | 1312 | } |
1314 | 1313 | ||
1315 | return NOTIFY_STOP; | 1314 | return NOTIFY_STOP; |
@@ -1484,11 +1483,9 @@ static inline void x86_pmu_read(struct perf_event *event) | |||
1484 | */ | 1483 | */ |
1485 | static void x86_pmu_start_txn(struct pmu *pmu) | 1484 | static void x86_pmu_start_txn(struct pmu *pmu) |
1486 | { | 1485 | { |
1487 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
1488 | |||
1489 | perf_pmu_disable(pmu); | 1486 | perf_pmu_disable(pmu); |
1490 | cpuc->group_flag |= PERF_EVENT_TXN; | 1487 | __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN); |
1491 | cpuc->n_txn = 0; | 1488 | __this_cpu_write(cpu_hw_events.n_txn, 0); |
1492 | } | 1489 | } |
1493 | 1490 | ||
1494 | /* | 1491 | /* |
@@ -1498,14 +1495,12 @@ static void x86_pmu_start_txn(struct pmu *pmu) | |||
1498 | */ | 1495 | */ |
1499 | static void x86_pmu_cancel_txn(struct pmu *pmu) | 1496 | static void x86_pmu_cancel_txn(struct pmu *pmu) |
1500 | { | 1497 | { |
1501 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1498 | __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN); |
1502 | |||
1503 | cpuc->group_flag &= ~PERF_EVENT_TXN; | ||
1504 | /* | 1499 | /* |
1505 | * Truncate the collected events. | 1500 | * Truncate the collected events. |
1506 | */ | 1501 | */ |
1507 | cpuc->n_added -= cpuc->n_txn; | 1502 | __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn)); |
1508 | cpuc->n_events -= cpuc->n_txn; | 1503 | __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn)); |
1509 | perf_pmu_enable(pmu); | 1504 | perf_pmu_enable(pmu); |
1510 | } | 1505 | } |
1511 | 1506 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 24e390e40f2e..008835c1d79c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -649,7 +649,7 @@ static void intel_pmu_enable_event(struct perf_event *event) | |||
649 | struct hw_perf_event *hwc = &event->hw; | 649 | struct hw_perf_event *hwc = &event->hw; |
650 | 650 | ||
651 | if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { | 651 | if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { |
652 | if (!__get_cpu_var(cpu_hw_events).enabled) | 652 | if (!__this_cpu_read(cpu_hw_events.enabled)) |
653 | return; | 653 | return; |
654 | 654 | ||
655 | intel_pmu_enable_bts(hwc->config); | 655 | intel_pmu_enable_bts(hwc->config); |
@@ -679,7 +679,7 @@ static int intel_pmu_save_and_restart(struct perf_event *event) | |||
679 | 679 | ||
680 | static void intel_pmu_reset(void) | 680 | static void intel_pmu_reset(void) |
681 | { | 681 | { |
682 | struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds; | 682 | struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); |
683 | unsigned long flags; | 683 | unsigned long flags; |
684 | int idx; | 684 | int idx; |
685 | 685 | ||
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 298448656b60..382eb2936d4d 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -170,9 +170,9 @@ static void ftrace_mod_code(void) | |||
170 | 170 | ||
171 | void ftrace_nmi_enter(void) | 171 | void ftrace_nmi_enter(void) |
172 | { | 172 | { |
173 | __get_cpu_var(save_modifying_code) = modifying_code; | 173 | __this_cpu_write(save_modifying_code, modifying_code); |
174 | 174 | ||
175 | if (!__get_cpu_var(save_modifying_code)) | 175 | if (!__this_cpu_read(save_modifying_code)) |
176 | return; | 176 | return; |
177 | 177 | ||
178 | if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { | 178 | if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { |
@@ -186,7 +186,7 @@ void ftrace_nmi_enter(void) | |||
186 | 186 | ||
187 | void ftrace_nmi_exit(void) | 187 | void ftrace_nmi_exit(void) |
188 | { | 188 | { |
189 | if (!__get_cpu_var(save_modifying_code)) | 189 | if (!__this_cpu_read(save_modifying_code)) |
190 | return; | 190 | return; |
191 | 191 | ||
192 | /* Finish all executions before clearing nmi_running */ | 192 | /* Finish all executions before clearing nmi_running */ |
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 42c594254507..02f07634d265 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -122,7 +122,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
122 | return -EBUSY; | 122 | return -EBUSY; |
123 | 123 | ||
124 | set_debugreg(info->address, i); | 124 | set_debugreg(info->address, i); |
125 | __get_cpu_var(cpu_debugreg[i]) = info->address; | 125 | __this_cpu_write(cpu_debugreg[i], info->address); |
126 | 126 | ||
127 | dr7 = &__get_cpu_var(cpu_dr7); | 127 | dr7 = &__get_cpu_var(cpu_dr7); |
128 | *dr7 |= encode_dr7(i, info->len, info->type); | 128 | *dr7 |= encode_dr7(i, info->len, info->type); |
@@ -397,12 +397,12 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) | |||
397 | 397 | ||
398 | void hw_breakpoint_restore(void) | 398 | void hw_breakpoint_restore(void) |
399 | { | 399 | { |
400 | set_debugreg(__get_cpu_var(cpu_debugreg[0]), 0); | 400 | set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0); |
401 | set_debugreg(__get_cpu_var(cpu_debugreg[1]), 1); | 401 | set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1); |
402 | set_debugreg(__get_cpu_var(cpu_debugreg[2]), 2); | 402 | set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2); |
403 | set_debugreg(__get_cpu_var(cpu_debugreg[3]), 3); | 403 | set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3); |
404 | set_debugreg(current->thread.debugreg6, 6); | 404 | set_debugreg(current->thread.debugreg6, 6); |
405 | set_debugreg(__get_cpu_var(cpu_dr7), 7); | 405 | set_debugreg(__this_cpu_read(cpu_dr7), 7); |
406 | } | 406 | } |
407 | EXPORT_SYMBOL_GPL(hw_breakpoint_restore); | 407 | EXPORT_SYMBOL_GPL(hw_breakpoint_restore); |
408 | 408 | ||
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 83ec0175f986..3a43caa3beb7 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -234,7 +234,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs) | |||
234 | exit_idle(); | 234 | exit_idle(); |
235 | irq_enter(); | 235 | irq_enter(); |
236 | 236 | ||
237 | irq = __get_cpu_var(vector_irq)[vector]; | 237 | irq = __this_cpu_read(vector_irq[vector]); |
238 | 238 | ||
239 | if (!handle_irq(irq, regs)) { | 239 | if (!handle_irq(irq, regs)) { |
240 | ack_APIC_irq(); | 240 | ack_APIC_irq(); |
@@ -350,12 +350,12 @@ void fixup_irqs(void) | |||
350 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { | 350 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { |
351 | unsigned int irr; | 351 | unsigned int irr; |
352 | 352 | ||
353 | if (__get_cpu_var(vector_irq)[vector] < 0) | 353 | if (__this_cpu_read(vector_irq[vector]) < 0) |
354 | continue; | 354 | continue; |
355 | 355 | ||
356 | irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); | 356 | irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); |
357 | if (irr & (1 << (vector % 32))) { | 357 | if (irr & (1 << (vector % 32))) { |
358 | irq = __get_cpu_var(vector_irq)[vector]; | 358 | irq = __this_cpu_read(vector_irq[vector]); |
359 | 359 | ||
360 | data = irq_get_irq_data(irq); | 360 | data = irq_get_irq_data(irq); |
361 | raw_spin_lock(&desc->lock); | 361 | raw_spin_lock(&desc->lock); |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 96656f207751..48ff6dcffa02 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -79,7 +79,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) | |||
79 | u32 *isp, arg1, arg2; | 79 | u32 *isp, arg1, arg2; |
80 | 80 | ||
81 | curctx = (union irq_ctx *) current_thread_info(); | 81 | curctx = (union irq_ctx *) current_thread_info(); |
82 | irqctx = __get_cpu_var(hardirq_ctx); | 82 | irqctx = __this_cpu_read(hardirq_ctx); |
83 | 83 | ||
84 | /* | 84 | /* |
85 | * this is where we switch to the IRQ stack. However, if we are | 85 | * this is where we switch to the IRQ stack. However, if we are |
@@ -166,7 +166,7 @@ asmlinkage void do_softirq(void) | |||
166 | 166 | ||
167 | if (local_softirq_pending()) { | 167 | if (local_softirq_pending()) { |
168 | curctx = current_thread_info(); | 168 | curctx = current_thread_info(); |
169 | irqctx = __get_cpu_var(softirq_ctx); | 169 | irqctx = __this_cpu_read(softirq_ctx); |
170 | irqctx->tinfo.task = curctx->task; | 170 | irqctx->tinfo.task = curctx->task; |
171 | irqctx->tinfo.previous_esp = current_stack_pointer; | 171 | irqctx->tinfo.previous_esp = current_stack_pointer; |
172 | 172 | ||
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 5940282bd2f9..d91c477b3f62 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -403,7 +403,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) | |||
403 | 403 | ||
404 | static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) | 404 | static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) |
405 | { | 405 | { |
406 | __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; | 406 | __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); |
407 | kcb->kprobe_status = kcb->prev_kprobe.status; | 407 | kcb->kprobe_status = kcb->prev_kprobe.status; |
408 | kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags; | 408 | kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags; |
409 | kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags; | 409 | kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags; |
@@ -412,7 +412,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) | |||
412 | static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | 412 | static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, |
413 | struct kprobe_ctlblk *kcb) | 413 | struct kprobe_ctlblk *kcb) |
414 | { | 414 | { |
415 | __get_cpu_var(current_kprobe) = p; | 415 | __this_cpu_write(current_kprobe, p); |
416 | kcb->kprobe_saved_flags = kcb->kprobe_old_flags | 416 | kcb->kprobe_saved_flags = kcb->kprobe_old_flags |
417 | = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); | 417 | = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); |
418 | if (is_IF_modifier(p->ainsn.insn)) | 418 | if (is_IF_modifier(p->ainsn.insn)) |
@@ -586,7 +586,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
586 | preempt_enable_no_resched(); | 586 | preempt_enable_no_resched(); |
587 | return 1; | 587 | return 1; |
588 | } else if (kprobe_running()) { | 588 | } else if (kprobe_running()) { |
589 | p = __get_cpu_var(current_kprobe); | 589 | p = __this_cpu_read(current_kprobe); |
590 | if (p->break_handler && p->break_handler(p, regs)) { | 590 | if (p->break_handler && p->break_handler(p, regs)) { |
591 | setup_singlestep(p, regs, kcb, 0); | 591 | setup_singlestep(p, regs, kcb, 0); |
592 | return 1; | 592 | return 1; |
@@ -759,11 +759,11 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) | |||
759 | 759 | ||
760 | orig_ret_address = (unsigned long)ri->ret_addr; | 760 | orig_ret_address = (unsigned long)ri->ret_addr; |
761 | if (ri->rp && ri->rp->handler) { | 761 | if (ri->rp && ri->rp->handler) { |
762 | __get_cpu_var(current_kprobe) = &ri->rp->kp; | 762 | __this_cpu_write(current_kprobe, &ri->rp->kp); |
763 | get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; | 763 | get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; |
764 | ri->ret_addr = correct_ret_addr; | 764 | ri->ret_addr = correct_ret_addr; |
765 | ri->rp->handler(ri, regs); | 765 | ri->rp->handler(ri, regs); |
766 | __get_cpu_var(current_kprobe) = NULL; | 766 | __this_cpu_write(current_kprobe, NULL); |
767 | } | 767 | } |
768 | 768 | ||
769 | recycle_rp_inst(ri, &empty_rp); | 769 | recycle_rp_inst(ri, &empty_rp); |
@@ -1202,10 +1202,10 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, | |||
1202 | regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; | 1202 | regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; |
1203 | regs->orig_ax = ~0UL; | 1203 | regs->orig_ax = ~0UL; |
1204 | 1204 | ||
1205 | __get_cpu_var(current_kprobe) = &op->kp; | 1205 | __this_cpu_write(current_kprobe, &op->kp); |
1206 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | 1206 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; |
1207 | opt_pre_handler(&op->kp, regs); | 1207 | opt_pre_handler(&op->kp, regs); |
1208 | __get_cpu_var(current_kprobe) = NULL; | 1208 | __this_cpu_write(current_kprobe, NULL); |
1209 | } | 1209 | } |
1210 | preempt_enable_no_resched(); | 1210 | preempt_enable_no_resched(); |
1211 | } | 1211 | } |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c852041bfc3d..09c08a1c706f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -446,7 +446,7 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) | |||
446 | trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id()); | 446 | trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id()); |
447 | trace_cpu_idle((ax>>4)+1, smp_processor_id()); | 447 | trace_cpu_idle((ax>>4)+1, smp_processor_id()); |
448 | if (!need_resched()) { | 448 | if (!need_resched()) { |
449 | if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) | 449 | if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR)) |
450 | clflush((void *)¤t_thread_info()->flags); | 450 | clflush((void *)¤t_thread_info()->flags); |
451 | 451 | ||
452 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | 452 | __monitor((void *)¤t_thread_info()->flags, 0, 0); |
@@ -462,7 +462,7 @@ static void mwait_idle(void) | |||
462 | if (!need_resched()) { | 462 | if (!need_resched()) { |
463 | trace_power_start(POWER_CSTATE, 1, smp_processor_id()); | 463 | trace_power_start(POWER_CSTATE, 1, smp_processor_id()); |
464 | trace_cpu_idle(1, smp_processor_id()); | 464 | trace_cpu_idle(1, smp_processor_id()); |
465 | if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) | 465 | if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR)) |
466 | clflush((void *)¤t_thread_info()->flags); | 466 | clflush((void *)¤t_thread_info()->flags); |
467 | 467 | ||
468 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | 468 | __monitor((void *)¤t_thread_info()->flags, 0, 0); |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ee886fe10ef4..c7149c96d079 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -427,7 +427,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
427 | 427 | ||
428 | cpumask_set_cpu(cpu, c->llc_shared_map); | 428 | cpumask_set_cpu(cpu, c->llc_shared_map); |
429 | 429 | ||
430 | if (current_cpu_data.x86_max_cores == 1) { | 430 | if (__this_cpu_read(cpu_info.x86_max_cores) == 1) { |
431 | cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); | 431 | cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); |
432 | c->booted_cores = 1; | 432 | c->booted_cores = 1; |
433 | return; | 433 | return; |
@@ -1089,7 +1089,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1089 | 1089 | ||
1090 | preempt_disable(); | 1090 | preempt_disable(); |
1091 | smp_cpu_index_default(); | 1091 | smp_cpu_index_default(); |
1092 | current_cpu_data = boot_cpu_data; | 1092 | memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info)); |
1093 | cpumask_copy(cpu_callin_mask, cpumask_of(0)); | 1093 | cpumask_copy(cpu_callin_mask, cpumask_of(0)); |
1094 | mb(); | 1094 | mb(); |
1095 | /* | 1095 | /* |
@@ -1383,7 +1383,7 @@ void play_dead_common(void) | |||
1383 | 1383 | ||
1384 | mb(); | 1384 | mb(); |
1385 | /* Ack it */ | 1385 | /* Ack it */ |
1386 | __get_cpu_var(cpu_state) = CPU_DEAD; | 1386 | __this_cpu_write(cpu_state, CPU_DEAD); |
1387 | 1387 | ||
1388 | /* | 1388 | /* |
1389 | * With physical CPU hotplug, we should halt the cpu | 1389 | * With physical CPU hotplug, we should halt the cpu |
@@ -1403,11 +1403,11 @@ static inline void mwait_play_dead(void) | |||
1403 | int i; | 1403 | int i; |
1404 | void *mwait_ptr; | 1404 | void *mwait_ptr; |
1405 | 1405 | ||
1406 | if (!cpu_has(¤t_cpu_data, X86_FEATURE_MWAIT)) | 1406 | if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT)) |
1407 | return; | 1407 | return; |
1408 | if (!cpu_has(¤t_cpu_data, X86_FEATURE_CLFLSH)) | 1408 | if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH)) |
1409 | return; | 1409 | return; |
1410 | if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) | 1410 | if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF) |
1411 | return; | 1411 | return; |
1412 | 1412 | ||
1413 | eax = CPUID_MWAIT_LEAF; | 1413 | eax = CPUID_MWAIT_LEAF; |
@@ -1458,7 +1458,7 @@ static inline void mwait_play_dead(void) | |||
1458 | 1458 | ||
1459 | static inline void hlt_play_dead(void) | 1459 | static inline void hlt_play_dead(void) |
1460 | { | 1460 | { |
1461 | if (current_cpu_data.x86 >= 4) | 1461 | if (__this_cpu_read(cpu_info.x86) >= 4) |
1462 | wbinvd(); | 1462 | wbinvd(); |
1463 | 1463 | ||
1464 | while (1) { | 1464 | while (1) { |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 356a0d455cf9..03d2ea82f35a 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -659,7 +659,7 @@ void restore_sched_clock_state(void) | |||
659 | 659 | ||
660 | local_irq_save(flags); | 660 | local_irq_save(flags); |
661 | 661 | ||
662 | __get_cpu_var(cyc2ns_offset) = 0; | 662 | __this_cpu_write(cyc2ns_offset, 0); |
663 | offset = cyc2ns_suspend - sched_clock(); | 663 | offset = cyc2ns_suspend - sched_clock(); |
664 | 664 | ||
665 | for_each_possible_cpu(cpu) | 665 | for_each_possible_cpu(cpu) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b989e1f1e5d3..46a368cb651e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -976,7 +976,7 @@ static inline u64 nsec_to_cycles(u64 nsec) | |||
976 | if (kvm_tsc_changes_freq()) | 976 | if (kvm_tsc_changes_freq()) |
977 | printk_once(KERN_WARNING | 977 | printk_once(KERN_WARNING |
978 | "kvm: unreliable cycle conversion on adjustable rate TSC\n"); | 978 | "kvm: unreliable cycle conversion on adjustable rate TSC\n"); |
979 | ret = nsec * __get_cpu_var(cpu_tsc_khz); | 979 | ret = nsec * __this_cpu_read(cpu_tsc_khz); |
980 | do_div(ret, USEC_PER_SEC); | 980 | do_div(ret, USEC_PER_SEC); |
981 | return ret; | 981 | return ret; |
982 | } | 982 | } |
@@ -1061,7 +1061,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1061 | local_irq_save(flags); | 1061 | local_irq_save(flags); |
1062 | kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp); | 1062 | kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp); |
1063 | kernel_ns = get_kernel_ns(); | 1063 | kernel_ns = get_kernel_ns(); |
1064 | this_tsc_khz = __get_cpu_var(cpu_tsc_khz); | 1064 | this_tsc_khz = __this_cpu_read(cpu_tsc_khz); |
1065 | 1065 | ||
1066 | if (unlikely(this_tsc_khz == 0)) { | 1066 | if (unlikely(this_tsc_khz == 0)) { |
1067 | local_irq_restore(flags); | 1067 | local_irq_restore(flags); |
@@ -4427,7 +4427,7 @@ EXPORT_SYMBOL_GPL(kvm_fast_pio_out); | |||
4427 | 4427 | ||
4428 | static void tsc_bad(void *info) | 4428 | static void tsc_bad(void *info) |
4429 | { | 4429 | { |
4430 | __get_cpu_var(cpu_tsc_khz) = 0; | 4430 | __this_cpu_write(cpu_tsc_khz, 0); |
4431 | } | 4431 | } |
4432 | 4432 | ||
4433 | static void tsc_khz_changed(void *data) | 4433 | static void tsc_khz_changed(void *data) |
@@ -4441,7 +4441,7 @@ static void tsc_khz_changed(void *data) | |||
4441 | khz = cpufreq_quick_get(raw_smp_processor_id()); | 4441 | khz = cpufreq_quick_get(raw_smp_processor_id()); |
4442 | if (!khz) | 4442 | if (!khz) |
4443 | khz = tsc_khz; | 4443 | khz = tsc_khz; |
4444 | __get_cpu_var(cpu_tsc_khz) = khz; | 4444 | __this_cpu_write(cpu_tsc_khz, khz); |
4445 | } | 4445 | } |
4446 | 4446 | ||
4447 | static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | 4447 | static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, |
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index ff485d361182..fc45ba887d05 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -121,7 +121,7 @@ inline void __const_udelay(unsigned long xloops) | |||
121 | asm("mull %%edx" | 121 | asm("mull %%edx" |
122 | :"=d" (xloops), "=&a" (d0) | 122 | :"=d" (xloops), "=&a" (d0) |
123 | :"1" (xloops), "0" | 123 | :"1" (xloops), "0" |
124 | (cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4))); | 124 | (this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4))); |
125 | 125 | ||
126 | __delay(++xloops); | 126 | __delay(++xloops); |
127 | } | 127 | } |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 358c8b9c96a7..f24a8533bcdf 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -143,7 +143,7 @@ static inline int has_mux(void) | |||
143 | 143 | ||
144 | inline int op_x86_phys_to_virt(int phys) | 144 | inline int op_x86_phys_to_virt(int phys) |
145 | { | 145 | { |
146 | return __get_cpu_var(switch_index) + phys; | 146 | return __this_cpu_read(switch_index) + phys; |
147 | } | 147 | } |
148 | 148 | ||
149 | inline int op_x86_virt_to_phys(int virt) | 149 | inline int op_x86_virt_to_phys(int virt) |
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index d769cda54082..94b745045e45 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -95,8 +95,8 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model, | |||
95 | * counter width: | 95 | * counter width: |
96 | */ | 96 | */ |
97 | if (!(eax.split.version_id == 0 && | 97 | if (!(eax.split.version_id == 0 && |
98 | current_cpu_data.x86 == 6 && | 98 | __this_cpu_read(cpu_info.x86) == 6 && |
99 | current_cpu_data.x86_model == 15)) { | 99 | __this_cpu_read(cpu_info.x86_model) == 15)) { |
100 | 100 | ||
101 | if (counter_width < eax.split.bit_width) | 101 | if (counter_width < eax.split.bit_width) |
102 | counter_width = eax.split.bit_width; | 102 | counter_width = eax.split.bit_width; |
@@ -235,8 +235,8 @@ static void arch_perfmon_setup_counters(void) | |||
235 | eax.full = cpuid_eax(0xa); | 235 | eax.full = cpuid_eax(0xa); |
236 | 236 | ||
237 | /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */ | 237 | /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */ |
238 | if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 && | 238 | if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 && |
239 | current_cpu_data.x86_model == 15) { | 239 | __this_cpu_read(cpu_info.x86_model) == 15) { |
240 | eax.split.version_id = 2; | 240 | eax.split.version_id = 2; |
241 | eax.split.num_counters = 2; | 241 | eax.split.num_counters = 2; |
242 | eax.split.bit_width = 40; | 242 | eax.split.bit_width = 40; |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 44dcad43989d..aa8c89ae54cf 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -574,8 +574,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g) | |||
574 | 574 | ||
575 | preempt_disable(); | 575 | preempt_disable(); |
576 | 576 | ||
577 | start = __get_cpu_var(idt_desc).address; | 577 | start = __this_cpu_read(idt_desc.address); |
578 | end = start + __get_cpu_var(idt_desc).size + 1; | 578 | end = start + __this_cpu_read(idt_desc.size) + 1; |
579 | 579 | ||
580 | xen_mc_flush(); | 580 | xen_mc_flush(); |
581 | 581 | ||
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index 9e565da5d1f7..4ec8035e3216 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -22,7 +22,7 @@ static inline void xen_mc_batch(void) | |||
22 | unsigned long flags; | 22 | unsigned long flags; |
23 | /* need to disable interrupts until this entry is complete */ | 23 | /* need to disable interrupts until this entry is complete */ |
24 | local_irq_save(flags); | 24 | local_irq_save(flags); |
25 | __get_cpu_var(xen_mc_irq_flags) = flags; | 25 | __this_cpu_write(xen_mc_irq_flags, flags); |
26 | } | 26 | } |
27 | 27 | ||
28 | static inline struct multicall_space xen_mc_entry(size_t args) | 28 | static inline struct multicall_space xen_mc_entry(size_t args) |
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 23e061b9327b..cc9b1e182fcf 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -159,8 +159,8 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl) | |||
159 | { | 159 | { |
160 | struct xen_spinlock *prev; | 160 | struct xen_spinlock *prev; |
161 | 161 | ||
162 | prev = __get_cpu_var(lock_spinners); | 162 | prev = __this_cpu_read(lock_spinners); |
163 | __get_cpu_var(lock_spinners) = xl; | 163 | __this_cpu_write(lock_spinners, xl); |
164 | 164 | ||
165 | wmb(); /* set lock of interest before count */ | 165 | wmb(); /* set lock of interest before count */ |
166 | 166 | ||
@@ -179,14 +179,14 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock | |||
179 | asm(LOCK_PREFIX " decw %0" | 179 | asm(LOCK_PREFIX " decw %0" |
180 | : "+m" (xl->spinners) : : "memory"); | 180 | : "+m" (xl->spinners) : : "memory"); |
181 | wmb(); /* decrement count before restoring lock */ | 181 | wmb(); /* decrement count before restoring lock */ |
182 | __get_cpu_var(lock_spinners) = prev; | 182 | __this_cpu_write(lock_spinners, prev); |
183 | } | 183 | } |
184 | 184 | ||
185 | static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable) | 185 | static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable) |
186 | { | 186 | { |
187 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; | 187 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; |
188 | struct xen_spinlock *prev; | 188 | struct xen_spinlock *prev; |
189 | int irq = __get_cpu_var(lock_kicker_irq); | 189 | int irq = __this_cpu_read(lock_kicker_irq); |
190 | int ret; | 190 | int ret; |
191 | u64 start; | 191 | u64 start; |
192 | 192 | ||
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 5da5e53fb94c..067759e3d6a5 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -135,24 +135,24 @@ static void do_stolen_accounting(void) | |||
135 | 135 | ||
136 | /* Add the appropriate number of ticks of stolen time, | 136 | /* Add the appropriate number of ticks of stolen time, |
137 | including any left-overs from last time. */ | 137 | including any left-overs from last time. */ |
138 | stolen = runnable + offline + __get_cpu_var(xen_residual_stolen); | 138 | stolen = runnable + offline + __this_cpu_read(xen_residual_stolen); |
139 | 139 | ||
140 | if (stolen < 0) | 140 | if (stolen < 0) |
141 | stolen = 0; | 141 | stolen = 0; |
142 | 142 | ||
143 | ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); | 143 | ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); |
144 | __get_cpu_var(xen_residual_stolen) = stolen; | 144 | __this_cpu_write(xen_residual_stolen, stolen); |
145 | account_steal_ticks(ticks); | 145 | account_steal_ticks(ticks); |
146 | 146 | ||
147 | /* Add the appropriate number of ticks of blocked time, | 147 | /* Add the appropriate number of ticks of blocked time, |
148 | including any left-overs from last time. */ | 148 | including any left-overs from last time. */ |
149 | blocked += __get_cpu_var(xen_residual_blocked); | 149 | blocked += __this_cpu_read(xen_residual_blocked); |
150 | 150 | ||
151 | if (blocked < 0) | 151 | if (blocked < 0) |
152 | blocked = 0; | 152 | blocked = 0; |
153 | 153 | ||
154 | ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); | 154 | ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); |
155 | __get_cpu_var(xen_residual_blocked) = blocked; | 155 | __this_cpu_write(xen_residual_blocked, blocked); |
156 | account_idle_ticks(ticks); | 156 | account_idle_ticks(ticks); |
157 | } | 157 | } |
158 | 158 | ||
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index dcb38f8ddfda..a765b823aa9e 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -746,7 +746,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, | |||
746 | struct acpi_processor *pr; | 746 | struct acpi_processor *pr; |
747 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); | 747 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); |
748 | 748 | ||
749 | pr = __get_cpu_var(processors); | 749 | pr = __this_cpu_read(processors); |
750 | 750 | ||
751 | if (unlikely(!pr)) | 751 | if (unlikely(!pr)) |
752 | return 0; | 752 | return 0; |
@@ -787,7 +787,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
787 | s64 idle_time_ns; | 787 | s64 idle_time_ns; |
788 | s64 idle_time; | 788 | s64 idle_time; |
789 | 789 | ||
790 | pr = __get_cpu_var(processors); | 790 | pr = __this_cpu_read(processors); |
791 | 791 | ||
792 | if (unlikely(!pr)) | 792 | if (unlikely(!pr)) |
793 | return 0; | 793 | return 0; |
@@ -864,7 +864,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
864 | s64 idle_time; | 864 | s64 idle_time; |
865 | 865 | ||
866 | 866 | ||
867 | pr = __get_cpu_var(processors); | 867 | pr = __this_cpu_read(processors); |
868 | 868 | ||
869 | if (unlikely(!pr)) | 869 | if (unlikely(!pr)) |
870 | return 0; | 870 | return 0; |
diff --git a/drivers/char/random.c b/drivers/char/random.c index 5a1aa64f4e76..72a4fcb17745 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -626,7 +626,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) | |||
626 | preempt_disable(); | 626 | preempt_disable(); |
627 | /* if over the trickle threshold, use only 1 in 4096 samples */ | 627 | /* if over the trickle threshold, use only 1 in 4096 samples */ |
628 | if (input_pool.entropy_count > trickle_thresh && | 628 | if (input_pool.entropy_count > trickle_thresh && |
629 | (__get_cpu_var(trickle_count)++ & 0xfff)) | 629 | ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) |
630 | goto out; | 630 | goto out; |
631 | 631 | ||
632 | sample.jiffies = jiffies; | 632 | sample.jiffies = jiffies; |
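The random.c hunk above shows the general rewrite rule for post-increment on a per-cpu counter: __get_cpu_var(x)++ evaluates to the old value, so its replacement is __this_cpu_inc_return(x) - 1, which x86 can emit as a single segment-prefixed add. A minimal sketch of the pattern, kernel context assumed (sample_count and sample_throttled are hypothetical names, not kernel symbols):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, sample_count);

static bool sample_throttled(void)
{
	/* old form: (__get_cpu_var(sample_count)++ & 0xfff)            */
	/* new form: same value, one per-cpu increment, no address math */
	return (__this_cpu_inc_return(sample_count) - 1) & 0xfff;
}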
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index a7f046b0096c..2b46a7efa0ac 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c | |||
@@ -43,9 +43,10 @@ static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 }; | |||
43 | 43 | ||
44 | static inline void get_seq(__u32 *ts, int *cpu) | 44 | static inline void get_seq(__u32 *ts, int *cpu) |
45 | { | 45 | { |
46 | *ts = get_cpu_var(proc_event_counts)++; | 46 | preempt_disable(); |
47 | *ts = __this_cpu_inc_return(proc_event_counts) - 1; | ||
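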
47 | *cpu = smp_processor_id(); | 48 | *cpu = smp_processor_id(); |
48 | put_cpu_var(proc_event_counts); | 49 | preempt_enable(); |
49 | } | 50 | } |
50 | 51 | ||
51 | void proc_fork_connector(struct task_struct *task) | 52 | void proc_fork_connector(struct task_struct *task) |
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 08d5f05378d9..386888f10df0 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
@@ -49,7 +49,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev); | |||
49 | */ | 49 | */ |
50 | static void cpuidle_idle_call(void) | 50 | static void cpuidle_idle_call(void) |
51 | { | 51 | { |
52 | struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices); | 52 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
53 | struct cpuidle_state *target_state; | 53 | struct cpuidle_state *target_state; |
54 | int next_state; | 54 | int next_state; |
55 | 55 | ||
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c index dbf741c95835..23cf8fc933ec 100644 --- a/drivers/input/gameport/gameport.c +++ b/drivers/input/gameport/gameport.c | |||
@@ -121,7 +121,7 @@ static int gameport_measure_speed(struct gameport *gameport) | |||
121 | } | 121 | } |
122 | 122 | ||
123 | gameport_close(gameport); | 123 | gameport_close(gameport); |
124 | return (cpu_data(raw_smp_processor_id()).loops_per_jiffy * | 124 | return (this_cpu_read(cpu_info.loops_per_jiffy) * |
125 | (unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx); | 125 | (unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx); |
126 | 126 | ||
127 | #else | 127 | #else |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index f4e6cf3aceb8..430f875006f2 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -619,7 +619,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs) | |||
619 | s390_idle_check(regs, S390_lowcore.int_clock, | 619 | s390_idle_check(regs, S390_lowcore.int_clock, |
620 | S390_lowcore.async_enter_timer); | 620 | S390_lowcore.async_enter_timer); |
621 | irq_enter(); | 621 | irq_enter(); |
622 | __get_cpu_var(s390_idle).nohz_delay = 1; | 622 | __this_cpu_write(s390_idle.nohz_delay, 1); |
623 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) | 623 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) |
624 | /* Serve timer interrupts first. */ | 624 | /* Serve timer interrupts first. */ |
625 | clock_comparator_work(); | 625 | clock_comparator_work(); |
diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c index 971844bbee28..9bcf149c4260 100644 --- a/drivers/staging/lirc/lirc_serial.c +++ b/drivers/staging/lirc/lirc_serial.c | |||
@@ -377,7 +377,7 @@ static int init_timing_params(unsigned int new_duty_cycle, | |||
377 | duty_cycle = new_duty_cycle; | 377 | duty_cycle = new_duty_cycle; |
378 | freq = new_freq; | 378 | freq = new_freq; |
379 | 379 | ||
380 | loops_per_sec = current_cpu_data.loops_per_jiffy; | 380 | loops_per_sec = __this_cpu_read(cpu_info.loops_per_jiffy); |
381 | loops_per_sec *= HZ; | 381 | loops_per_sec *= HZ; |
382 | 382 | ||
383 | /* How many clocks in a microsecond?, avoiding long long divide */ | 383 | /* How many clocks in a microsecond?, avoiding long long divide */ |
@@ -398,7 +398,7 @@ static int init_timing_params(unsigned int new_duty_cycle, | |||
398 | dprintk("in init_timing_params, freq=%d, duty_cycle=%d, " | 398 | dprintk("in init_timing_params, freq=%d, duty_cycle=%d, " |
399 | "clk/jiffy=%ld, pulse=%ld, space=%ld, " | 399 | "clk/jiffy=%ld, pulse=%ld, space=%ld, " |
400 | "conv_us_to_clocks=%ld\n", | 400 | "conv_us_to_clocks=%ld\n", |
401 | freq, duty_cycle, current_cpu_data.loops_per_jiffy, | 401 | freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy), |
402 | pulse_width, space_width, conv_us_to_clocks); | 402 | pulse_width, space_width, conv_us_to_clocks); |
403 | return 0; | 403 | return 0; |
404 | } | 404 | } |
diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c index 65b231178f05..1b34a8771641 100644 --- a/drivers/staging/speakup/fakekey.c +++ b/drivers/staging/speakup/fakekey.c | |||
@@ -78,10 +78,10 @@ void speakup_fake_down_arrow(void) | |||
78 | /* don't change CPU */ | 78 | /* don't change CPU */ |
79 | preempt_disable(); | 79 | preempt_disable(); |
80 | 80 | ||
81 | __get_cpu_var(reporting_keystroke) = true; | 81 | __this_cpu_write(reporting_keystroke, true); |
82 | input_report_key(virt_keyboard, KEY_DOWN, PRESSED); | 82 | input_report_key(virt_keyboard, KEY_DOWN, PRESSED); |
83 | input_report_key(virt_keyboard, KEY_DOWN, RELEASED); | 83 | input_report_key(virt_keyboard, KEY_DOWN, RELEASED); |
84 | __get_cpu_var(reporting_keystroke) = false; | 84 | __this_cpu_write(reporting_keystroke, false); |
85 | 85 | ||
86 | /* reenable preemption */ | 86 | /* reenable preemption */ |
87 | preempt_enable(); | 87 | preempt_enable(); |
@@ -95,10 +95,5 @@ void speakup_fake_down_arrow(void) | |||
95 | */ | 95 | */ |
96 | bool speakup_fake_key_pressed(void) | 96 | bool speakup_fake_key_pressed(void) |
97 | { | 97 | { |
98 | bool is_pressed; | 98 | return this_cpu_read(reporting_keystroke); |
99 | |||
100 | is_pressed = get_cpu_var(reporting_keystroke); | ||
101 | put_cpu_var(reporting_keystroke); | ||
102 | |||
103 | return is_pressed; | ||
104 | } | 99 | } |
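The speakup change above collapses a get_cpu_var()/put_cpu_var() pair around a plain read into a single this_cpu_read(), which handles preemption on its own. A minimal sketch under the same assumption (demo_flag and demo_flag_set are illustrative names, not part of the driver):

#include <linux/percpu.h>

static DEFINE_PER_CPU(bool, demo_flag);

static bool demo_flag_set(void)
{
	/* this_cpu_read() is preempt-safe by itself, so no explicit
	 * get_cpu_var()/put_cpu_var() bracket is needed for a read */
	return this_cpu_read(demo_flag);
}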
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 31af0ac31a98..65f8637d13cf 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -355,7 +355,7 @@ static void unmask_evtchn(int port) | |||
355 | struct evtchn_unmask unmask = { .port = port }; | 355 | struct evtchn_unmask unmask = { .port = port }; |
356 | (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); | 356 | (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); |
357 | } else { | 357 | } else { |
358 | struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); | 358 | struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); |
359 | 359 | ||
360 | sync_clear_bit(port, &s->evtchn_mask[0]); | 360 | sync_clear_bit(port, &s->evtchn_mask[0]); |
361 | 361 | ||
@@ -1101,7 +1101,7 @@ static void __xen_evtchn_do_upcall(void) | |||
1101 | { | 1101 | { |
1102 | int cpu = get_cpu(); | 1102 | int cpu = get_cpu(); |
1103 | struct shared_info *s = HYPERVISOR_shared_info; | 1103 | struct shared_info *s = HYPERVISOR_shared_info; |
1104 | struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); | 1104 | struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); |
1105 | unsigned count; | 1105 | unsigned count; |
1106 | 1106 | ||
1107 | do { | 1107 | do { |
@@ -1109,7 +1109,7 @@ static void __xen_evtchn_do_upcall(void) | |||
1109 | 1109 | ||
1110 | vcpu_info->evtchn_upcall_pending = 0; | 1110 | vcpu_info->evtchn_upcall_pending = 0; |
1111 | 1111 | ||
1112 | if (__get_cpu_var(xed_nesting_count)++) | 1112 | if (__this_cpu_inc_return(xed_nesting_count) - 1) |
1113 | goto out; | 1113 | goto out; |
1114 | 1114 | ||
1115 | #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ | 1115 | #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ |
@@ -1141,8 +1141,8 @@ static void __xen_evtchn_do_upcall(void) | |||
1141 | 1141 | ||
1142 | BUG_ON(!irqs_disabled()); | 1142 | BUG_ON(!irqs_disabled()); |
1143 | 1143 | ||
1144 | count = __get_cpu_var(xed_nesting_count); | 1144 | count = __this_cpu_read(xed_nesting_count); |
1145 | __get_cpu_var(xed_nesting_count) = 0; | 1145 | __this_cpu_write(xed_nesting_count, 0); |
1146 | } while (count != 1 || vcpu_info->evtchn_upcall_pending); | 1146 | } while (count != 1 || vcpu_info->evtchn_upcall_pending); |
1147 | 1147 | ||
1148 | out: | 1148 | out: |
diff --git a/fs/buffer.c b/fs/buffer.c index 5930e382959b..2219a76e2caf 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -1270,12 +1270,10 @@ static inline void check_irqs_on(void) | |||
1270 | static void bh_lru_install(struct buffer_head *bh) | 1270 | static void bh_lru_install(struct buffer_head *bh) |
1271 | { | 1271 | { |
1272 | struct buffer_head *evictee = NULL; | 1272 | struct buffer_head *evictee = NULL; |
1273 | struct bh_lru *lru; | ||
1274 | 1273 | ||
1275 | check_irqs_on(); | 1274 | check_irqs_on(); |
1276 | bh_lru_lock(); | 1275 | bh_lru_lock(); |
1277 | lru = &__get_cpu_var(bh_lrus); | 1276 | if (__this_cpu_read(bh_lrus.bhs[0]) != bh) { |
1278 | if (lru->bhs[0] != bh) { | ||
1279 | struct buffer_head *bhs[BH_LRU_SIZE]; | 1277 | struct buffer_head *bhs[BH_LRU_SIZE]; |
1280 | int in; | 1278 | int in; |
1281 | int out = 0; | 1279 | int out = 0; |
@@ -1283,7 +1281,8 @@ static void bh_lru_install(struct buffer_head *bh) | |||
1283 | get_bh(bh); | 1281 | get_bh(bh); |
1284 | bhs[out++] = bh; | 1282 | bhs[out++] = bh; |
1285 | for (in = 0; in < BH_LRU_SIZE; in++) { | 1283 | for (in = 0; in < BH_LRU_SIZE; in++) { |
1286 | struct buffer_head *bh2 = lru->bhs[in]; | 1284 | struct buffer_head *bh2 = |
1285 | __this_cpu_read(bh_lrus.bhs[in]); | ||
1287 | 1286 | ||
1288 | if (bh2 == bh) { | 1287 | if (bh2 == bh) { |
1289 | __brelse(bh2); | 1288 | __brelse(bh2); |
@@ -1298,7 +1297,7 @@ static void bh_lru_install(struct buffer_head *bh) | |||
1298 | } | 1297 | } |
1299 | while (out < BH_LRU_SIZE) | 1298 | while (out < BH_LRU_SIZE) |
1300 | bhs[out++] = NULL; | 1299 | bhs[out++] = NULL; |
1301 | memcpy(lru->bhs, bhs, sizeof(bhs)); | 1300 | memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs)); |
1302 | } | 1301 | } |
1303 | bh_lru_unlock(); | 1302 | bh_lru_unlock(); |
1304 | 1303 | ||
@@ -1313,23 +1312,22 @@ static struct buffer_head * | |||
1313 | lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) | 1312 | lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) |
1314 | { | 1313 | { |
1315 | struct buffer_head *ret = NULL; | 1314 | struct buffer_head *ret = NULL; |
1316 | struct bh_lru *lru; | ||
1317 | unsigned int i; | 1315 | unsigned int i; |
1318 | 1316 | ||
1319 | check_irqs_on(); | 1317 | check_irqs_on(); |
1320 | bh_lru_lock(); | 1318 | bh_lru_lock(); |
1321 | lru = &__get_cpu_var(bh_lrus); | ||
1322 | for (i = 0; i < BH_LRU_SIZE; i++) { | 1319 | for (i = 0; i < BH_LRU_SIZE; i++) { |
1323 | struct buffer_head *bh = lru->bhs[i]; | 1320 | struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); |
1324 | 1321 | ||
1325 | if (bh && bh->b_bdev == bdev && | 1322 | if (bh && bh->b_bdev == bdev && |
1326 | bh->b_blocknr == block && bh->b_size == size) { | 1323 | bh->b_blocknr == block && bh->b_size == size) { |
1327 | if (i) { | 1324 | if (i) { |
1328 | while (i) { | 1325 | while (i) { |
1329 | lru->bhs[i] = lru->bhs[i - 1]; | 1326 | __this_cpu_write(bh_lrus.bhs[i], |
1327 | __this_cpu_read(bh_lrus.bhs[i - 1])); | ||
1330 | i--; | 1328 | i--; |
1331 | } | 1329 | } |
1332 | lru->bhs[0] = bh; | 1330 | __this_cpu_write(bh_lrus.bhs[0], bh); |
1333 | } | 1331 | } |
1334 | get_bh(bh); | 1332 | get_bh(bh); |
1335 | ret = bh; | 1333 | ret = bh; |
@@ -3203,22 +3201,23 @@ static void recalc_bh_state(void) | |||
3203 | int i; | 3201 | int i; |
3204 | int tot = 0; | 3202 | int tot = 0; |
3205 | 3203 | ||
3206 | if (__get_cpu_var(bh_accounting).ratelimit++ < 4096) | 3204 | if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096) |
3207 | return; | 3205 | return; |
3208 | __get_cpu_var(bh_accounting).ratelimit = 0; | 3206 | __this_cpu_write(bh_accounting.ratelimit, 0); |
3209 | for_each_online_cpu(i) | 3207 | for_each_online_cpu(i) |
3210 | tot += per_cpu(bh_accounting, i).nr; | 3208 | tot += per_cpu(bh_accounting, i).nr; |
3211 | buffer_heads_over_limit = (tot > max_buffer_heads); | 3209 | buffer_heads_over_limit = (tot > max_buffer_heads); |
3212 | } | 3210 | } |
3213 | 3211 | ||
3214 | struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) | 3212 | struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) |
3215 | { | 3213 | { |
3216 | struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); | 3214 | struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); |
3217 | if (ret) { | 3215 | if (ret) { |
3218 | INIT_LIST_HEAD(&ret->b_assoc_buffers); | 3216 | INIT_LIST_HEAD(&ret->b_assoc_buffers); |
3219 | get_cpu_var(bh_accounting).nr++; | 3217 | preempt_disable(); |
3218 | __this_cpu_inc(bh_accounting.nr); | ||
3220 | recalc_bh_state(); | 3219 | recalc_bh_state(); |
3221 | put_cpu_var(bh_accounting); | 3220 | preempt_enable(); |
3222 | } | 3221 | } |
3223 | return ret; | 3222 | return ret; |
3224 | } | 3223 | } |
@@ -3228,9 +3227,10 @@ void free_buffer_head(struct buffer_head *bh) | |||
3228 | { | 3227 | { |
3229 | BUG_ON(!list_empty(&bh->b_assoc_buffers)); | 3228 | BUG_ON(!list_empty(&bh->b_assoc_buffers)); |
3230 | kmem_cache_free(bh_cachep, bh); | 3229 | kmem_cache_free(bh_cachep, bh); |
3231 | get_cpu_var(bh_accounting).nr--; | 3230 | preempt_disable(); |
3231 | __this_cpu_dec(bh_accounting.nr); | ||
3232 | recalc_bh_state(); | 3232 | recalc_bh_state(); |
3233 | put_cpu_var(bh_accounting); | 3233 | preempt_enable(); |
3234 | } | 3234 | } |
3235 | EXPORT_SYMBOL(free_buffer_head); | 3235 | EXPORT_SYMBOL(free_buffer_head); |
3236 | 3236 | ||
@@ -3243,9 +3243,8 @@ static void buffer_exit_cpu(int cpu) | |||
3243 | brelse(b->bhs[i]); | 3243 | brelse(b->bhs[i]); |
3244 | b->bhs[i] = NULL; | 3244 | b->bhs[i] = NULL; |
3245 | } | 3245 | } |
3246 | get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr; | 3246 | this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr); |
3247 | per_cpu(bh_accounting, cpu).nr = 0; | 3247 | per_cpu(bh_accounting, cpu).nr = 0; |
3248 | put_cpu_var(bh_accounting); | ||
3249 | } | 3248 | } |
3250 | 3249 | ||
3251 | static int buffer_cpu_notify(struct notifier_block *self, | 3250 | static int buffer_cpu_notify(struct notifier_block *self, |
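In the buffer.c accounting paths above, get_cpu_var()/put_cpu_var() becomes an explicit preempt_disable()/preempt_enable() pair plus __this_cpu_inc()/__this_cpu_dec(): preemption still has to stay off because recalc_bh_state() must run on the same CPU as the counter update, but the update itself no longer needs per-cpu address arithmetic. A minimal sketch of that shape (demo_nr and demo_account are hypothetical names):

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(long, demo_nr);

static void demo_account(long delta)
{
	preempt_disable();		/* keep the whole section on one CPU */
	__this_cpu_add(demo_nr, delta);	/* cheap: no preempt handling inside */
	/* ... follow-up work that must see this CPU's counter ... */
	preempt_enable();
}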
diff --git a/include/asm-generic/irq_regs.h b/include/asm-generic/irq_regs.h index 5ae1d07d4a12..6bf9355fa7eb 100644 --- a/include/asm-generic/irq_regs.h +++ b/include/asm-generic/irq_regs.h | |||
@@ -22,15 +22,15 @@ DECLARE_PER_CPU(struct pt_regs *, __irq_regs); | |||
22 | 22 | ||
23 | static inline struct pt_regs *get_irq_regs(void) | 23 | static inline struct pt_regs *get_irq_regs(void) |
24 | { | 24 | { |
25 | return __get_cpu_var(__irq_regs); | 25 | return __this_cpu_read(__irq_regs); |
26 | } | 26 | } |
27 | 27 | ||
28 | static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) | 28 | static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) |
29 | { | 29 | { |
30 | struct pt_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs); | 30 | struct pt_regs *old_regs; |
31 | 31 | ||
32 | old_regs = *pp_regs; | 32 | old_regs = __this_cpu_read(__irq_regs); |
33 | *pp_regs = new_regs; | 33 | __this_cpu_write(__irq_regs, new_regs); |
34 | return old_regs; | 34 | return old_regs; |
35 | } | 35 | } |
36 | 36 | ||
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 4fd978e7eb83..4d857973d2c9 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
@@ -195,15 +195,9 @@ enum { | |||
195 | /* | 195 | /* |
196 | * io context count accounting | 196 | * io context count accounting |
197 | */ | 197 | */ |
198 | #define elv_ioc_count_mod(name, __val) \ | 198 | #define elv_ioc_count_mod(name, __val) this_cpu_add(name, __val) |
199 | do { \ | 199 | #define elv_ioc_count_inc(name) this_cpu_inc(name) |
200 | preempt_disable(); \ | 200 | #define elv_ioc_count_dec(name) this_cpu_dec(name) |
201 | __get_cpu_var(name) += (__val); \ | ||
202 | preempt_enable(); \ | ||
203 | } while (0) | ||
204 | |||
205 | #define elv_ioc_count_inc(name) elv_ioc_count_mod(name, 1) | ||
206 | #define elv_ioc_count_dec(name) elv_ioc_count_mod(name, -1) | ||
207 | 201 | ||
208 | #define elv_ioc_count_read(name) \ | 202 | #define elv_ioc_count_read(name) \ |
209 | ({ \ | 203 | ({ \ |
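The elevator.h hunk drops the open-coded preempt_disable()/__get_cpu_var()/preempt_enable() sequence because this_cpu_add() and friends already guarantee an atomic-versus-preemption update. As a sketch (demo_ioc_count and demo_count_inc are hypothetical):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_ioc_count);

static inline void demo_count_inc(void)
{
	/* one call replaces the disable/modify/enable triplet; on x86
	 * this compiles to a single %gs-relative increment */
	this_cpu_inc(demo_ioc_count);
}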
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index b676c585574e..3a93f73a8acc 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h | |||
@@ -81,7 +81,8 @@ DECLARE_PER_CPU(int, __kmap_atomic_idx); | |||
81 | 81 | ||
82 | static inline int kmap_atomic_idx_push(void) | 82 | static inline int kmap_atomic_idx_push(void) |
83 | { | 83 | { |
84 | int idx = __get_cpu_var(__kmap_atomic_idx)++; | 84 | int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; |
85 | |||
85 | #ifdef CONFIG_DEBUG_HIGHMEM | 86 | #ifdef CONFIG_DEBUG_HIGHMEM |
86 | WARN_ON_ONCE(in_irq() && !irqs_disabled()); | 87 | WARN_ON_ONCE(in_irq() && !irqs_disabled()); |
87 | BUG_ON(idx > KM_TYPE_NR); | 88 | BUG_ON(idx > KM_TYPE_NR); |
@@ -91,16 +92,18 @@ static inline int kmap_atomic_idx_push(void) | |||
91 | 92 | ||
92 | static inline int kmap_atomic_idx(void) | 93 | static inline int kmap_atomic_idx(void) |
93 | { | 94 | { |
94 | return __get_cpu_var(__kmap_atomic_idx) - 1; | 95 | return __this_cpu_read(__kmap_atomic_idx) - 1; |
95 | } | 96 | } |
96 | 97 | ||
97 | static inline int kmap_atomic_idx_pop(void) | 98 | static inline void kmap_atomic_idx_pop(void) |
98 | { | 99 | { |
99 | int idx = --__get_cpu_var(__kmap_atomic_idx); | ||
100 | #ifdef CONFIG_DEBUG_HIGHMEM | 100 | #ifdef CONFIG_DEBUG_HIGHMEM |
101 | int idx = __this_cpu_dec_return(__kmap_atomic_idx); | ||
102 | |||
101 | BUG_ON(idx < 0); | 103 | BUG_ON(idx < 0); |
104 | #else | ||
105 | __this_cpu_dec(__kmap_atomic_idx); | ||
102 | #endif | 106 | #endif |
103 | return idx; | ||
104 | } | 107 | } |
105 | 108 | ||
106 | #endif | 109 | #endif |
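kmap_atomic_idx_push()/_pop() above become single inc_return/dec operations on the per-cpu depth counter, and the pop path stops returning a value no caller used. The same push/pop shape in isolation, kernel context assumed (demo_stack_idx and the helpers are hypothetical names):

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_stack_idx);

static inline int demo_idx_push(void)
{
	/* claim the next slot; the slot index is the pre-increment value */
	return __this_cpu_inc_return(demo_stack_idx) - 1;
}

static inline void demo_idx_pop(void)
{
	/* release the most recently claimed slot */
	__this_cpu_dec(demo_stack_idx);
}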
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index ad54c846911b..44e83ba12b5b 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h | |||
@@ -47,7 +47,7 @@ extern unsigned long long nr_context_switches(void); | |||
47 | 47 | ||
48 | #ifndef CONFIG_GENERIC_HARDIRQS | 48 | #ifndef CONFIG_GENERIC_HARDIRQS |
49 | #define kstat_irqs_this_cpu(irq) \ | 49 | #define kstat_irqs_this_cpu(irq) \ |
50 | (kstat_this_cpu.irqs[irq]) | 50 | (this_cpu_read(kstat.irqs[irq])) |
51 | 51 | ||
52 | struct irq_desc; | 52 | struct irq_desc; |
53 | 53 | ||
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index b78edb58ee66..dd7c12e875bc 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h | |||
@@ -305,12 +305,12 @@ struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk); | |||
305 | /* kprobe_running() will just return the current_kprobe on this CPU */ | 305 | /* kprobe_running() will just return the current_kprobe on this CPU */ |
306 | static inline struct kprobe *kprobe_running(void) | 306 | static inline struct kprobe *kprobe_running(void) |
307 | { | 307 | { |
308 | return (__get_cpu_var(current_kprobe)); | 308 | return (__this_cpu_read(current_kprobe)); |
309 | } | 309 | } |
310 | 310 | ||
311 | static inline void reset_current_kprobe(void) | 311 | static inline void reset_current_kprobe(void) |
312 | { | 312 | { |
313 | __get_cpu_var(current_kprobe) = NULL; | 313 | __this_cpu_write(current_kprobe, NULL); |
314 | } | 314 | } |
315 | 315 | ||
316 | static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) | 316 | static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) |
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 5095b834a6fb..27c3c6fcfad3 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -240,6 +240,21 @@ extern void __bad_size_call_parameter(void); | |||
240 | pscr_ret__; \ | 240 | pscr_ret__; \ |
241 | }) | 241 | }) |
242 | 242 | ||
243 | #define __pcpu_size_call_return2(stem, variable, ...) \ | ||
244 | ({ \ | ||
245 | typeof(variable) pscr2_ret__; \ | ||
246 | __verify_pcpu_ptr(&(variable)); \ | ||
247 | switch(sizeof(variable)) { \ | ||
248 | case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ | ||
249 | case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \ | ||
250 | case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \ | ||
251 | case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \ | ||
252 | default: \ | ||
253 | __bad_size_call_parameter(); break; \ | ||
254 | } \ | ||
255 | pscr2_ret__; \ | ||
256 | }) | ||
257 | |||
243 | #define __pcpu_size_call(stem, variable, ...) \ | 258 | #define __pcpu_size_call(stem, variable, ...) \ |
244 | do { \ | 259 | do { \ |
245 | __verify_pcpu_ptr(&(variable)); \ | 260 | __verify_pcpu_ptr(&(variable)); \ |
@@ -402,6 +417,89 @@ do { \ | |||
402 | # define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val)) | 417 | # define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val)) |
403 | #endif | 418 | #endif |
404 | 419 | ||
420 | #define _this_cpu_generic_add_return(pcp, val) \ | ||
421 | ({ \ | ||
422 | typeof(pcp) ret__; \ | ||
423 | preempt_disable(); \ | ||
424 | __this_cpu_add(pcp, val); \ | ||
425 | ret__ = __this_cpu_read(pcp); \ | ||
426 | preempt_enable(); \ | ||
427 | ret__; \ | ||
428 | }) | ||
429 | |||
430 | #ifndef this_cpu_add_return | ||
431 | # ifndef this_cpu_add_return_1 | ||
432 | # define this_cpu_add_return_1(pcp, val) _this_cpu_generic_add_return(pcp, val) | ||
433 | # endif | ||
434 | # ifndef this_cpu_add_return_2 | ||
435 | # define this_cpu_add_return_2(pcp, val) _this_cpu_generic_add_return(pcp, val) | ||
436 | # endif | ||
437 | # ifndef this_cpu_add_return_4 | ||
438 | # define this_cpu_add_return_4(pcp, val) _this_cpu_generic_add_return(pcp, val) | ||
439 | # endif | ||
440 | # ifndef this_cpu_add_return_8 | ||
441 | # define this_cpu_add_return_8(pcp, val) _this_cpu_generic_add_return(pcp, val) | ||
442 | # endif | ||
443 | # define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) | ||
444 | #endif | ||
445 | |||
446 | #define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val)) | ||
447 | #define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) | ||
448 | #define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) | ||
449 | |||
450 | #define _this_cpu_generic_xchg(pcp, nval) \ | ||
451 | ({ typeof(pcp) ret__; \ | ||
452 | preempt_disable(); \ | ||
453 | ret__ = __this_cpu_read(pcp); \ | ||
454 | __this_cpu_write(pcp, nval); \ | ||
455 | preempt_enable(); \ | ||
456 | ret__; \ | ||
457 | }) | ||
458 | |||
459 | #ifndef this_cpu_xchg | ||
460 | # ifndef this_cpu_xchg_1 | ||
461 | # define this_cpu_xchg_1(pcp, nval) _this_cpu_generic_xchg(pcp, nval) | ||
462 | # endif | ||
463 | # ifndef this_cpu_xchg_2 | ||
464 | # define this_cpu_xchg_2(pcp, nval) _this_cpu_generic_xchg(pcp, nval) | ||
465 | # endif | ||
466 | # ifndef this_cpu_xchg_4 | ||
467 | # define this_cpu_xchg_4(pcp, nval) _this_cpu_generic_xchg(pcp, nval) | ||
468 | # endif | ||
469 | # ifndef this_cpu_xchg_8 | ||
470 | # define this_cpu_xchg_8(pcp, nval) _this_cpu_generic_xchg(pcp, nval) | ||
471 | # endif | ||
472 | # define this_cpu_xchg(pcp, nval) \ | ||
473 | __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval) | ||
474 | #endif | ||
475 | |||
476 | #define _this_cpu_generic_cmpxchg(pcp, oval, nval) \ | ||
477 | ({ typeof(pcp) ret__; \ | ||
478 | preempt_disable(); \ | ||
479 | ret__ = __this_cpu_read(pcp); \ | ||
480 | if (ret__ == (oval)) \ | ||
481 | __this_cpu_write(pcp, nval); \ | ||
482 | preempt_enable(); \ | ||
483 | ret__; \ | ||
484 | }) | ||
485 | |||
486 | #ifndef this_cpu_cmpxchg | ||
487 | # ifndef this_cpu_cmpxchg_1 | ||
488 | # define this_cpu_cmpxchg_1(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) | ||
489 | # endif | ||
490 | # ifndef this_cpu_cmpxchg_2 | ||
491 | # define this_cpu_cmpxchg_2(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) | ||
492 | # endif | ||
493 | # ifndef this_cpu_cmpxchg_4 | ||
494 | # define this_cpu_cmpxchg_4(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) | ||
495 | # endif | ||
496 | # ifndef this_cpu_cmpxchg_8 | ||
497 | # define this_cpu_cmpxchg_8(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) | ||
498 | # endif | ||
499 | # define this_cpu_cmpxchg(pcp, oval, nval) \ | ||
500 | __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) | ||
501 | #endif | ||
502 | |||
405 | /* | 503 | /* |
406 | * Generic percpu operations that do not require preemption handling. | 504 | * Generic percpu operations that do not require preemption handling. |
407 | * Either we do not care about races or the caller has the | 505 | * Either we do not care about races or the caller has the |
@@ -529,11 +627,87 @@ do { \ | |||
529 | # define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val)) | 627 | # define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val)) |
530 | #endif | 628 | #endif |
531 | 629 | ||
630 | #define __this_cpu_generic_add_return(pcp, val) \ | ||
631 | ({ \ | ||
632 | __this_cpu_add(pcp, val); \ | ||
633 | __this_cpu_read(pcp); \ | ||
634 | }) | ||
635 | |||
636 | #ifndef __this_cpu_add_return | ||
637 | # ifndef __this_cpu_add_return_1 | ||
638 | # define __this_cpu_add_return_1(pcp, val) __this_cpu_generic_add_return(pcp, val) | ||
639 | # endif | ||
640 | # ifndef __this_cpu_add_return_2 | ||
641 | # define __this_cpu_add_return_2(pcp, val) __this_cpu_generic_add_return(pcp, val) | ||
642 | # endif | ||
643 | # ifndef __this_cpu_add_return_4 | ||
644 | # define __this_cpu_add_return_4(pcp, val) __this_cpu_generic_add_return(pcp, val) | ||
645 | # endif | ||
646 | # ifndef __this_cpu_add_return_8 | ||
647 | # define __this_cpu_add_return_8(pcp, val) __this_cpu_generic_add_return(pcp, val) | ||
648 | # endif | ||
649 | # define __this_cpu_add_return(pcp, val) __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val) | ||
650 | #endif | ||
651 | |||
652 | #define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(val)) | ||
653 | #define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) | ||
654 | #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) | ||
655 | |||
656 | #define __this_cpu_generic_xchg(pcp, nval) \ | ||
657 | ({ typeof(pcp) ret__; \ | ||
658 | ret__ = __this_cpu_read(pcp); \ | ||
659 | __this_cpu_write(pcp, nval); \ | ||
660 | ret__; \ | ||
661 | }) | ||
662 | |||
663 | #ifndef __this_cpu_xchg | ||
664 | # ifndef __this_cpu_xchg_1 | ||
665 | # define __this_cpu_xchg_1(pcp, nval) __this_cpu_generic_xchg(pcp, nval) | ||
666 | # endif | ||
667 | # ifndef __this_cpu_xchg_2 | ||
668 | # define __this_cpu_xchg_2(pcp, nval) __this_cpu_generic_xchg(pcp, nval) | ||
669 | # endif | ||
670 | # ifndef __this_cpu_xchg_4 | ||
671 | # define __this_cpu_xchg_4(pcp, nval) __this_cpu_generic_xchg(pcp, nval) | ||
672 | # endif | ||
673 | # ifndef __this_cpu_xchg_8 | ||
674 | # define __this_cpu_xchg_8(pcp, nval) __this_cpu_generic_xchg(pcp, nval) | ||
675 | # endif | ||
676 | # define __this_cpu_xchg(pcp, nval) \ | ||
677 | __pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval) | ||
678 | #endif | ||
679 | |||
680 | #define __this_cpu_generic_cmpxchg(pcp, oval, nval) \ | ||
681 | ({ \ | ||
682 | typeof(pcp) ret__; \ | ||
683 | ret__ = __this_cpu_read(pcp); \ | ||
684 | if (ret__ == (oval)) \ | ||
685 | __this_cpu_write(pcp, nval); \ | ||
686 | ret__; \ | ||
687 | }) | ||
688 | |||
689 | #ifndef __this_cpu_cmpxchg | ||
690 | # ifndef __this_cpu_cmpxchg_1 | ||
691 | # define __this_cpu_cmpxchg_1(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval) | ||
692 | # endif | ||
693 | # ifndef __this_cpu_cmpxchg_2 | ||
694 | # define __this_cpu_cmpxchg_2(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval) | ||
695 | # endif | ||
696 | # ifndef __this_cpu_cmpxchg_4 | ||
697 | # define __this_cpu_cmpxchg_4(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval) | ||
698 | # endif | ||
699 | # ifndef __this_cpu_cmpxchg_8 | ||
700 | # define __this_cpu_cmpxchg_8(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval) | ||
701 | # endif | ||
702 | # define __this_cpu_cmpxchg(pcp, oval, nval) \ | ||
703 | __pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval) | ||
704 | #endif | ||
705 | |||
532 | /* | 706 | /* |
533 | * IRQ safe versions of the per cpu RMW operations. Note that these operations | 707 | * IRQ safe versions of the per cpu RMW operations. Note that these operations |
534 | * are *not* safe against modification of the same variable from another | 708 | * are *not* safe against modification of the same variable from another |
535 | * processors (which one gets when using regular atomic operations) | 709 | * processors (which one gets when using regular atomic operations) |
536 | . They are guaranteed to be atomic vs. local interrupts and | 710 | * They are guaranteed to be atomic vs. local interrupts and |
537 | * preemption only. | 711 | * preemption only. |
538 | */ | 712 | */ |
539 | #define irqsafe_cpu_generic_to_op(pcp, val, op) \ | 713 | #define irqsafe_cpu_generic_to_op(pcp, val, op) \ |
@@ -620,4 +794,33 @@ do { \ | |||
620 | # define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (val)) | 794 | # define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (val)) |
621 | #endif | 795 | #endif |
622 | 796 | ||
797 | #define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) \ | ||
798 | ({ \ | ||
799 | typeof(pcp) ret__; \ | ||
800 | unsigned long flags; \ | ||
801 | local_irq_save(flags); \ | ||
802 | ret__ = __this_cpu_read(pcp); \ | ||
803 | if (ret__ == (oval)) \ | ||
804 | __this_cpu_write(pcp, nval); \ | ||
805 | local_irq_restore(flags); \ | ||
806 | ret__; \ | ||
807 | }) | ||
808 | |||
809 | #ifndef irqsafe_cpu_cmpxchg | ||
810 | # ifndef irqsafe_cpu_cmpxchg_1 | ||
811 | # define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) | ||
812 | # endif | ||
813 | # ifndef irqsafe_cpu_cmpxchg_2 | ||
814 | # define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) | ||
815 | # endif | ||
816 | # ifndef irqsafe_cpu_cmpxchg_4 | ||
817 | # define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) | ||
818 | # endif | ||
819 | # ifndef irqsafe_cpu_cmpxchg_8 | ||
820 | # define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) | ||
821 | # endif | ||
822 | # define irqsafe_cpu_cmpxchg(pcp, oval, nval) \ | ||
823 | __pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval) | ||
824 | #endif | ||
825 | |||
623 | #endif /* __LINUX_PERCPU_H */ | 826 | #endif /* __LINUX_PERCPU_H */ |
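The percpu.h additions above supply generic, preempt_disable()-based fallbacks for this_cpu_add_return(), this_cpu_xchg() and this_cpu_cmpxchg(); architectures such as x86 can override the size-specific macros with single instructions. A hedged usage sketch of the new return-value operations (demo_seq, demo_slot and both functions are illustrative, not kernel symbols):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_seq);
static DEFINE_PER_CPU(void *, demo_slot);

static unsigned long demo_next_seq(void)
{
	/* add and fetch the new value in one preempt-safe step */
	return this_cpu_add_return(demo_seq, 1);
}

static void *demo_publish(void *ptr)
{
	/* install ptr and hand back whatever this CPU held before */
	return this_cpu_xchg(demo_slot, ptr);
}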
diff --git a/kernel/exit.c b/kernel/exit.c index 676149a4ac5f..89c74861a3da 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -69,7 +69,7 @@ static void __unhash_process(struct task_struct *p, bool group_dead) | |||
69 | 69 | ||
70 | list_del_rcu(&p->tasks); | 70 | list_del_rcu(&p->tasks); |
71 | list_del_init(&p->sibling); | 71 | list_del_init(&p->sibling); |
72 | __get_cpu_var(process_counts)--; | 72 | __this_cpu_dec(process_counts); |
73 | } | 73 | } |
74 | list_del_rcu(&p->thread_group); | 74 | list_del_rcu(&p->thread_group); |
75 | } | 75 | } |
diff --git a/kernel/fork.c b/kernel/fork.c index dc1a8bbcea7b..d9b44f20b6b0 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1285,7 +1285,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1285 | attach_pid(p, PIDTYPE_SID, task_session(current)); | 1285 | attach_pid(p, PIDTYPE_SID, task_session(current)); |
1286 | list_add_tail(&p->sibling, &p->real_parent->children); | 1286 | list_add_tail(&p->sibling, &p->real_parent->children); |
1287 | list_add_tail_rcu(&p->tasks, &init_task.tasks); | 1287 | list_add_tail_rcu(&p->tasks, &init_task.tasks); |
1288 | __get_cpu_var(process_counts)++; | 1288 | __this_cpu_inc(process_counts); |
1289 | } | 1289 | } |
1290 | attach_pid(p, PIDTYPE_PID, pid); | 1290 | attach_pid(p, PIDTYPE_PID, pid); |
1291 | nr_threads++; | 1291 | nr_threads++; |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index f2429fc3438c..45da2b6920ab 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -497,7 +497,7 @@ static inline int hrtimer_is_hres_enabled(void) | |||
497 | */ | 497 | */ |
498 | static inline int hrtimer_hres_active(void) | 498 | static inline int hrtimer_hres_active(void) |
499 | { | 499 | { |
500 | return __get_cpu_var(hrtimer_bases).hres_active; | 500 | return __this_cpu_read(hrtimer_bases.hres_active); |
501 | } | 501 | } |
502 | 502 | ||
503 | /* | 503 | /* |
diff --git a/kernel/irq_work.c b/kernel/irq_work.c index 90f881904bb1..c58fa7da8aef 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c | |||
@@ -77,21 +77,21 @@ void __weak arch_irq_work_raise(void) | |||
77 | */ | 77 | */ |
78 | static void __irq_work_queue(struct irq_work *entry) | 78 | static void __irq_work_queue(struct irq_work *entry) |
79 | { | 79 | { |
80 | struct irq_work **head, *next; | 80 | struct irq_work *next; |
81 | 81 | ||
82 | head = &get_cpu_var(irq_work_list); | 82 | preempt_disable(); |
83 | 83 | ||
84 | do { | 84 | do { |
85 | next = *head; | 85 | next = __this_cpu_read(irq_work_list); |
86 | /* Can assign non-atomic because we keep the flags set. */ | 86 | /* Can assign non-atomic because we keep the flags set. */ |
87 | entry->next = next_flags(next, IRQ_WORK_FLAGS); | 87 | entry->next = next_flags(next, IRQ_WORK_FLAGS); |
88 | } while (cmpxchg(head, next, entry) != next); | 88 | } while (this_cpu_cmpxchg(irq_work_list, next, entry) != next); |
89 | 89 | ||
90 | /* The list was empty, raise self-interrupt to start processing. */ | 90 | /* The list was empty, raise self-interrupt to start processing. */ |
91 | if (!irq_work_next(entry)) | 91 | if (!irq_work_next(entry)) |
92 | arch_irq_work_raise(); | 92 | arch_irq_work_raise(); |
93 | 93 | ||
94 | put_cpu_var(irq_work_list); | 94 | preempt_enable(); |
95 | } | 95 | } |
96 | 96 | ||
97 | /* | 97 | /* |
@@ -120,16 +120,16 @@ EXPORT_SYMBOL_GPL(irq_work_queue); | |||
120 | */ | 120 | */ |
121 | void irq_work_run(void) | 121 | void irq_work_run(void) |
122 | { | 122 | { |
123 | struct irq_work *list, **head; | 123 | struct irq_work *list; |
124 | 124 | ||
125 | head = &__get_cpu_var(irq_work_list); | 125 | if (this_cpu_read(irq_work_list) == NULL) |
126 | if (*head == NULL) | ||
127 | return; | 126 | return; |
128 | 127 | ||
129 | BUG_ON(!in_irq()); | 128 | BUG_ON(!in_irq()); |
130 | BUG_ON(!irqs_disabled()); | 129 | BUG_ON(!irqs_disabled()); |
131 | 130 | ||
132 | list = xchg(head, NULL); | 131 | list = this_cpu_xchg(irq_work_list, NULL); |
132 | |||
133 | while (list != NULL) { | 133 | while (list != NULL) { |
134 | struct irq_work *entry = list; | 134 | struct irq_work *entry = list; |
135 | 135 | ||
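irq_work's list push above is the canonical use of the new cmpxchg-style operation: read the per-cpu head, link the new entry, and retry if something on this CPU (an interrupt, for instance) changed the head in between; the drain side detaches the whole list with one xchg. A reduced sketch of that pattern (struct demo_node, demo_list and the two helpers are illustrative names; callers that must stay on one CPU across the loop would still wrap it in preempt_disable(), as __irq_work_queue() does):

#include <linux/percpu.h>

struct demo_node {
	struct demo_node *next;
};

static DEFINE_PER_CPU(struct demo_node *, demo_list);

static void demo_push(struct demo_node *node)
{
	struct demo_node *head;

	do {
		head = __this_cpu_read(demo_list);
		node->next = head;
		/* retry if an interrupt pushed another node meanwhile */
	} while (this_cpu_cmpxchg(demo_list, head, node) != head);
}

static struct demo_node *demo_take_all(void)
{
	/* detach this CPU's entire list in a single operation */
	return this_cpu_xchg(demo_list, NULL);
}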
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 7663e5df0e6f..77981813a1e7 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -317,12 +317,12 @@ void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty) | |||
317 | /* We have preemption disabled.. so it is safe to use __ versions */ | 317 | /* We have preemption disabled.. so it is safe to use __ versions */ |
318 | static inline void set_kprobe_instance(struct kprobe *kp) | 318 | static inline void set_kprobe_instance(struct kprobe *kp) |
319 | { | 319 | { |
320 | __get_cpu_var(kprobe_instance) = kp; | 320 | __this_cpu_write(kprobe_instance, kp); |
321 | } | 321 | } |
322 | 322 | ||
323 | static inline void reset_kprobe_instance(void) | 323 | static inline void reset_kprobe_instance(void) |
324 | { | 324 | { |
325 | __get_cpu_var(kprobe_instance) = NULL; | 325 | __this_cpu_write(kprobe_instance, NULL); |
326 | } | 326 | } |
327 | 327 | ||
328 | /* | 328 | /* |
@@ -965,7 +965,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs, | |||
965 | static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, | 965 | static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, |
966 | int trapnr) | 966 | int trapnr) |
967 | { | 967 | { |
968 | struct kprobe *cur = __get_cpu_var(kprobe_instance); | 968 | struct kprobe *cur = __this_cpu_read(kprobe_instance); |
969 | 969 | ||
970 | /* | 970 | /* |
971 | * if we faulted "during" the execution of a user specified | 971 | * if we faulted "during" the execution of a user specified |
@@ -980,7 +980,7 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, | |||
980 | 980 | ||
981 | static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs) | 981 | static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs) |
982 | { | 982 | { |
983 | struct kprobe *cur = __get_cpu_var(kprobe_instance); | 983 | struct kprobe *cur = __this_cpu_read(kprobe_instance); |
984 | int ret = 0; | 984 | int ret = 0; |
985 | 985 | ||
986 | if (cur && cur->break_handler) { | 986 | if (cur && cur->break_handler) { |
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index d0ddfea6579d..dd4aea806f8e 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -364,8 +364,8 @@ void rcu_irq_exit(void) | |||
364 | WARN_ON_ONCE(rdtp->dynticks & 0x1); | 364 | WARN_ON_ONCE(rdtp->dynticks & 0x1); |
365 | 365 | ||
366 | /* If the interrupt queued a callback, get out of dyntick mode. */ | 366 | /* If the interrupt queued a callback, get out of dyntick mode. */ |
367 | if (__get_cpu_var(rcu_sched_data).nxtlist || | 367 | if (__this_cpu_read(rcu_sched_data.nxtlist) || |
368 | __get_cpu_var(rcu_bh_data).nxtlist) | 368 | __this_cpu_read(rcu_bh_data.nxtlist)) |
369 | set_need_resched(); | 369 | set_need_resched(); |
370 | } | 370 | } |
371 | 371 | ||
diff --git a/kernel/softirq.c b/kernel/softirq.c index c10150cb456b..0823778f87fc 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -70,7 +70,7 @@ char *softirq_to_name[NR_SOFTIRQS] = { | |||
70 | static void wakeup_softirqd(void) | 70 | static void wakeup_softirqd(void) |
71 | { | 71 | { |
72 | /* Interrupts are disabled: no need to stop preemption */ | 72 | /* Interrupts are disabled: no need to stop preemption */ |
73 | struct task_struct *tsk = __get_cpu_var(ksoftirqd); | 73 | struct task_struct *tsk = __this_cpu_read(ksoftirqd); |
74 | 74 | ||
75 | if (tsk && tsk->state != TASK_RUNNING) | 75 | if (tsk && tsk->state != TASK_RUNNING) |
76 | wake_up_process(tsk); | 76 | wake_up_process(tsk); |
@@ -388,8 +388,8 @@ void __tasklet_schedule(struct tasklet_struct *t) | |||
388 | 388 | ||
389 | local_irq_save(flags); | 389 | local_irq_save(flags); |
390 | t->next = NULL; | 390 | t->next = NULL; |
391 | *__get_cpu_var(tasklet_vec).tail = t; | 391 | *__this_cpu_read(tasklet_vec.tail) = t; |
392 | __get_cpu_var(tasklet_vec).tail = &(t->next); | 392 | __this_cpu_write(tasklet_vec.tail, &(t->next)); |
393 | raise_softirq_irqoff(TASKLET_SOFTIRQ); | 393 | raise_softirq_irqoff(TASKLET_SOFTIRQ); |
394 | local_irq_restore(flags); | 394 | local_irq_restore(flags); |
395 | } | 395 | } |
@@ -402,8 +402,8 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) | |||
402 | 402 | ||
403 | local_irq_save(flags); | 403 | local_irq_save(flags); |
404 | t->next = NULL; | 404 | t->next = NULL; |
405 | *__get_cpu_var(tasklet_hi_vec).tail = t; | 405 | *__this_cpu_read(tasklet_hi_vec.tail) = t; |
406 | __get_cpu_var(tasklet_hi_vec).tail = &(t->next); | 406 | __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); |
407 | raise_softirq_irqoff(HI_SOFTIRQ); | 407 | raise_softirq_irqoff(HI_SOFTIRQ); |
408 | local_irq_restore(flags); | 408 | local_irq_restore(flags); |
409 | } | 409 | } |
@@ -414,8 +414,8 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t) | |||
414 | { | 414 | { |
415 | BUG_ON(!irqs_disabled()); | 415 | BUG_ON(!irqs_disabled()); |
416 | 416 | ||
417 | t->next = __get_cpu_var(tasklet_hi_vec).head; | 417 | t->next = __this_cpu_read(tasklet_hi_vec.head); |
418 | __get_cpu_var(tasklet_hi_vec).head = t; | 418 | __this_cpu_write(tasklet_hi_vec.head, t); |
419 | __raise_softirq_irqoff(HI_SOFTIRQ); | 419 | __raise_softirq_irqoff(HI_SOFTIRQ); |
420 | } | 420 | } |
421 | 421 | ||
@@ -426,9 +426,9 @@ static void tasklet_action(struct softirq_action *a) | |||
426 | struct tasklet_struct *list; | 426 | struct tasklet_struct *list; |
427 | 427 | ||
428 | local_irq_disable(); | 428 | local_irq_disable(); |
429 | list = __get_cpu_var(tasklet_vec).head; | 429 | list = __this_cpu_read(tasklet_vec.head); |
430 | __get_cpu_var(tasklet_vec).head = NULL; | 430 | __this_cpu_write(tasklet_vec.head, NULL); |
431 | __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head; | 431 | __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head); |
432 | local_irq_enable(); | 432 | local_irq_enable(); |
433 | 433 | ||
434 | while (list) { | 434 | while (list) { |
@@ -449,8 +449,8 @@ static void tasklet_action(struct softirq_action *a) | |||
449 | 449 | ||
450 | local_irq_disable(); | 450 | local_irq_disable(); |
451 | t->next = NULL; | 451 | t->next = NULL; |
452 | *__get_cpu_var(tasklet_vec).tail = t; | 452 | *__this_cpu_read(tasklet_vec.tail) = t; |
453 | __get_cpu_var(tasklet_vec).tail = &(t->next); | 453 | __this_cpu_write(tasklet_vec.tail, &(t->next)); |
454 | __raise_softirq_irqoff(TASKLET_SOFTIRQ); | 454 | __raise_softirq_irqoff(TASKLET_SOFTIRQ); |
455 | local_irq_enable(); | 455 | local_irq_enable(); |
456 | } | 456 | } |
@@ -461,9 +461,9 @@ static void tasklet_hi_action(struct softirq_action *a) | |||
461 | struct tasklet_struct *list; | 461 | struct tasklet_struct *list; |
462 | 462 | ||
463 | local_irq_disable(); | 463 | local_irq_disable(); |
464 | list = __get_cpu_var(tasklet_hi_vec).head; | 464 | list = __this_cpu_read(tasklet_hi_vec.head); |
465 | __get_cpu_var(tasklet_hi_vec).head = NULL; | 465 | __this_cpu_write(tasklet_hi_vec.head, NULL); |
466 | __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head; | 466 | __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head); |
467 | local_irq_enable(); | 467 | local_irq_enable(); |
468 | 468 | ||
469 | while (list) { | 469 | while (list) { |
@@ -484,8 +484,8 @@ static void tasklet_hi_action(struct softirq_action *a) | |||
484 | 484 | ||
485 | local_irq_disable(); | 485 | local_irq_disable(); |
486 | t->next = NULL; | 486 | t->next = NULL; |
487 | *__get_cpu_var(tasklet_hi_vec).tail = t; | 487 | *__this_cpu_read(tasklet_hi_vec.tail) = t; |
488 | __get_cpu_var(tasklet_hi_vec).tail = &(t->next); | 488 | __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); |
489 | __raise_softirq_irqoff(HI_SOFTIRQ); | 489 | __raise_softirq_irqoff(HI_SOFTIRQ); |
490 | local_irq_enable(); | 490 | local_irq_enable(); |
491 | } | 491 | } |
@@ -802,16 +802,16 @@ static void takeover_tasklets(unsigned int cpu) | |||
802 | 802 | ||
803 | /* Find end, append list for that CPU. */ | 803 | /* Find end, append list for that CPU. */ |
804 | if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { | 804 | if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { |
805 | *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head; | 805 | *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; |
806 | __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail; | 806 | this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail); |
807 | per_cpu(tasklet_vec, cpu).head = NULL; | 807 | per_cpu(tasklet_vec, cpu).head = NULL; |
808 | per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head; | 808 | per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head; |
809 | } | 809 | } |
810 | raise_softirq_irqoff(TASKLET_SOFTIRQ); | 810 | raise_softirq_irqoff(TASKLET_SOFTIRQ); |
811 | 811 | ||
812 | if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) { | 812 | if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) { |
813 | *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head; | 813 | *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head; |
814 | __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail; | 814 | __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail); |
815 | per_cpu(tasklet_hi_vec, cpu).head = NULL; | 815 | per_cpu(tasklet_hi_vec, cpu).head = NULL; |
816 | per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head; | 816 | per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head; |
817 | } | 817 | } |
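The tasklet code above keeps its per-cpu head/tail-pointer list but accesses the pointers through __this_cpu_read()/__this_cpu_write(); interrupts are already disabled around each manipulation, so nothing stronger is needed. A stripped-down sketch of the append step (struct demo_item, demo_vec and the helpers are hypothetical; as with tasklet_vec in softirq_init(), the tail pointer must be pointed at the head during early init):

#include <linux/percpu.h>

struct demo_item {
	struct demo_item *next;
};

struct demo_vec {
	struct demo_item *head;
	struct demo_item **tail;
};

static DEFINE_PER_CPU(struct demo_vec, demo_vec);

static void demo_vec_init(int cpu)
{
	per_cpu(demo_vec, cpu).tail = &per_cpu(demo_vec, cpu).head;
}

static void demo_append(struct demo_item *t)	/* called with irqs off */
{
	t->next = NULL;
	*__this_cpu_read(demo_vec.tail) = t;	   /* link after current tail */
	__this_cpu_write(demo_vec.tail, &t->next); /* list now ends at t */
}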
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 3308fd7f1b52..69691eb4b715 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
@@ -89,8 +89,7 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp, | |||
89 | return -ENOMEM; | 89 | return -ENOMEM; |
90 | 90 | ||
91 | if (!info) { | 91 | if (!info) { |
92 | int seq = get_cpu_var(taskstats_seqnum)++; | 92 | int seq = this_cpu_inc_return(taskstats_seqnum) - 1; |
93 | put_cpu_var(taskstats_seqnum); | ||
94 | 93 | ||
95 | reply = genlmsg_put(skb, 0, seq, &family, 0, cmd); | 94 | reply = genlmsg_put(skb, 0, seq, &family, 0, cmd); |
96 | } else | 95 | } else |
@@ -612,7 +611,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead) | |||
612 | fill_tgid_exit(tsk); | 611 | fill_tgid_exit(tsk); |
613 | } | 612 | } |
614 | 613 | ||
615 | listeners = &__raw_get_cpu_var(listener_array); | 614 | listeners = __this_cpu_ptr(&listener_array); |
616 | if (list_empty(&listeners->list)) | 615 | if (list_empty(&listeners->list)) |
617 | return; | 616 | return; |
618 | 617 | ||
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index b6b898d2eeef..051bc80a0c43 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -49,7 +49,7 @@ struct tick_device *tick_get_device(int cpu) | |||
49 | */ | 49 | */ |
50 | int tick_is_oneshot_available(void) | 50 | int tick_is_oneshot_available(void) |
51 | { | 51 | { |
52 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; | 52 | struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); |
53 | 53 | ||
54 | return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT); | 54 | return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT); |
55 | } | 55 | } |
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index aada0e52680a..5cbc101f908b 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c | |||
@@ -95,7 +95,7 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires, | |||
95 | */ | 95 | */ |
96 | int tick_program_event(ktime_t expires, int force) | 96 | int tick_program_event(ktime_t expires, int force) |
97 | { | 97 | { |
98 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; | 98 | struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); |
99 | 99 | ||
100 | return tick_dev_program_event(dev, expires, force); | 100 | return tick_dev_program_event(dev, expires, force); |
101 | } | 101 | } |
@@ -167,7 +167,7 @@ int tick_oneshot_mode_active(void) | |||
167 | int ret; | 167 | int ret; |
168 | 168 | ||
169 | local_irq_save(flags); | 169 | local_irq_save(flags); |
170 | ret = __get_cpu_var(tick_cpu_device).mode == TICKDEV_MODE_ONESHOT; | 170 | ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT; |
171 | local_irq_restore(flags); | 171 | local_irq_restore(flags); |
172 | 172 | ||
173 | return ret; | 173 | return ret; |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 6e7b575ac33c..d7ebdf4cea98 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -118,12 +118,12 @@ static void __touch_watchdog(void) | |||
118 | { | 118 | { |
119 | int this_cpu = smp_processor_id(); | 119 | int this_cpu = smp_processor_id(); |
120 | 120 | ||
121 | __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu); | 121 | __this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu)); |
122 | } | 122 | } |
123 | 123 | ||
124 | void touch_softlockup_watchdog(void) | 124 | void touch_softlockup_watchdog(void) |
125 | { | 125 | { |
126 | __raw_get_cpu_var(watchdog_touch_ts) = 0; | 126 | __this_cpu_write(watchdog_touch_ts, 0); |
127 | } | 127 | } |
128 | EXPORT_SYMBOL(touch_softlockup_watchdog); | 128 | EXPORT_SYMBOL(touch_softlockup_watchdog); |
129 | 129 | ||
@@ -167,12 +167,12 @@ void touch_softlockup_watchdog_sync(void) | |||
167 | /* watchdog detector functions */ | 167 | /* watchdog detector functions */ |
168 | static int is_hardlockup(void) | 168 | static int is_hardlockup(void) |
169 | { | 169 | { |
170 | unsigned long hrint = __get_cpu_var(hrtimer_interrupts); | 170 | unsigned long hrint = __this_cpu_read(hrtimer_interrupts); |
171 | 171 | ||
172 | if (__get_cpu_var(hrtimer_interrupts_saved) == hrint) | 172 | if (__this_cpu_read(hrtimer_interrupts_saved) == hrint) |
173 | return 1; | 173 | return 1; |
174 | 174 | ||
175 | __get_cpu_var(hrtimer_interrupts_saved) = hrint; | 175 | __this_cpu_write(hrtimer_interrupts_saved, hrint); |
176 | return 0; | 176 | return 0; |
177 | } | 177 | } |
178 | #endif | 178 | #endif |
@@ -205,8 +205,8 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi, | |||
205 | /* Ensure the watchdog never gets throttled */ | 205 | /* Ensure the watchdog never gets throttled */ |
206 | event->hw.interrupts = 0; | 206 | event->hw.interrupts = 0; |
207 | 207 | ||
208 | if (__get_cpu_var(watchdog_nmi_touch) == true) { | 208 | if (__this_cpu_read(watchdog_nmi_touch) == true) { |
209 | __get_cpu_var(watchdog_nmi_touch) = false; | 209 | __this_cpu_write(watchdog_nmi_touch, false); |
210 | return; | 210 | return; |
211 | } | 211 | } |
212 | 212 | ||
@@ -220,7 +220,7 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi, | |||
220 | int this_cpu = smp_processor_id(); | 220 | int this_cpu = smp_processor_id(); |
221 | 221 | ||
222 | /* only print hardlockups once */ | 222 | /* only print hardlockups once */ |
223 | if (__get_cpu_var(hard_watchdog_warn) == true) | 223 | if (__this_cpu_read(hard_watchdog_warn) == true) |
224 | return; | 224 | return; |
225 | 225 | ||
226 | if (hardlockup_panic) | 226 | if (hardlockup_panic) |
@@ -228,16 +228,16 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi, | |||
228 | else | 228 | else |
229 | WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); | 229 | WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); |
230 | 230 | ||
231 | __get_cpu_var(hard_watchdog_warn) = true; | 231 | __this_cpu_write(hard_watchdog_warn, true); |
232 | return; | 232 | return; |
233 | } | 233 | } |
234 | 234 | ||
235 | __get_cpu_var(hard_watchdog_warn) = false; | 235 | __this_cpu_write(hard_watchdog_warn, false); |
236 | return; | 236 | return; |
237 | } | 237 | } |
238 | static void watchdog_interrupt_count(void) | 238 | static void watchdog_interrupt_count(void) |
239 | { | 239 | { |
240 | __get_cpu_var(hrtimer_interrupts)++; | 240 | __this_cpu_inc(hrtimer_interrupts); |
241 | } | 241 | } |
242 | #else | 242 | #else |
243 | static inline void watchdog_interrupt_count(void) { return; } | 243 | static inline void watchdog_interrupt_count(void) { return; } |
@@ -246,7 +246,7 @@ static inline void watchdog_interrupt_count(void) { return; } | |||
246 | /* watchdog kicker functions */ | 246 | /* watchdog kicker functions */ |
247 | static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) | 247 | static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) |
248 | { | 248 | { |
249 | unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts); | 249 | unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts); |
250 | struct pt_regs *regs = get_irq_regs(); | 250 | struct pt_regs *regs = get_irq_regs(); |
251 | int duration; | 251 | int duration; |
252 | 252 | ||
@@ -254,18 +254,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) | |||
254 | watchdog_interrupt_count(); | 254 | watchdog_interrupt_count(); |
255 | 255 | ||
256 | /* kick the softlockup detector */ | 256 | /* kick the softlockup detector */ |
257 | wake_up_process(__get_cpu_var(softlockup_watchdog)); | 257 | wake_up_process(__this_cpu_read(softlockup_watchdog)); |
258 | 258 | ||
259 | /* .. and repeat */ | 259 | /* .. and repeat */ |
260 | hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period())); | 260 | hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period())); |
261 | 261 | ||
262 | if (touch_ts == 0) { | 262 | if (touch_ts == 0) { |
263 | if (unlikely(__get_cpu_var(softlockup_touch_sync))) { | 263 | if (unlikely(__this_cpu_read(softlockup_touch_sync))) { |
264 | /* | 264 | /* |
265 | * If the time stamp was touched atomically | 265 | * If the time stamp was touched atomically |
266 | * make sure the scheduler tick is up to date. | 266 | * make sure the scheduler tick is up to date. |
267 | */ | 267 | */ |
268 | __get_cpu_var(softlockup_touch_sync) = false; | 268 | __this_cpu_write(softlockup_touch_sync, false); |
269 | sched_clock_tick(); | 269 | sched_clock_tick(); |
270 | } | 270 | } |
271 | __touch_watchdog(); | 271 | __touch_watchdog(); |
@@ -281,7 +281,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) | |||
281 | duration = is_softlockup(touch_ts); | 281 | duration = is_softlockup(touch_ts); |
282 | if (unlikely(duration)) { | 282 | if (unlikely(duration)) { |
283 | /* only warn once */ | 283 | /* only warn once */ |
284 | if (__get_cpu_var(soft_watchdog_warn) == true) | 284 | if (__this_cpu_read(soft_watchdog_warn) == true) |
285 | return HRTIMER_RESTART; | 285 | return HRTIMER_RESTART; |
286 | 286 | ||
287 | printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", | 287 | printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", |
@@ -296,9 +296,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) | |||
296 | 296 | ||
297 | if (softlockup_panic) | 297 | if (softlockup_panic) |
298 | panic("softlockup: hung tasks"); | 298 | panic("softlockup: hung tasks"); |
299 | __get_cpu_var(soft_watchdog_warn) = true; | 299 | __this_cpu_write(soft_watchdog_warn, true); |
300 | } else | 300 | } else |
301 | __get_cpu_var(soft_watchdog_warn) = false; | 301 | __this_cpu_write(soft_watchdog_warn, false); |
302 | 302 | ||
303 | return HRTIMER_RESTART; | 303 | return HRTIMER_RESTART; |
304 | } | 304 | } |
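Every change in the watchdog hunk above is the same mechanical substitution: an __get_cpu_var() lvalue access becomes an explicit __this_cpu_read()/__this_cpu_write()/__this_cpu_inc(), which on x86 can compile to a single %gs-relative instruction instead of a per-CPU address calculation followed by a memory access. A minimal sketch of the before/after pattern, using hypothetical per-CPU variables (demo_hits and demo_warned are illustrative, not from the patch) and assuming the caller runs with preemption disabled, as the watchdog paths do:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_hits);
static DEFINE_PER_CPU(int, demo_warned);

/* Old style: name this CPU's slot via __get_cpu_var() and use it as an lvalue. */
static void demo_old(void)
{
	__get_cpu_var(demo_hits)++;
	if (__get_cpu_var(demo_hits) > 10)
		__get_cpu_var(demo_warned) = 1;
}

/* New style: one this_cpu operation per access. */
static void demo_new(void)
{
	__this_cpu_inc(demo_hits);
	if (__this_cpu_read(demo_hits) > 10)
		__this_cpu_write(demo_warned, 1);
}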
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 604678d7d06d..28f2c33c6b53 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c | |||
@@ -72,18 +72,16 @@ EXPORT_SYMBOL(percpu_counter_set); | |||
72 | void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) | 72 | void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) |
73 | { | 73 | { |
74 | s64 count; | 74 | s64 count; |
75 | s32 *pcount; | ||
76 | 75 | ||
77 | preempt_disable(); | 76 | preempt_disable(); |
78 | pcount = this_cpu_ptr(fbc->counters); | 77 | count = __this_cpu_read(*fbc->counters) + amount; |
79 | count = *pcount + amount; | ||
80 | if (count >= batch || count <= -batch) { | 78 | if (count >= batch || count <= -batch) { |
81 | spin_lock(&fbc->lock); | 79 | spin_lock(&fbc->lock); |
82 | fbc->count += count; | 80 | fbc->count += count; |
83 | *pcount = 0; | 81 | __this_cpu_write(*fbc->counters, 0); |
84 | spin_unlock(&fbc->lock); | 82 | spin_unlock(&fbc->lock); |
85 | } else { | 83 | } else { |
86 | *pcount = count; | 84 | __this_cpu_write(*fbc->counters, count); |
87 | } | 85 | } |
88 | preempt_enable(); | 86 | preempt_enable(); |
89 | } | 87 | } |
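The rewritten __percpu_counter_add() keeps its original shape: each CPU accumulates a small s32 delta, and only when that delta reaches the batch size is it folded into the shared fbc->count under the spinlock. The preempt_disable()/preempt_enable() pair stays because the read and the conditional write must hit the same CPU's slot. A hedged usage sketch of the counter API as it looked around this release (two-argument percpu_counter_init(), no gfp parameter yet; the demo_* names are illustrative):

#include <linux/percpu_counter.h>

static struct percpu_counter demo_events;

static int demo_setup(void)
{
	/* May fail: it allocates the per-CPU array. */
	return percpu_counter_init(&demo_events, 0);
}

static void demo_hit(void)
{
	/* Fast path: usually touches only this CPU's s32 slot. */
	percpu_counter_add(&demo_events, 1);
}

static s64 demo_total(void)
{
	/* Exact value: folds every CPU's pending delta into fbc->count. */
	return percpu_counter_sum(&demo_events);
}

static void demo_teardown(void)
{
	percpu_counter_destroy(&demo_events);
}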
diff --git a/mm/percpu.c b/mm/percpu.c index 02ba91230b99..3dd4984bdef8 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -293,12 +293,8 @@ static void *pcpu_mem_alloc(size_t size) | |||
293 | 293 | ||
294 | if (size <= PAGE_SIZE) | 294 | if (size <= PAGE_SIZE) |
295 | return kzalloc(size, GFP_KERNEL); | 295 | return kzalloc(size, GFP_KERNEL); |
296 | else { | 296 | else |
297 | void *ptr = vmalloc(size); | 297 | return vzalloc(size); |
298 | if (ptr) | ||
299 | memset(ptr, 0, size); | ||
300 | return ptr; | ||
301 | } | ||
302 | } | 298 | } |
303 | 299 | ||
304 | /** | 300 | /** |
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
@@ -829,12 +829,12 @@ static void init_reap_node(int cpu) | |||
829 | 829 | ||
830 | static void next_reap_node(void) | 830 | static void next_reap_node(void) |
831 | { | 831 | { |
832 | int node = __get_cpu_var(slab_reap_node); | 832 | int node = __this_cpu_read(slab_reap_node); |
833 | 833 | ||
834 | node = next_node(node, node_online_map); | 834 | node = next_node(node, node_online_map); |
835 | if (unlikely(node >= MAX_NUMNODES)) | 835 | if (unlikely(node >= MAX_NUMNODES)) |
836 | node = first_node(node_online_map); | 836 | node = first_node(node_online_map); |
837 | __get_cpu_var(slab_reap_node) = node; | 837 | __this_cpu_write(slab_reap_node, node); |
838 | } | 838 | } |
839 | 839 | ||
840 | #else | 840 | #else |
@@ -1012,7 +1012,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep, | |||
1012 | */ | 1012 | */ |
1013 | static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) | 1013 | static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) |
1014 | { | 1014 | { |
1015 | int node = __get_cpu_var(slab_reap_node); | 1015 | int node = __this_cpu_read(slab_reap_node); |
1016 | 1016 | ||
1017 | if (l3->alien) { | 1017 | if (l3->alien) { |
1018 | struct array_cache *ac = l3->alien[node]; | 1018 | struct array_cache *ac = l3->alien[node]; |
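Like the watchdog code, the slab reap path can use the double-underscore forms because it runs from contexts that are already pinned to a CPU. The general rule this series leans on: this_cpu_*() operations are safe on their own from any context (on x86 they become a single interrupt- and preemption-safe instruction), while __this_cpu_*() operations assume the caller has already disabled preemption. A small sketch of the distinction, with a hypothetical statistic rather than anything from the patch:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, demo_stat);

static void demo_any_context(void)
{
	/* Safe even if we can be preempted or interrupted here. */
	this_cpu_inc(demo_stat);
}

static void demo_pinned_context(void)
{
	/* Caller-provided pinning lets us use the cheaper __this_cpu form. */
	preempt_disable();
	__this_cpu_add(demo_stat, 2);
	preempt_enable();
}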
diff --git a/mm/vmstat.c b/mm/vmstat.c index 33c33e7a0f9b..312d728976f1 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -167,36 +167,24 @@ static void refresh_zone_stat_thresholds(void) | |||
167 | void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, | 167 | void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, |
168 | int delta) | 168 | int delta) |
169 | { | 169 | { |
170 | struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset); | 170 | struct per_cpu_pageset __percpu *pcp = zone->pageset; |
171 | 171 | s8 __percpu *p = pcp->vm_stat_diff + item; | |
172 | s8 *p = pcp->vm_stat_diff + item; | ||
173 | long x; | 172 | long x; |
173 | long t; | ||
174 | |||
175 | x = delta + __this_cpu_read(*p); | ||
174 | 176 | ||
175 | x = delta + *p; | 177 | t = __this_cpu_read(pcp->stat_threshold); |
176 | 178 | ||
177 | if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) { | 179 | if (unlikely(x > t || x < -t)) { |
178 | zone_page_state_add(x, zone, item); | 180 | zone_page_state_add(x, zone, item); |
179 | x = 0; | 181 | x = 0; |
180 | } | 182 | } |
181 | *p = x; | 183 | __this_cpu_write(*p, x); |
182 | } | 184 | } |
183 | EXPORT_SYMBOL(__mod_zone_page_state); | 185 | EXPORT_SYMBOL(__mod_zone_page_state); |
184 | 186 | ||
185 | /* | 187 | /* |
186 | * For an unknown interrupt state | ||
187 | */ | ||
188 | void mod_zone_page_state(struct zone *zone, enum zone_stat_item item, | ||
189 | int delta) | ||
190 | { | ||
191 | unsigned long flags; | ||
192 | |||
193 | local_irq_save(flags); | ||
194 | __mod_zone_page_state(zone, item, delta); | ||
195 | local_irq_restore(flags); | ||
196 | } | ||
197 | EXPORT_SYMBOL(mod_zone_page_state); | ||
198 | |||
199 | /* | ||
200 | * Optimized increment and decrement functions. | 188 | * Optimized increment and decrement functions. |
201 | * | 189 | * |
202 | * These are only for a single page and therefore can take a struct page * | 190 | * These are only for a single page and therefore can take a struct page * |
@@ -221,16 +209,17 @@ EXPORT_SYMBOL(mod_zone_page_state); | |||
221 | */ | 209 | */ |
222 | void __inc_zone_state(struct zone *zone, enum zone_stat_item item) | 210 | void __inc_zone_state(struct zone *zone, enum zone_stat_item item) |
223 | { | 211 | { |
224 | struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset); | 212 | struct per_cpu_pageset __percpu *pcp = zone->pageset; |
225 | s8 *p = pcp->vm_stat_diff + item; | 213 | s8 __percpu *p = pcp->vm_stat_diff + item; |
214 | s8 v, t; | ||
226 | 215 | ||
227 | (*p)++; | 216 | v = __this_cpu_inc_return(*p); |
217 | t = __this_cpu_read(pcp->stat_threshold); | ||
218 | if (unlikely(v > t)) { | ||
219 | s8 overstep = t >> 1; | ||
228 | 220 | ||
229 | if (unlikely(*p > pcp->stat_threshold)) { | 221 | zone_page_state_add(v + overstep, zone, item); |
230 | int overstep = pcp->stat_threshold / 2; | 222 | __this_cpu_write(*p, -overstep); |
231 | |||
232 | zone_page_state_add(*p + overstep, zone, item); | ||
233 | *p = -overstep; | ||
234 | } | 223 | } |
235 | } | 224 | } |
236 | 225 | ||
@@ -242,16 +231,17 @@ EXPORT_SYMBOL(__inc_zone_page_state); | |||
242 | 231 | ||
243 | void __dec_zone_state(struct zone *zone, enum zone_stat_item item) | 232 | void __dec_zone_state(struct zone *zone, enum zone_stat_item item) |
244 | { | 233 | { |
245 | struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset); | 234 | struct per_cpu_pageset __percpu *pcp = zone->pageset; |
246 | s8 *p = pcp->vm_stat_diff + item; | 235 | s8 __percpu *p = pcp->vm_stat_diff + item; |
247 | 236 | s8 v, t; | |
248 | (*p)--; | ||
249 | 237 | ||
250 | if (unlikely(*p < - pcp->stat_threshold)) { | 238 | v = __this_cpu_dec_return(*p); |
251 | int overstep = pcp->stat_threshold / 2; | 239 | t = __this_cpu_read(pcp->stat_threshold); |
240 | if (unlikely(v < - t)) { | ||
241 | s8 overstep = t >> 1; | ||
252 | 242 | ||
253 | zone_page_state_add(*p - overstep, zone, item); | 243 | zone_page_state_add(v - overstep, zone, item); |
254 | *p = overstep; | 244 | __this_cpu_write(*p, overstep); |
255 | } | 245 | } |
256 | } | 246 | } |
257 | 247 | ||
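The next hunk drops the local_irq_save()/restore() serialization in favour of a this_cpu_cmpxchg() retry loop when CONFIG_CMPXCHG_LOCAL is set. The loop shape is generic: read this CPU's slot, compute the new value, and retry if the slot changed underneath us (or if we migrated to a CPU whose slot holds a different value). A minimal sketch of that pattern with a hypothetical per-CPU value, not the patch's own code:

#include <linux/percpu.h>

static DEFINE_PER_CPU(long, demo_val);

static void demo_add_capped(long delta, long cap)
{
	long old, new;

	do {
		old = this_cpu_read(demo_val);
		new = old + delta;
		if (new > cap)
			new = cap;
		/*
		 * this_cpu_cmpxchg() returns the value it found in the slot;
		 * if it differs from 'old', another update raced with us and
		 * we simply recompute and retry.
		 */
	} while (this_cpu_cmpxchg(demo_val, old, new) != old);
}

mod_state() below follows the same pattern, with the extra step that a value crossing the threshold is flushed into the zone counter and the per-CPU slot reset to the overstep value.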
@@ -261,6 +251,92 @@ void __dec_zone_page_state(struct page *page, enum zone_stat_item item) | |||
261 | } | 251 | } |
262 | EXPORT_SYMBOL(__dec_zone_page_state); | 252 | EXPORT_SYMBOL(__dec_zone_page_state); |
263 | 253 | ||
254 | #ifdef CONFIG_CMPXCHG_LOCAL | ||
255 | /* | ||
256 | * If we have cmpxchg_local support then we do not need to incur the overhead | ||
257 | * that comes with local_irq_save/restore if we use this_cpu_cmpxchg. | ||
258 | * | ||
259 | * mod_state() modifies the zone counter state through atomic per cpu | ||
260 | * operations. | ||
261 | * | ||
262 | * Overstep mode specifies how overstep should be handled: | ||
263 | * 0 No overstepping | ||
264 | * 1 Overstepping half of threshold | ||
265 | * -1 Overstepping minus half of threshold | ||
266 | */ | ||
267 | static inline void mod_state(struct zone *zone, | ||
268 | enum zone_stat_item item, int delta, int overstep_mode) | ||
269 | { | ||
270 | struct per_cpu_pageset __percpu *pcp = zone->pageset; | ||
271 | s8 __percpu *p = pcp->vm_stat_diff + item; | ||
272 | long o, n, t, z; | ||
273 | |||
274 | do { | ||
275 | z = 0; /* overflow to zone counters */ | ||
276 | |||
277 | /* | ||
278 | * The fetching of the stat_threshold is racy. We may apply | ||
279 | * a counter threshold to the wrong cpu if we get | ||
280 | * rescheduled while executing here. However, the following | ||
281 | * will apply the threshold again and therefore bring the | ||
282 | * counter under the threshold. | ||
283 | */ | ||
284 | t = this_cpu_read(pcp->stat_threshold); | ||
285 | |||
286 | o = this_cpu_read(*p); | ||
287 | n = delta + o; | ||
288 | |||
289 | if (n > t || n < -t) { | ||
290 | int os = overstep_mode * (t >> 1); | ||
291 | |||
292 | /* Overflow must be added to zone counters */ | ||
293 | z = n + os; | ||
294 | n = -os; | ||
295 | } | ||
296 | } while (this_cpu_cmpxchg(*p, o, n) != o); | ||
297 | |||
298 | if (z) | ||
299 | zone_page_state_add(z, zone, item); | ||
300 | } | ||
301 | |||
302 | void mod_zone_page_state(struct zone *zone, enum zone_stat_item item, | ||
303 | int delta) | ||
304 | { | ||
305 | mod_state(zone, item, delta, 0); | ||
306 | } | ||
307 | EXPORT_SYMBOL(mod_zone_page_state); | ||
308 | |||
309 | void inc_zone_state(struct zone *zone, enum zone_stat_item item) | ||
310 | { | ||
311 | mod_state(zone, item, 1, 1); | ||
312 | } | ||
313 | |||
314 | void inc_zone_page_state(struct page *page, enum zone_stat_item item) | ||
315 | { | ||
316 | mod_state(page_zone(page), item, 1, 1); | ||
317 | } | ||
318 | EXPORT_SYMBOL(inc_zone_page_state); | ||
319 | |||
320 | void dec_zone_page_state(struct page *page, enum zone_stat_item item) | ||
321 | { | ||
322 | mod_state(page_zone(page), item, -1, -1); | ||
323 | } | ||
324 | EXPORT_SYMBOL(dec_zone_page_state); | ||
325 | #else | ||
326 | /* | ||
327 | * Use interrupt disable to serialize counter updates | ||
328 | */ | ||
329 | void mod_zone_page_state(struct zone *zone, enum zone_stat_item item, | ||
330 | int delta) | ||
331 | { | ||
332 | unsigned long flags; | ||
333 | |||
334 | local_irq_save(flags); | ||
335 | __mod_zone_page_state(zone, item, delta); | ||
336 | local_irq_restore(flags); | ||
337 | } | ||
338 | EXPORT_SYMBOL(mod_zone_page_state); | ||
339 | |||
264 | void inc_zone_state(struct zone *zone, enum zone_stat_item item) | 340 | void inc_zone_state(struct zone *zone, enum zone_stat_item item) |
265 | { | 341 | { |
266 | unsigned long flags; | 342 | unsigned long flags; |
@@ -291,6 +367,7 @@ void dec_zone_page_state(struct page *page, enum zone_stat_item item) | |||
291 | local_irq_restore(flags); | 367 | local_irq_restore(flags); |
292 | } | 368 | } |
293 | EXPORT_SYMBOL(dec_zone_page_state); | 369 | EXPORT_SYMBOL(dec_zone_page_state); |
370 | #endif | ||
294 | 371 | ||
295 | /* | 372 | /* |
296 | * Update the zone counters for one cpu. | 373 | * Update the zone counters for one cpu. |