author     Linus Torvalds <torvalds@linux-foundation.org>   2011-01-07 20:02:58 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-01-07 20:02:58 -0500
commit     72eb6a791459c87a0340318840bb3bd9252b627b (patch)
tree       3bfb8ad99f9c7e511f37f72d57b56a2cea06d753 /arch
parent     23d69b09b78c4876e134f104a3814c30747c53f1 (diff)
parent     55ee4ef30241a62b700f79517e6d5ef2ddbefa67 (diff)
Merge branch 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (30 commits)
gameport: use this_cpu_read instead of lookup
x86: udelay: Use this_cpu_read to avoid address calculation
x86: Use this_cpu_inc_return for nmi counter
x86: Replace uses of current_cpu_data with this_cpu ops
x86: Use this_cpu_ops to optimize code
vmstat: Use per cpu atomics to avoid interrupt disable / enable
irq_work: Use per cpu atomics instead of regular atomics
cpuops: Use cmpxchg for xchg to avoid lock semantics
x86: this_cpu_cmpxchg and this_cpu_xchg operations
percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg support
percpu,x86: relocate this_cpu_add_return() and friends
connector: Use this_cpu operations
xen: Use this_cpu_inc_return
taskstats: Use this_cpu_ops
random: Use this_cpu_inc_return
fs: Use this_cpu_inc_return in buffer.c
highmem: Use this_cpu_xx_return() operations
vmstat: Use this_cpu_inc_return for vm statistics
x86: Support for this_cpu_add, sub, dec, inc_return
percpu: Generic support for this_cpu_add, sub, dec, inc_return
...
Fixed up conflicts in arch/x86/kernel/{apic/nmi.c, apic/x2apic_uv_x.c, process.c}
as per Tejun.
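
The common thread in these 30 commits is replacing the __get_cpu_var()/current_cpu_data pattern, which first computes the address of the local CPU's copy and then dereferences it, with this_cpu_*() operations that compile to a single segment-prefixed instruction on x86. A minimal kernel-style sketch of the conversion follows; the variable and function names are invented for illustration, not taken from the patches.

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, demo_count);      /* invented for this sketch */

    static void demo_old(void)
    {
            /* Old pattern: look up this CPU's copy through the per-cpu
             * offset, then modify it; the caller must keep preemption
             * disabled so the CPU cannot change underneath us. */
            __get_cpu_var(demo_count)++;
    }

    static void demo_new(void)
    {
            /* New pattern: a single %gs-prefixed increment, no address
             * calculation, and correct even from preemptible context. */
            this_cpu_inc(demo_count);
    }

The __this_cpu_*() variants seen in many hunks below are the same operations for callers that already guarantee they cannot be migrated.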
Diffstat (limited to 'arch')
30 files changed, 250 insertions, 97 deletions
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 2ac9069890cd..15588a0ef466 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -310,6 +310,9 @@ config X86_INTERNODE_CACHE_SHIFT | |||
310 | config X86_CMPXCHG | 310 | config X86_CMPXCHG |
311 | def_bool X86_64 || (X86_32 && !M386) | 311 | def_bool X86_64 || (X86_32 && !M386) |
312 | 312 | ||
313 | config CMPXCHG_LOCAL | ||
314 | def_bool X86_64 || (X86_32 && !M386) | ||
315 | |||
313 | config X86_L1_CACHE_SHIFT | 316 | config X86_L1_CACHE_SHIFT |
314 | int | 317 | int |
315 | default "7" if MPENTIUM4 || MPSC | 318 | default "7" if MPENTIUM4 || MPSC |
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index b81002f23614..078ad0caefc6 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -94,7 +94,7 @@ static inline void hw_breakpoint_disable(void) | |||
94 | 94 | ||
95 | static inline int hw_breakpoint_active(void) | 95 | static inline int hw_breakpoint_active(void) |
96 | { | 96 | { |
97 | return __get_cpu_var(cpu_dr7) & DR_GLOBAL_ENABLE_MASK; | 97 | return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK; |
98 | } | 98 | } |
99 | 99 | ||
100 | extern void aout_dump_debugregs(struct user *dump); | 100 | extern void aout_dump_debugregs(struct user *dump); |
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index f899e01a8ac9..8ee45167e817 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -230,6 +230,125 @@ do { \ | |||
230 | }) | 230 | }) |
231 | 231 | ||
232 | /* | 232 | /* |
233 | * Add return operation | ||
234 | */ | ||
235 | #define percpu_add_return_op(var, val) \ | ||
236 | ({ \ | ||
237 | typeof(var) paro_ret__ = val; \ | ||
238 | switch (sizeof(var)) { \ | ||
239 | case 1: \ | ||
240 | asm("xaddb %0, "__percpu_arg(1) \ | ||
241 | : "+q" (paro_ret__), "+m" (var) \ | ||
242 | : : "memory"); \ | ||
243 | break; \ | ||
244 | case 2: \ | ||
245 | asm("xaddw %0, "__percpu_arg(1) \ | ||
246 | : "+r" (paro_ret__), "+m" (var) \ | ||
247 | : : "memory"); \ | ||
248 | break; \ | ||
249 | case 4: \ | ||
250 | asm("xaddl %0, "__percpu_arg(1) \ | ||
251 | : "+r" (paro_ret__), "+m" (var) \ | ||
252 | : : "memory"); \ | ||
253 | break; \ | ||
254 | case 8: \ | ||
255 | asm("xaddq %0, "__percpu_arg(1) \ | ||
256 | : "+re" (paro_ret__), "+m" (var) \ | ||
257 | : : "memory"); \ | ||
258 | break; \ | ||
259 | default: __bad_percpu_size(); \ | ||
260 | } \ | ||
261 | paro_ret__ += val; \ | ||
262 | paro_ret__; \ | ||
263 | }) | ||
264 | |||
265 | /* | ||
266 | * xchg is implemented using cmpxchg without a lock prefix. xchg is | ||
267 | * expensive due to the implied lock prefix. The processor cannot prefetch | ||
268 | * cachelines if xchg is used. | ||
269 | */ | ||
270 | #define percpu_xchg_op(var, nval) \ | ||
271 | ({ \ | ||
272 | typeof(var) pxo_ret__; \ | ||
273 | typeof(var) pxo_new__ = (nval); \ | ||
274 | switch (sizeof(var)) { \ | ||
275 | case 1: \ | ||
276 | asm("\n1:mov "__percpu_arg(1)",%%al" \ | ||
277 | "\n\tcmpxchgb %2, "__percpu_arg(1) \ | ||
278 | "\n\tjnz 1b" \ | ||
279 | : "=a" (pxo_ret__), "+m" (var) \ | ||
280 | : "q" (pxo_new__) \ | ||
281 | : "memory"); \ | ||
282 | break; \ | ||
283 | case 2: \ | ||
284 | asm("\n1:mov "__percpu_arg(1)",%%ax" \ | ||
285 | "\n\tcmpxchgw %2, "__percpu_arg(1) \ | ||
286 | "\n\tjnz 1b" \ | ||
287 | : "=a" (pxo_ret__), "+m" (var) \ | ||
288 | : "r" (pxo_new__) \ | ||
289 | : "memory"); \ | ||
290 | break; \ | ||
291 | case 4: \ | ||
292 | asm("\n1:mov "__percpu_arg(1)",%%eax" \ | ||
293 | "\n\tcmpxchgl %2, "__percpu_arg(1) \ | ||
294 | "\n\tjnz 1b" \ | ||
295 | : "=a" (pxo_ret__), "+m" (var) \ | ||
296 | : "r" (pxo_new__) \ | ||
297 | : "memory"); \ | ||
298 | break; \ | ||
299 | case 8: \ | ||
300 | asm("\n1:mov "__percpu_arg(1)",%%rax" \ | ||
301 | "\n\tcmpxchgq %2, "__percpu_arg(1) \ | ||
302 | "\n\tjnz 1b" \ | ||
303 | : "=a" (pxo_ret__), "+m" (var) \ | ||
304 | : "r" (pxo_new__) \ | ||
305 | : "memory"); \ | ||
306 | break; \ | ||
307 | default: __bad_percpu_size(); \ | ||
308 | } \ | ||
309 | pxo_ret__; \ | ||
310 | }) | ||
311 | |||
312 | /* | ||
313 | * cmpxchg has no such implied lock semantics as a result it is much | ||
314 | * more efficient for cpu local operations. | ||
315 | */ | ||
316 | #define percpu_cmpxchg_op(var, oval, nval) \ | ||
317 | ({ \ | ||
318 | typeof(var) pco_ret__; \ | ||
319 | typeof(var) pco_old__ = (oval); \ | ||
320 | typeof(var) pco_new__ = (nval); \ | ||
321 | switch (sizeof(var)) { \ | ||
322 | case 1: \ | ||
323 | asm("cmpxchgb %2, "__percpu_arg(1) \ | ||
324 | : "=a" (pco_ret__), "+m" (var) \ | ||
325 | : "q" (pco_new__), "0" (pco_old__) \ | ||
326 | : "memory"); \ | ||
327 | break; \ | ||
328 | case 2: \ | ||
329 | asm("cmpxchgw %2, "__percpu_arg(1) \ | ||
330 | : "=a" (pco_ret__), "+m" (var) \ | ||
331 | : "r" (pco_new__), "0" (pco_old__) \ | ||
332 | : "memory"); \ | ||
333 | break; \ | ||
334 | case 4: \ | ||
335 | asm("cmpxchgl %2, "__percpu_arg(1) \ | ||
336 | : "=a" (pco_ret__), "+m" (var) \ | ||
337 | : "r" (pco_new__), "0" (pco_old__) \ | ||
338 | : "memory"); \ | ||
339 | break; \ | ||
340 | case 8: \ | ||
341 | asm("cmpxchgq %2, "__percpu_arg(1) \ | ||
342 | : "=a" (pco_ret__), "+m" (var) \ | ||
343 | : "r" (pco_new__), "0" (pco_old__) \ | ||
344 | : "memory"); \ | ||
345 | break; \ | ||
346 | default: __bad_percpu_size(); \ | ||
347 | } \ | ||
348 | pco_ret__; \ | ||
349 | }) | ||
350 | |||
351 | /* | ||
233 | * percpu_read() makes gcc load the percpu variable every time it is | 352 | * percpu_read() makes gcc load the percpu variable every time it is |
234 | * accessed while percpu_read_stable() allows the value to be cached. | 353 | * accessed while percpu_read_stable() allows the value to be cached. |
235 | * percpu_read_stable() is more efficient and can be used if its value | 354 | * percpu_read_stable() is more efficient and can be used if its value |
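
The comment in the hunk above explains why xchg is avoided: its implied lock prefix is expensive, so the new percpu_xchg_op() loops on an unlocked cmpxchg instead. The same retry-loop idea can be shown stand-alone; the sketch below is a user-space illustration with a GCC builtin, not the kernel macro, and the builtin emits a locked cmpxchg, whereas the per-cpu version can drop the lock prefix because only the owning CPU ever touches the variable.

    #include <stdio.h>

    static long xchg_via_cmpxchg(long *p, long newval)
    {
            long old;

            do {
                    old = *p;       /* plain load, like the "mov" above */
            } while (!__sync_bool_compare_and_swap(p, old, newval));

            return old;             /* previous value, as xchg would return */
    }

    int main(void)
    {
            long slot = 42;
            long prev = xchg_via_cmpxchg(&slot, 7);

            printf("prev=%ld now=%ld\n", prev, slot);       /* prev=42 now=7 */
            return 0;
    }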
@@ -267,6 +386,12 @@ do { \ | |||
267 | #define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) | 386 | #define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) |
268 | #define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) | 387 | #define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) |
269 | #define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) | 388 | #define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) |
389 | /* | ||
390 | * Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much | ||
391 | * faster than an xchg with forced lock semantics. | ||
392 | */ | ||
393 | #define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) | ||
394 | #define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
270 | 395 | ||
271 | #define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | 396 | #define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) |
272 | #define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | 397 | #define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) |
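
The "generic fallback" the comment above refers to lives in the arch-independent percpu layer; for the __this_cpu_*() family the caller already guarantees it cannot be migrated, so the whole exchange reduces to an ordinary read plus write. Roughly, paraphrased for illustration rather than quoted from include/linux/percpu.h:

    #define __this_cpu_xchg_sketch(pcp, nval)                       \
    ({                                                              \
            typeof(pcp) old__ = __this_cpu_read(pcp);               \
            __this_cpu_write(pcp, (nval));                          \
            old__;          /* cheaper than a bus-locked xchg */    \
    })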
@@ -286,6 +411,11 @@ do { \ | |||
286 | #define this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) | 411 | #define this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) |
287 | #define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) | 412 | #define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) |
288 | #define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) | 413 | #define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) |
414 | #define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval) | ||
415 | #define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval) | ||
416 | #define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval) | ||
417 | #define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) | ||
418 | #define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
289 | 419 | ||
290 | #define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val) | 420 | #define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val) |
291 | #define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val) | 421 | #define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val) |
@@ -299,6 +429,31 @@ do { \ | |||
299 | #define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) | 429 | #define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) |
300 | #define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) | 430 | #define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) |
301 | #define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) | 431 | #define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) |
432 | #define irqsafe_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval) | ||
433 | #define irqsafe_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval) | ||
434 | #define irqsafe_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval) | ||
435 | #define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) | ||
436 | #define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
437 | |||
438 | #ifndef CONFIG_M386 | ||
439 | #define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val) | ||
440 | #define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val) | ||
441 | #define __this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val) | ||
442 | #define __this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
443 | #define __this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
444 | #define __this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
445 | |||
446 | #define this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val) | ||
447 | #define this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val) | ||
448 | #define this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val) | ||
449 | #define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
450 | #define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
451 | #define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
452 | |||
453 | #define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
454 | #define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
455 | #define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) | ||
456 | #endif /* !CONFIG_M386 */ | ||
302 | 457 | ||
303 | /* | 458 | /* |
304 | * Per cpu atomic 64 bit operations are only available under 64 bit. | 459 | * Per cpu atomic 64 bit operations are only available under 64 bit. |
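
On x86 the add_return and cmpxchg forms wired up above (xadd and cmpxchg without a lock prefix) make common per-CPU read-modify-write sequences a single instruction. An invented usage sketch, kernel context assumed:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned int, demo_seq);  /* invented for this sketch */

    static unsigned int demo_next_seq(void)
    {
            /* Compiles to a single xadd on this CPU's copy, so the
             * read-modify-write cannot be torn by an interrupt or by
             * migration to another CPU. */
            return this_cpu_add_return(demo_seq, 1);
    }

The #ifndef CONFIG_M386 guard mirrors the new CMPXCHG_LOCAL symbol in Kconfig.cpu above: a 386 has neither cmpxchg nor xadd, so those builds stay on the generic fallbacks.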
@@ -311,6 +466,7 @@ do { \ | |||
311 | #define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) | 466 | #define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) |
312 | #define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) | 467 | #define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) |
313 | #define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | 468 | #define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) |
469 | #define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val) | ||
314 | 470 | ||
315 | #define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) | 471 | #define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) |
316 | #define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) | 472 | #define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) |
@@ -318,12 +474,12 @@ do { \ | |||
318 | #define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) | 474 | #define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) |
319 | #define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) | 475 | #define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) |
320 | #define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | 476 | #define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) |
477 | #define this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val) | ||
321 | 478 | ||
322 | #define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val) | 479 | #define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val) |
323 | #define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) | 480 | #define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) |
324 | #define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) | 481 | #define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) |
325 | #define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) | 482 | #define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) |
326 | |||
327 | #endif | 483 | #endif |
328 | 484 | ||
329 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ | 485 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index cae9c3cb95cf..c6efecf85a6a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -141,10 +141,9 @@ extern __u32 cpu_caps_set[NCAPINTS]; | |||
141 | #ifdef CONFIG_SMP | 141 | #ifdef CONFIG_SMP |
142 | DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); | 142 | DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); |
143 | #define cpu_data(cpu) per_cpu(cpu_info, cpu) | 143 | #define cpu_data(cpu) per_cpu(cpu_info, cpu) |
144 | #define current_cpu_data __get_cpu_var(cpu_info) | ||
145 | #else | 144 | #else |
145 | #define cpu_info boot_cpu_data | ||
146 | #define cpu_data(cpu) boot_cpu_data | 146 | #define cpu_data(cpu) boot_cpu_data |
147 | #define current_cpu_data boot_cpu_data | ||
148 | #endif | 147 | #endif |
149 | 148 | ||
150 | extern const struct seq_operations cpuinfo_op; | 149 | extern const struct seq_operations cpuinfo_op; |
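
Removing current_cpu_data works in both configurations because of the new UP alias: with cpu_info defined to boot_cpu_data, the expressions the SMP code uses, such as __this_cpu_read(cpu_info.x86) or __this_cpu_ptr(&cpu_info), still compile on uniprocessor builds and resolve to plain accesses of boot_cpu_data. A hypothetical example, kernel context assumed:

    #include <asm/processor.h>

    static inline int demo_cpu_family(void)         /* invented helper */
    {
            /* SMP: reads this CPU's cpu_info; UP: reads boot_cpu_data.x86 */
            return __this_cpu_read(cpu_info.x86);
    }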
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index ce65d449b750..79e6baa8aa0a 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -516,7 +516,7 @@ static void __cpuinit setup_APIC_timer(void) | |||
516 | { | 516 | { |
517 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); | 517 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); |
518 | 518 | ||
519 | if (cpu_has(¤t_cpu_data, X86_FEATURE_ARAT)) { | 519 | if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_ARAT)) { |
520 | lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; | 520 | lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; |
521 | /* Make LAPIC timer preferrable over percpu HPET */ | 521 | /* Make LAPIC timer preferrable over percpu HPET */ |
522 | lapic_clockevent.rating = 150; | 522 | lapic_clockevent.rating = 150; |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 52735a710c30..697dc34b7b87 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2329,7 +2329,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
2329 | unsigned int irr; | 2329 | unsigned int irr; |
2330 | struct irq_desc *desc; | 2330 | struct irq_desc *desc; |
2331 | struct irq_cfg *cfg; | 2331 | struct irq_cfg *cfg; |
2332 | irq = __get_cpu_var(vector_irq)[vector]; | 2332 | irq = __this_cpu_read(vector_irq[vector]); |
2333 | 2333 | ||
2334 | if (irq == -1) | 2334 | if (irq == -1) |
2335 | continue; | 2335 | continue; |
@@ -2363,7 +2363,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
2363 | apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); | 2363 | apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); |
2364 | goto unlock; | 2364 | goto unlock; |
2365 | } | 2365 | } |
2366 | __get_cpu_var(vector_irq)[vector] = -1; | 2366 | __this_cpu_write(vector_irq[vector], -1); |
2367 | unlock: | 2367 | unlock: |
2368 | raw_spin_unlock(&desc->lock); | 2368 | raw_spin_unlock(&desc->lock); |
2369 | } | 2369 | } |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 2a3f2a7db243..ecca5f41ad2c 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -120,8 +120,8 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
120 | else if (!strcmp(oem_table_id, "UVX")) | 120 | else if (!strcmp(oem_table_id, "UVX")) |
121 | uv_system_type = UV_X2APIC; | 121 | uv_system_type = UV_X2APIC; |
122 | else if (!strcmp(oem_table_id, "UVH")) { | 122 | else if (!strcmp(oem_table_id, "UVH")) { |
123 | __get_cpu_var(x2apic_extra_bits) = | 123 | __this_cpu_write(x2apic_extra_bits, |
124 | pnodeid << uvh_apicid.s.pnode_shift; | 124 | pnodeid << uvh_apicid.s.pnode_shift); |
125 | uv_system_type = UV_NON_UNIQUE_APIC; | 125 | uv_system_type = UV_NON_UNIQUE_APIC; |
126 | uv_set_apicid_hibit(); | 126 | uv_set_apicid_hibit(); |
127 | return 1; | 127 | return 1; |
@@ -286,7 +286,7 @@ static unsigned int x2apic_get_apic_id(unsigned long x) | |||
286 | unsigned int id; | 286 | unsigned int id; |
287 | 287 | ||
288 | WARN_ON(preemptible() && num_online_cpus() > 1); | 288 | WARN_ON(preemptible() && num_online_cpus() > 1); |
289 | id = x | __get_cpu_var(x2apic_extra_bits); | 289 | id = x | __this_cpu_read(x2apic_extra_bits); |
290 | 290 | ||
291 | return id; | 291 | return id; |
292 | } | 292 | } |
@@ -378,7 +378,7 @@ struct apic __refdata apic_x2apic_uv_x = { | |||
378 | 378 | ||
379 | static __cpuinit void set_x2apic_extra_bits(int pnode) | 379 | static __cpuinit void set_x2apic_extra_bits(int pnode) |
380 | { | 380 | { |
381 | __get_cpu_var(x2apic_extra_bits) = (pnode << 6); | 381 | __this_cpu_write(x2apic_extra_bits, (pnode << 6)); |
382 | } | 382 | } |
383 | 383 | ||
384 | /* | 384 | /* |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 9e093f8fe78c..7c7bedb83c5a 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -668,7 +668,7 @@ EXPORT_SYMBOL_GPL(amd_erratum_383); | |||
668 | 668 | ||
669 | bool cpu_has_amd_erratum(const int *erratum) | 669 | bool cpu_has_amd_erratum(const int *erratum) |
670 | { | 670 | { |
671 | struct cpuinfo_x86 *cpu = ¤t_cpu_data; | 671 | struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info); |
672 | int osvw_id = *erratum++; | 672 | int osvw_id = *erratum++; |
673 | u32 range; | 673 | u32 range; |
674 | u32 ms; | 674 | u32 ms; |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 491977baf6c0..35c7e65e59be 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -521,7 +521,7 @@ static void check_supported_cpu(void *_rc) | |||
521 | 521 | ||
522 | *rc = -ENODEV; | 522 | *rc = -ENODEV; |
523 | 523 | ||
524 | if (current_cpu_data.x86_vendor != X86_VENDOR_AMD) | 524 | if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD) |
525 | return; | 525 | return; |
526 | 526 | ||
527 | eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); | 527 | eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); |
@@ -1377,7 +1377,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) | |||
1377 | static void query_values_on_cpu(void *_err) | 1377 | static void query_values_on_cpu(void *_err) |
1378 | { | 1378 | { |
1379 | int *err = _err; | 1379 | int *err = _err; |
1380 | struct powernow_k8_data *data = __get_cpu_var(powernow_data); | 1380 | struct powernow_k8_data *data = __this_cpu_read(powernow_data); |
1381 | 1381 | ||
1382 | *err = query_current_values_with_pending_wait(data); | 1382 | *err = query_current_values_with_pending_wait(data); |
1383 | } | 1383 | } |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 9ecf81f9b90f..7283e98deaae 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -265,7 +265,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
265 | line_size = l2.line_size; | 265 | line_size = l2.line_size; |
266 | lines_per_tag = l2.lines_per_tag; | 266 | lines_per_tag = l2.lines_per_tag; |
267 | /* cpu_data has errata corrections for K7 applied */ | 267 | /* cpu_data has errata corrections for K7 applied */ |
268 | size_in_kb = current_cpu_data.x86_cache_size; | 268 | size_in_kb = __this_cpu_read(cpu_info.x86_cache_size); |
269 | break; | 269 | break; |
270 | case 3: | 270 | case 3: |
271 | if (!l3.val) | 271 | if (!l3.val) |
@@ -287,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
287 | eax->split.type = types[leaf]; | 287 | eax->split.type = types[leaf]; |
288 | eax->split.level = levels[leaf]; | 288 | eax->split.level = levels[leaf]; |
289 | eax->split.num_threads_sharing = 0; | 289 | eax->split.num_threads_sharing = 0; |
290 | eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1; | 290 | eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1; |
291 | 291 | ||
292 | 292 | ||
293 | if (assoc == 0xffff) | 293 | if (assoc == 0xffff) |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 7a35b72d7c03..d916183b7f9c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -326,7 +326,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) | |||
326 | 326 | ||
327 | static int msr_to_offset(u32 msr) | 327 | static int msr_to_offset(u32 msr) |
328 | { | 328 | { |
329 | unsigned bank = __get_cpu_var(injectm.bank); | 329 | unsigned bank = __this_cpu_read(injectm.bank); |
330 | 330 | ||
331 | if (msr == rip_msr) | 331 | if (msr == rip_msr) |
332 | return offsetof(struct mce, ip); | 332 | return offsetof(struct mce, ip); |
@@ -346,7 +346,7 @@ static u64 mce_rdmsrl(u32 msr) | |||
346 | { | 346 | { |
347 | u64 v; | 347 | u64 v; |
348 | 348 | ||
349 | if (__get_cpu_var(injectm).finished) { | 349 | if (__this_cpu_read(injectm.finished)) { |
350 | int offset = msr_to_offset(msr); | 350 | int offset = msr_to_offset(msr); |
351 | 351 | ||
352 | if (offset < 0) | 352 | if (offset < 0) |
@@ -369,7 +369,7 @@ static u64 mce_rdmsrl(u32 msr) | |||
369 | 369 | ||
370 | static void mce_wrmsrl(u32 msr, u64 v) | 370 | static void mce_wrmsrl(u32 msr, u64 v) |
371 | { | 371 | { |
372 | if (__get_cpu_var(injectm).finished) { | 372 | if (__this_cpu_read(injectm.finished)) { |
373 | int offset = msr_to_offset(msr); | 373 | int offset = msr_to_offset(msr); |
374 | 374 | ||
375 | if (offset >= 0) | 375 | if (offset >= 0) |
@@ -1159,7 +1159,7 @@ static void mce_start_timer(unsigned long data) | |||
1159 | 1159 | ||
1160 | WARN_ON(smp_processor_id() != data); | 1160 | WARN_ON(smp_processor_id() != data); |
1161 | 1161 | ||
1162 | if (mce_available(¤t_cpu_data)) { | 1162 | if (mce_available(__this_cpu_ptr(&cpu_info))) { |
1163 | machine_check_poll(MCP_TIMESTAMP, | 1163 | machine_check_poll(MCP_TIMESTAMP, |
1164 | &__get_cpu_var(mce_poll_banks)); | 1164 | &__get_cpu_var(mce_poll_banks)); |
1165 | } | 1165 | } |
@@ -1767,7 +1767,7 @@ static int mce_shutdown(struct sys_device *dev) | |||
1767 | static int mce_resume(struct sys_device *dev) | 1767 | static int mce_resume(struct sys_device *dev) |
1768 | { | 1768 | { |
1769 | __mcheck_cpu_init_generic(); | 1769 | __mcheck_cpu_init_generic(); |
1770 | __mcheck_cpu_init_vendor(¤t_cpu_data); | 1770 | __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info)); |
1771 | 1771 | ||
1772 | return 0; | 1772 | return 0; |
1773 | } | 1773 | } |
@@ -1775,7 +1775,7 @@ static int mce_resume(struct sys_device *dev) | |||
1775 | static void mce_cpu_restart(void *data) | 1775 | static void mce_cpu_restart(void *data) |
1776 | { | 1776 | { |
1777 | del_timer_sync(&__get_cpu_var(mce_timer)); | 1777 | del_timer_sync(&__get_cpu_var(mce_timer)); |
1778 | if (!mce_available(¤t_cpu_data)) | 1778 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
1779 | return; | 1779 | return; |
1780 | __mcheck_cpu_init_generic(); | 1780 | __mcheck_cpu_init_generic(); |
1781 | __mcheck_cpu_init_timer(); | 1781 | __mcheck_cpu_init_timer(); |
@@ -1790,7 +1790,7 @@ static void mce_restart(void) | |||
1790 | /* Toggle features for corrected errors */ | 1790 | /* Toggle features for corrected errors */ |
1791 | static void mce_disable_ce(void *all) | 1791 | static void mce_disable_ce(void *all) |
1792 | { | 1792 | { |
1793 | if (!mce_available(¤t_cpu_data)) | 1793 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
1794 | return; | 1794 | return; |
1795 | if (all) | 1795 | if (all) |
1796 | del_timer_sync(&__get_cpu_var(mce_timer)); | 1796 | del_timer_sync(&__get_cpu_var(mce_timer)); |
@@ -1799,7 +1799,7 @@ static void mce_disable_ce(void *all) | |||
1799 | 1799 | ||
1800 | static void mce_enable_ce(void *all) | 1800 | static void mce_enable_ce(void *all) |
1801 | { | 1801 | { |
1802 | if (!mce_available(¤t_cpu_data)) | 1802 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
1803 | return; | 1803 | return; |
1804 | cmci_reenable(); | 1804 | cmci_reenable(); |
1805 | cmci_recheck(); | 1805 | cmci_recheck(); |
@@ -2022,7 +2022,7 @@ static void __cpuinit mce_disable_cpu(void *h) | |||
2022 | unsigned long action = *(unsigned long *)h; | 2022 | unsigned long action = *(unsigned long *)h; |
2023 | int i; | 2023 | int i; |
2024 | 2024 | ||
2025 | if (!mce_available(¤t_cpu_data)) | 2025 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
2026 | return; | 2026 | return; |
2027 | 2027 | ||
2028 | if (!(action & CPU_TASKS_FROZEN)) | 2028 | if (!(action & CPU_TASKS_FROZEN)) |
@@ -2040,7 +2040,7 @@ static void __cpuinit mce_reenable_cpu(void *h) | |||
2040 | unsigned long action = *(unsigned long *)h; | 2040 | unsigned long action = *(unsigned long *)h; |
2041 | int i; | 2041 | int i; |
2042 | 2042 | ||
2043 | if (!mce_available(¤t_cpu_data)) | 2043 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
2044 | return; | 2044 | return; |
2045 | 2045 | ||
2046 | if (!(action & CPU_TASKS_FROZEN)) | 2046 | if (!(action & CPU_TASKS_FROZEN)) |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 6fcd0936194f..8694ef56459d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -130,7 +130,7 @@ void cmci_recheck(void) | |||
130 | unsigned long flags; | 130 | unsigned long flags; |
131 | int banks; | 131 | int banks; |
132 | 132 | ||
133 | if (!mce_available(¤t_cpu_data) || !cmci_supported(&banks)) | 133 | if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) |
134 | return; | 134 | return; |
135 | local_irq_save(flags); | 135 | local_irq_save(flags); |
136 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); | 136 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0a360d146596..04921017abe0 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -997,8 +997,7 @@ x86_perf_event_set_period(struct perf_event *event) | |||
997 | 997 | ||
998 | static void x86_pmu_enable_event(struct perf_event *event) | 998 | static void x86_pmu_enable_event(struct perf_event *event) |
999 | { | 999 | { |
1000 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1000 | if (__this_cpu_read(cpu_hw_events.enabled)) |
1001 | if (cpuc->enabled) | ||
1002 | __x86_pmu_enable_event(&event->hw, | 1001 | __x86_pmu_enable_event(&event->hw, |
1003 | ARCH_PERFMON_EVENTSEL_ENABLE); | 1002 | ARCH_PERFMON_EVENTSEL_ENABLE); |
1004 | } | 1003 | } |
@@ -1272,7 +1271,7 @@ perf_event_nmi_handler(struct notifier_block *self, | |||
1272 | break; | 1271 | break; |
1273 | case DIE_NMIUNKNOWN: | 1272 | case DIE_NMIUNKNOWN: |
1274 | this_nmi = percpu_read(irq_stat.__nmi_count); | 1273 | this_nmi = percpu_read(irq_stat.__nmi_count); |
1275 | if (this_nmi != __get_cpu_var(pmu_nmi).marked) | 1274 | if (this_nmi != __this_cpu_read(pmu_nmi.marked)) |
1276 | /* let the kernel handle the unknown nmi */ | 1275 | /* let the kernel handle the unknown nmi */ |
1277 | return NOTIFY_DONE; | 1276 | return NOTIFY_DONE; |
1278 | /* | 1277 | /* |
@@ -1296,8 +1295,8 @@ perf_event_nmi_handler(struct notifier_block *self, | |||
1296 | this_nmi = percpu_read(irq_stat.__nmi_count); | 1295 | this_nmi = percpu_read(irq_stat.__nmi_count); |
1297 | if ((handled > 1) || | 1296 | if ((handled > 1) || |
1298 | /* the next nmi could be a back-to-back nmi */ | 1297 | /* the next nmi could be a back-to-back nmi */ |
1299 | ((__get_cpu_var(pmu_nmi).marked == this_nmi) && | 1298 | ((__this_cpu_read(pmu_nmi.marked) == this_nmi) && |
1300 | (__get_cpu_var(pmu_nmi).handled > 1))) { | 1299 | (__this_cpu_read(pmu_nmi.handled) > 1))) { |
1301 | /* | 1300 | /* |
1302 | * We could have two subsequent back-to-back nmis: The | 1301 | * We could have two subsequent back-to-back nmis: The |
1303 | * first handles more than one counter, the 2nd | 1302 | * first handles more than one counter, the 2nd |
@@ -1308,8 +1307,8 @@ perf_event_nmi_handler(struct notifier_block *self, | |||
1308 | * handling more than one counter. We will mark the | 1307 | * handling more than one counter. We will mark the |
1309 | * next (3rd) and then drop it if unhandled. | 1308 | * next (3rd) and then drop it if unhandled. |
1310 | */ | 1309 | */ |
1311 | __get_cpu_var(pmu_nmi).marked = this_nmi + 1; | 1310 | __this_cpu_write(pmu_nmi.marked, this_nmi + 1); |
1312 | __get_cpu_var(pmu_nmi).handled = handled; | 1311 | __this_cpu_write(pmu_nmi.handled, handled); |
1313 | } | 1312 | } |
1314 | 1313 | ||
1315 | return NOTIFY_STOP; | 1314 | return NOTIFY_STOP; |
@@ -1484,11 +1483,9 @@ static inline void x86_pmu_read(struct perf_event *event) | |||
1484 | */ | 1483 | */ |
1485 | static void x86_pmu_start_txn(struct pmu *pmu) | 1484 | static void x86_pmu_start_txn(struct pmu *pmu) |
1486 | { | 1485 | { |
1487 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
1488 | |||
1489 | perf_pmu_disable(pmu); | 1486 | perf_pmu_disable(pmu); |
1490 | cpuc->group_flag |= PERF_EVENT_TXN; | 1487 | __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN); |
1491 | cpuc->n_txn = 0; | 1488 | __this_cpu_write(cpu_hw_events.n_txn, 0); |
1492 | } | 1489 | } |
1493 | 1490 | ||
1494 | /* | 1491 | /* |
@@ -1498,14 +1495,12 @@ static void x86_pmu_start_txn(struct pmu *pmu) | |||
1498 | */ | 1495 | */ |
1499 | static void x86_pmu_cancel_txn(struct pmu *pmu) | 1496 | static void x86_pmu_cancel_txn(struct pmu *pmu) |
1500 | { | 1497 | { |
1501 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1498 | __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN); |
1502 | |||
1503 | cpuc->group_flag &= ~PERF_EVENT_TXN; | ||
1504 | /* | 1499 | /* |
1505 | * Truncate the collected events. | 1500 | * Truncate the collected events. |
1506 | */ | 1501 | */ |
1507 | cpuc->n_added -= cpuc->n_txn; | 1502 | __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn)); |
1508 | cpuc->n_events -= cpuc->n_txn; | 1503 | __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn)); |
1509 | perf_pmu_enable(pmu); | 1504 | perf_pmu_enable(pmu); |
1510 | } | 1505 | } |
1511 | 1506 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 24e390e40f2e..008835c1d79c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -649,7 +649,7 @@ static void intel_pmu_enable_event(struct perf_event *event) | |||
649 | struct hw_perf_event *hwc = &event->hw; | 649 | struct hw_perf_event *hwc = &event->hw; |
650 | 650 | ||
651 | if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { | 651 | if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { |
652 | if (!__get_cpu_var(cpu_hw_events).enabled) | 652 | if (!__this_cpu_read(cpu_hw_events.enabled)) |
653 | return; | 653 | return; |
654 | 654 | ||
655 | intel_pmu_enable_bts(hwc->config); | 655 | intel_pmu_enable_bts(hwc->config); |
@@ -679,7 +679,7 @@ static int intel_pmu_save_and_restart(struct perf_event *event) | |||
679 | 679 | ||
680 | static void intel_pmu_reset(void) | 680 | static void intel_pmu_reset(void) |
681 | { | 681 | { |
682 | struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds; | 682 | struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); |
683 | unsigned long flags; | 683 | unsigned long flags; |
684 | int idx; | 684 | int idx; |
685 | 685 | ||
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 298448656b60..382eb2936d4d 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -170,9 +170,9 @@ static void ftrace_mod_code(void) | |||
170 | 170 | ||
171 | void ftrace_nmi_enter(void) | 171 | void ftrace_nmi_enter(void) |
172 | { | 172 | { |
173 | __get_cpu_var(save_modifying_code) = modifying_code; | 173 | __this_cpu_write(save_modifying_code, modifying_code); |
174 | 174 | ||
175 | if (!__get_cpu_var(save_modifying_code)) | 175 | if (!__this_cpu_read(save_modifying_code)) |
176 | return; | 176 | return; |
177 | 177 | ||
178 | if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { | 178 | if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { |
@@ -186,7 +186,7 @@ void ftrace_nmi_enter(void) | |||
186 | 186 | ||
187 | void ftrace_nmi_exit(void) | 187 | void ftrace_nmi_exit(void) |
188 | { | 188 | { |
189 | if (!__get_cpu_var(save_modifying_code)) | 189 | if (!__this_cpu_read(save_modifying_code)) |
190 | return; | 190 | return; |
191 | 191 | ||
192 | /* Finish all executions before clearing nmi_running */ | 192 | /* Finish all executions before clearing nmi_running */ |
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 42c594254507..02f07634d265 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -122,7 +122,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
122 | return -EBUSY; | 122 | return -EBUSY; |
123 | 123 | ||
124 | set_debugreg(info->address, i); | 124 | set_debugreg(info->address, i); |
125 | __get_cpu_var(cpu_debugreg[i]) = info->address; | 125 | __this_cpu_write(cpu_debugreg[i], info->address); |
126 | 126 | ||
127 | dr7 = &__get_cpu_var(cpu_dr7); | 127 | dr7 = &__get_cpu_var(cpu_dr7); |
128 | *dr7 |= encode_dr7(i, info->len, info->type); | 128 | *dr7 |= encode_dr7(i, info->len, info->type); |
@@ -397,12 +397,12 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) | |||
397 | 397 | ||
398 | void hw_breakpoint_restore(void) | 398 | void hw_breakpoint_restore(void) |
399 | { | 399 | { |
400 | set_debugreg(__get_cpu_var(cpu_debugreg[0]), 0); | 400 | set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0); |
401 | set_debugreg(__get_cpu_var(cpu_debugreg[1]), 1); | 401 | set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1); |
402 | set_debugreg(__get_cpu_var(cpu_debugreg[2]), 2); | 402 | set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2); |
403 | set_debugreg(__get_cpu_var(cpu_debugreg[3]), 3); | 403 | set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3); |
404 | set_debugreg(current->thread.debugreg6, 6); | 404 | set_debugreg(current->thread.debugreg6, 6); |
405 | set_debugreg(__get_cpu_var(cpu_dr7), 7); | 405 | set_debugreg(__this_cpu_read(cpu_dr7), 7); |
406 | } | 406 | } |
407 | EXPORT_SYMBOL_GPL(hw_breakpoint_restore); | 407 | EXPORT_SYMBOL_GPL(hw_breakpoint_restore); |
408 | 408 | ||
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 83ec0175f986..3a43caa3beb7 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -234,7 +234,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs) | |||
234 | exit_idle(); | 234 | exit_idle(); |
235 | irq_enter(); | 235 | irq_enter(); |
236 | 236 | ||
237 | irq = __get_cpu_var(vector_irq)[vector]; | 237 | irq = __this_cpu_read(vector_irq[vector]); |
238 | 238 | ||
239 | if (!handle_irq(irq, regs)) { | 239 | if (!handle_irq(irq, regs)) { |
240 | ack_APIC_irq(); | 240 | ack_APIC_irq(); |
@@ -350,12 +350,12 @@ void fixup_irqs(void) | |||
350 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { | 350 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { |
351 | unsigned int irr; | 351 | unsigned int irr; |
352 | 352 | ||
353 | if (__get_cpu_var(vector_irq)[vector] < 0) | 353 | if (__this_cpu_read(vector_irq[vector]) < 0) |
354 | continue; | 354 | continue; |
355 | 355 | ||
356 | irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); | 356 | irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); |
357 | if (irr & (1 << (vector % 32))) { | 357 | if (irr & (1 << (vector % 32))) { |
358 | irq = __get_cpu_var(vector_irq)[vector]; | 358 | irq = __this_cpu_read(vector_irq[vector]); |
359 | 359 | ||
360 | data = irq_get_irq_data(irq); | 360 | data = irq_get_irq_data(irq); |
361 | raw_spin_lock(&desc->lock); | 361 | raw_spin_lock(&desc->lock); |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 96656f207751..48ff6dcffa02 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -79,7 +79,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) | |||
79 | u32 *isp, arg1, arg2; | 79 | u32 *isp, arg1, arg2; |
80 | 80 | ||
81 | curctx = (union irq_ctx *) current_thread_info(); | 81 | curctx = (union irq_ctx *) current_thread_info(); |
82 | irqctx = __get_cpu_var(hardirq_ctx); | 82 | irqctx = __this_cpu_read(hardirq_ctx); |
83 | 83 | ||
84 | /* | 84 | /* |
85 | * this is where we switch to the IRQ stack. However, if we are | 85 | * this is where we switch to the IRQ stack. However, if we are |
@@ -166,7 +166,7 @@ asmlinkage void do_softirq(void) | |||
166 | 166 | ||
167 | if (local_softirq_pending()) { | 167 | if (local_softirq_pending()) { |
168 | curctx = current_thread_info(); | 168 | curctx = current_thread_info(); |
169 | irqctx = __get_cpu_var(softirq_ctx); | 169 | irqctx = __this_cpu_read(softirq_ctx); |
170 | irqctx->tinfo.task = curctx->task; | 170 | irqctx->tinfo.task = curctx->task; |
171 | irqctx->tinfo.previous_esp = current_stack_pointer; | 171 | irqctx->tinfo.previous_esp = current_stack_pointer; |
172 | 172 | ||
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 5940282bd2f9..d91c477b3f62 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -403,7 +403,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) | |||
403 | 403 | ||
404 | static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) | 404 | static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) |
405 | { | 405 | { |
406 | __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; | 406 | __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); |
407 | kcb->kprobe_status = kcb->prev_kprobe.status; | 407 | kcb->kprobe_status = kcb->prev_kprobe.status; |
408 | kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags; | 408 | kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags; |
409 | kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags; | 409 | kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags; |
@@ -412,7 +412,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) | |||
412 | static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | 412 | static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, |
413 | struct kprobe_ctlblk *kcb) | 413 | struct kprobe_ctlblk *kcb) |
414 | { | 414 | { |
415 | __get_cpu_var(current_kprobe) = p; | 415 | __this_cpu_write(current_kprobe, p); |
416 | kcb->kprobe_saved_flags = kcb->kprobe_old_flags | 416 | kcb->kprobe_saved_flags = kcb->kprobe_old_flags |
417 | = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); | 417 | = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); |
418 | if (is_IF_modifier(p->ainsn.insn)) | 418 | if (is_IF_modifier(p->ainsn.insn)) |
@@ -586,7 +586,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
586 | preempt_enable_no_resched(); | 586 | preempt_enable_no_resched(); |
587 | return 1; | 587 | return 1; |
588 | } else if (kprobe_running()) { | 588 | } else if (kprobe_running()) { |
589 | p = __get_cpu_var(current_kprobe); | 589 | p = __this_cpu_read(current_kprobe); |
590 | if (p->break_handler && p->break_handler(p, regs)) { | 590 | if (p->break_handler && p->break_handler(p, regs)) { |
591 | setup_singlestep(p, regs, kcb, 0); | 591 | setup_singlestep(p, regs, kcb, 0); |
592 | return 1; | 592 | return 1; |
@@ -759,11 +759,11 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) | |||
759 | 759 | ||
760 | orig_ret_address = (unsigned long)ri->ret_addr; | 760 | orig_ret_address = (unsigned long)ri->ret_addr; |
761 | if (ri->rp && ri->rp->handler) { | 761 | if (ri->rp && ri->rp->handler) { |
762 | __get_cpu_var(current_kprobe) = &ri->rp->kp; | 762 | __this_cpu_write(current_kprobe, &ri->rp->kp); |
763 | get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; | 763 | get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; |
764 | ri->ret_addr = correct_ret_addr; | 764 | ri->ret_addr = correct_ret_addr; |
765 | ri->rp->handler(ri, regs); | 765 | ri->rp->handler(ri, regs); |
766 | __get_cpu_var(current_kprobe) = NULL; | 766 | __this_cpu_write(current_kprobe, NULL); |
767 | } | 767 | } |
768 | 768 | ||
769 | recycle_rp_inst(ri, &empty_rp); | 769 | recycle_rp_inst(ri, &empty_rp); |
@@ -1202,10 +1202,10 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, | |||
1202 | regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; | 1202 | regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; |
1203 | regs->orig_ax = ~0UL; | 1203 | regs->orig_ax = ~0UL; |
1204 | 1204 | ||
1205 | __get_cpu_var(current_kprobe) = &op->kp; | 1205 | __this_cpu_write(current_kprobe, &op->kp); |
1206 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | 1206 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; |
1207 | opt_pre_handler(&op->kp, regs); | 1207 | opt_pre_handler(&op->kp, regs); |
1208 | __get_cpu_var(current_kprobe) = NULL; | 1208 | __this_cpu_write(current_kprobe, NULL); |
1209 | } | 1209 | } |
1210 | preempt_enable_no_resched(); | 1210 | preempt_enable_no_resched(); |
1211 | } | 1211 | } |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c852041bfc3d..09c08a1c706f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -446,7 +446,7 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) | |||
446 | trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id()); | 446 | trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id()); |
447 | trace_cpu_idle((ax>>4)+1, smp_processor_id()); | 447 | trace_cpu_idle((ax>>4)+1, smp_processor_id()); |
448 | if (!need_resched()) { | 448 | if (!need_resched()) { |
449 | if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) | 449 | if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR)) |
450 | clflush((void *)¤t_thread_info()->flags); | 450 | clflush((void *)¤t_thread_info()->flags); |
451 | 451 | ||
452 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | 452 | __monitor((void *)¤t_thread_info()->flags, 0, 0); |
@@ -462,7 +462,7 @@ static void mwait_idle(void) | |||
462 | if (!need_resched()) { | 462 | if (!need_resched()) { |
463 | trace_power_start(POWER_CSTATE, 1, smp_processor_id()); | 463 | trace_power_start(POWER_CSTATE, 1, smp_processor_id()); |
464 | trace_cpu_idle(1, smp_processor_id()); | 464 | trace_cpu_idle(1, smp_processor_id()); |
465 | if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) | 465 | if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR)) |
466 | clflush((void *)¤t_thread_info()->flags); | 466 | clflush((void *)¤t_thread_info()->flags); |
467 | 467 | ||
468 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | 468 | __monitor((void *)¤t_thread_info()->flags, 0, 0); |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ee886fe10ef4..c7149c96d079 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -427,7 +427,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
427 | 427 | ||
428 | cpumask_set_cpu(cpu, c->llc_shared_map); | 428 | cpumask_set_cpu(cpu, c->llc_shared_map); |
429 | 429 | ||
430 | if (current_cpu_data.x86_max_cores == 1) { | 430 | if (__this_cpu_read(cpu_info.x86_max_cores) == 1) { |
431 | cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); | 431 | cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); |
432 | c->booted_cores = 1; | 432 | c->booted_cores = 1; |
433 | return; | 433 | return; |
@@ -1089,7 +1089,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1089 | 1089 | ||
1090 | preempt_disable(); | 1090 | preempt_disable(); |
1091 | smp_cpu_index_default(); | 1091 | smp_cpu_index_default(); |
1092 | current_cpu_data = boot_cpu_data; | 1092 | memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info)); |
1093 | cpumask_copy(cpu_callin_mask, cpumask_of(0)); | 1093 | cpumask_copy(cpu_callin_mask, cpumask_of(0)); |
1094 | mb(); | 1094 | mb(); |
1095 | /* | 1095 | /* |
@@ -1383,7 +1383,7 @@ void play_dead_common(void) | |||
1383 | 1383 | ||
1384 | mb(); | 1384 | mb(); |
1385 | /* Ack it */ | 1385 | /* Ack it */ |
1386 | __get_cpu_var(cpu_state) = CPU_DEAD; | 1386 | __this_cpu_write(cpu_state, CPU_DEAD); |
1387 | 1387 | ||
1388 | /* | 1388 | /* |
1389 | * With physical CPU hotplug, we should halt the cpu | 1389 | * With physical CPU hotplug, we should halt the cpu |
@@ -1403,11 +1403,11 @@ static inline void mwait_play_dead(void) | |||
1403 | int i; | 1403 | int i; |
1404 | void *mwait_ptr; | 1404 | void *mwait_ptr; |
1405 | 1405 | ||
1406 | if (!cpu_has(¤t_cpu_data, X86_FEATURE_MWAIT)) | 1406 | if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT)) |
1407 | return; | 1407 | return; |
1408 | if (!cpu_has(¤t_cpu_data, X86_FEATURE_CLFLSH)) | 1408 | if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH)) |
1409 | return; | 1409 | return; |
1410 | if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) | 1410 | if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF) |
1411 | return; | 1411 | return; |
1412 | 1412 | ||
1413 | eax = CPUID_MWAIT_LEAF; | 1413 | eax = CPUID_MWAIT_LEAF; |
@@ -1458,7 +1458,7 @@ static inline void mwait_play_dead(void) | |||
1458 | 1458 | ||
1459 | static inline void hlt_play_dead(void) | 1459 | static inline void hlt_play_dead(void) |
1460 | { | 1460 | { |
1461 | if (current_cpu_data.x86 >= 4) | 1461 | if (__this_cpu_read(cpu_info.x86) >= 4) |
1462 | wbinvd(); | 1462 | wbinvd(); |
1463 | 1463 | ||
1464 | while (1) { | 1464 | while (1) { |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 356a0d455cf9..03d2ea82f35a 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -659,7 +659,7 @@ void restore_sched_clock_state(void) | |||
659 | 659 | ||
660 | local_irq_save(flags); | 660 | local_irq_save(flags); |
661 | 661 | ||
662 | __get_cpu_var(cyc2ns_offset) = 0; | 662 | __this_cpu_write(cyc2ns_offset, 0); |
663 | offset = cyc2ns_suspend - sched_clock(); | 663 | offset = cyc2ns_suspend - sched_clock(); |
664 | 664 | ||
665 | for_each_possible_cpu(cpu) | 665 | for_each_possible_cpu(cpu) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b989e1f1e5d3..46a368cb651e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -976,7 +976,7 @@ static inline u64 nsec_to_cycles(u64 nsec) | |||
976 | if (kvm_tsc_changes_freq()) | 976 | if (kvm_tsc_changes_freq()) |
977 | printk_once(KERN_WARNING | 977 | printk_once(KERN_WARNING |
978 | "kvm: unreliable cycle conversion on adjustable rate TSC\n"); | 978 | "kvm: unreliable cycle conversion on adjustable rate TSC\n"); |
979 | ret = nsec * __get_cpu_var(cpu_tsc_khz); | 979 | ret = nsec * __this_cpu_read(cpu_tsc_khz); |
980 | do_div(ret, USEC_PER_SEC); | 980 | do_div(ret, USEC_PER_SEC); |
981 | return ret; | 981 | return ret; |
982 | } | 982 | } |
@@ -1061,7 +1061,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1061 | local_irq_save(flags); | 1061 | local_irq_save(flags); |
1062 | kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp); | 1062 | kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp); |
1063 | kernel_ns = get_kernel_ns(); | 1063 | kernel_ns = get_kernel_ns(); |
1064 | this_tsc_khz = __get_cpu_var(cpu_tsc_khz); | 1064 | this_tsc_khz = __this_cpu_read(cpu_tsc_khz); |
1065 | 1065 | ||
1066 | if (unlikely(this_tsc_khz == 0)) { | 1066 | if (unlikely(this_tsc_khz == 0)) { |
1067 | local_irq_restore(flags); | 1067 | local_irq_restore(flags); |
@@ -4427,7 +4427,7 @@ EXPORT_SYMBOL_GPL(kvm_fast_pio_out); | |||
4427 | 4427 | ||
4428 | static void tsc_bad(void *info) | 4428 | static void tsc_bad(void *info) |
4429 | { | 4429 | { |
4430 | __get_cpu_var(cpu_tsc_khz) = 0; | 4430 | __this_cpu_write(cpu_tsc_khz, 0); |
4431 | } | 4431 | } |
4432 | 4432 | ||
4433 | static void tsc_khz_changed(void *data) | 4433 | static void tsc_khz_changed(void *data) |
@@ -4441,7 +4441,7 @@ static void tsc_khz_changed(void *data) | |||
4441 | khz = cpufreq_quick_get(raw_smp_processor_id()); | 4441 | khz = cpufreq_quick_get(raw_smp_processor_id()); |
4442 | if (!khz) | 4442 | if (!khz) |
4443 | khz = tsc_khz; | 4443 | khz = tsc_khz; |
4444 | __get_cpu_var(cpu_tsc_khz) = khz; | 4444 | __this_cpu_write(cpu_tsc_khz, khz); |
4445 | } | 4445 | } |
4446 | 4446 | ||
4447 | static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, | 4447 | static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, |
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index ff485d361182..fc45ba887d05 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -121,7 +121,7 @@ inline void __const_udelay(unsigned long xloops) | |||
121 | asm("mull %%edx" | 121 | asm("mull %%edx" |
122 | :"=d" (xloops), "=&a" (d0) | 122 | :"=d" (xloops), "=&a" (d0) |
123 | :"1" (xloops), "0" | 123 | :"1" (xloops), "0" |
124 | (cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4))); | 124 | (this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4))); |
125 | 125 | ||
126 | __delay(++xloops); | 126 | __delay(++xloops); |
127 | } | 127 | } |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 358c8b9c96a7..f24a8533bcdf 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -143,7 +143,7 @@ static inline int has_mux(void) | |||
143 | 143 | ||
144 | inline int op_x86_phys_to_virt(int phys) | 144 | inline int op_x86_phys_to_virt(int phys) |
145 | { | 145 | { |
146 | return __get_cpu_var(switch_index) + phys; | 146 | return __this_cpu_read(switch_index) + phys; |
147 | } | 147 | } |
148 | 148 | ||
149 | inline int op_x86_virt_to_phys(int virt) | 149 | inline int op_x86_virt_to_phys(int virt) |
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index d769cda54082..94b745045e45 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -95,8 +95,8 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model, | |||
95 | * counter width: | 95 | * counter width: |
96 | */ | 96 | */ |
97 | if (!(eax.split.version_id == 0 && | 97 | if (!(eax.split.version_id == 0 && |
98 | current_cpu_data.x86 == 6 && | 98 | __this_cpu_read(cpu_info.x86) == 6 && |
99 | current_cpu_data.x86_model == 15)) { | 99 | __this_cpu_read(cpu_info.x86_model) == 15)) { |
100 | 100 | ||
101 | if (counter_width < eax.split.bit_width) | 101 | if (counter_width < eax.split.bit_width) |
102 | counter_width = eax.split.bit_width; | 102 | counter_width = eax.split.bit_width; |
@@ -235,8 +235,8 @@ static void arch_perfmon_setup_counters(void) | |||
235 | eax.full = cpuid_eax(0xa); | 235 | eax.full = cpuid_eax(0xa); |
236 | 236 | ||
237 | /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */ | 237 | /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */ |
238 | if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 && | 238 | if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 && |
239 | current_cpu_data.x86_model == 15) { | 239 | __this_cpu_read(cpu_info.x86_model) == 15) { |
240 | eax.split.version_id = 2; | 240 | eax.split.version_id = 2; |
241 | eax.split.num_counters = 2; | 241 | eax.split.num_counters = 2; |
242 | eax.split.bit_width = 40; | 242 | eax.split.bit_width = 40; |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 44dcad43989d..aa8c89ae54cf 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -574,8 +574,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g) | |||
574 | 574 | ||
575 | preempt_disable(); | 575 | preempt_disable(); |
576 | 576 | ||
577 | start = __get_cpu_var(idt_desc).address; | 577 | start = __this_cpu_read(idt_desc.address); |
578 | end = start + __get_cpu_var(idt_desc).size + 1; | 578 | end = start + __this_cpu_read(idt_desc.size) + 1; |
579 | 579 | ||
580 | xen_mc_flush(); | 580 | xen_mc_flush(); |
581 | 581 | ||
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index 9e565da5d1f7..4ec8035e3216 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -22,7 +22,7 @@ static inline void xen_mc_batch(void) | |||
22 | unsigned long flags; | 22 | unsigned long flags; |
23 | /* need to disable interrupts until this entry is complete */ | 23 | /* need to disable interrupts until this entry is complete */ |
24 | local_irq_save(flags); | 24 | local_irq_save(flags); |
25 | __get_cpu_var(xen_mc_irq_flags) = flags; | 25 | __this_cpu_write(xen_mc_irq_flags, flags); |
26 | } | 26 | } |
27 | 27 | ||
28 | static inline struct multicall_space xen_mc_entry(size_t args) | 28 | static inline struct multicall_space xen_mc_entry(size_t args) |
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 23e061b9327b..cc9b1e182fcf 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -159,8 +159,8 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl) | |||
159 | { | 159 | { |
160 | struct xen_spinlock *prev; | 160 | struct xen_spinlock *prev; |
161 | 161 | ||
162 | prev = __get_cpu_var(lock_spinners); | 162 | prev = __this_cpu_read(lock_spinners); |
163 | __get_cpu_var(lock_spinners) = xl; | 163 | __this_cpu_write(lock_spinners, xl); |
164 | 164 | ||
165 | wmb(); /* set lock of interest before count */ | 165 | wmb(); /* set lock of interest before count */ |
166 | 166 | ||
@@ -179,14 +179,14 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock | |||
179 | asm(LOCK_PREFIX " decw %0" | 179 | asm(LOCK_PREFIX " decw %0" |
180 | : "+m" (xl->spinners) : : "memory"); | 180 | : "+m" (xl->spinners) : : "memory"); |
181 | wmb(); /* decrement count before restoring lock */ | 181 | wmb(); /* decrement count before restoring lock */ |
182 | __get_cpu_var(lock_spinners) = prev; | 182 | __this_cpu_write(lock_spinners, prev); |
183 | } | 183 | } |
184 | 184 | ||
185 | static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable) | 185 | static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable) |
186 | { | 186 | { |
187 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; | 187 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; |
188 | struct xen_spinlock *prev; | 188 | struct xen_spinlock *prev; |
189 | int irq = __get_cpu_var(lock_kicker_irq); | 189 | int irq = __this_cpu_read(lock_kicker_irq); |
190 | int ret; | 190 | int ret; |
191 | u64 start; | 191 | u64 start; |
192 | 192 | ||
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 5da5e53fb94c..067759e3d6a5 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -135,24 +135,24 @@ static void do_stolen_accounting(void) | |||
135 | 135 | ||
136 | /* Add the appropriate number of ticks of stolen time, | 136 | /* Add the appropriate number of ticks of stolen time, |
137 | including any left-overs from last time. */ | 137 | including any left-overs from last time. */ |
138 | stolen = runnable + offline + __get_cpu_var(xen_residual_stolen); | 138 | stolen = runnable + offline + __this_cpu_read(xen_residual_stolen); |
139 | 139 | ||
140 | if (stolen < 0) | 140 | if (stolen < 0) |
141 | stolen = 0; | 141 | stolen = 0; |
142 | 142 | ||
143 | ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); | 143 | ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); |
144 | __get_cpu_var(xen_residual_stolen) = stolen; | 144 | __this_cpu_write(xen_residual_stolen, stolen); |
145 | account_steal_ticks(ticks); | 145 | account_steal_ticks(ticks); |
146 | 146 | ||
147 | /* Add the appropriate number of ticks of blocked time, | 147 | /* Add the appropriate number of ticks of blocked time, |
148 | including any left-overs from last time. */ | 148 | including any left-overs from last time. */ |
149 | blocked += __get_cpu_var(xen_residual_blocked); | 149 | blocked += __this_cpu_read(xen_residual_blocked); |
150 | 150 | ||
151 | if (blocked < 0) | 151 | if (blocked < 0) |
152 | blocked = 0; | 152 | blocked = 0; |
153 | 153 | ||
154 | ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); | 154 | ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); |
155 | __get_cpu_var(xen_residual_blocked) = blocked; | 155 | __this_cpu_write(xen_residual_blocked, blocked); |
156 | account_idle_ticks(ticks); | 156 | account_idle_ticks(ticks); |
157 | } | 157 | } |
158 | 158 | ||