author     H. Peter Anvin <hpa@linux.intel.com>  2014-10-24 16:26:37 -0400
committer  H. Peter Anvin <hpa@linux.intel.com>  2014-10-24 16:26:37 -0400
commit     db65bcfd9563a7531c3dd46c350565705be1fd84
tree       ff7b068472764e36ab02a739917e206dd11d6b46 /arch/x86/kernel
parent     b47dcbdc5161d3d5756f430191e2840d9b855492
parent     f114040e3ea6e07372334ade75d1ee0775c355e1
Merge tag 'v3.18-rc1' into x86/urgent
Reason:
Need to apply audit patch on top of v3.18-rc1.
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/kernel')
23 files changed, 144 insertions(+), 132 deletions(-)
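Note on the pattern below: the bulk of this merge folds in the per-CPU accessor conversion from the v3.18-rc1 side. The address-of idiom &__get_cpu_var(var) becomes this_cpu_ptr(&var), plain scalar loads become __this_cpu_read(var), and the old __this_cpu_ptr() spelling gives way to raw_cpu_ptr(). A minimal sketch of the new-style accessors, using hypothetical per-CPU variables rather than anything in this tree:

/*
 * Sketch only: example_timer and example_interval are hypothetical
 * per-CPU variables, not symbols from this merge.
 */
#include <linux/percpu.h>
#include <linux/timer.h>

static DEFINE_PER_CPU(struct timer_list, example_timer);
static DEFINE_PER_CPU(unsigned long, example_interval);

static void example_arm_local_timer(void)
{
        /* Address of this CPU's instance: was &__get_cpu_var(example_timer). */
        struct timer_list *t = this_cpu_ptr(&example_timer);

        /* Scalar load: was __get_cpu_var(example_interval).  On x86 this
         * compiles down to a single %gs-relative memory access. */
        unsigned long iv = __this_cpu_read(example_interval);

        mod_timer(t, jiffies + iv);
}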
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index af5b08ab3b71..5972b108f15a 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -146,7 +146,7 @@ static inline int is_apbt_capable(void)
 static int __init apbt_clockevent_register(void)
 {
        struct sfi_timer_table_entry *mtmr;
-       struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev);
+       struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev);
 
        mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
        if (mtmr == NULL) {
@@ -200,7 +200,7 @@ void apbt_setup_secondary_clock(void)
        if (!cpu)
                return;
 
-       adev = &__get_cpu_var(cpu_apbt_dev);
+       adev = this_cpu_ptr(&cpu_apbt_dev);
        if (!adev->timer) {
                adev->timer = dw_apb_clockevent_init(cpu, adev->name,
                        APBT_CLOCKEVENT_RATING, adev_virt_addr(adev),
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 24b5894396a0..ba6cc041edb1 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -561,7 +561,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
  */
 static void setup_APIC_timer(void)
 {
-       struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+       struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
 
        if (this_cpu_has(X86_FEATURE_ARAT)) {
                lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
@@ -696,7 +696,7 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
 
 static int __init calibrate_APIC_clock(void)
 {
-       struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+       struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
        void (*real_handler)(struct clock_event_device *dev);
        unsigned long deltaj;
        long delta, deltatsc;
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 6ce600f9bc78..e658f21681c8 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -42,7 +42,7 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
         * We are to modify mask, so we need an own copy
         * and be sure it's manipulated with irq off.
         */
-       ipi_mask_ptr = __raw_get_cpu_var(ipi_mask);
+       ipi_mask_ptr = this_cpu_cpumask_var_ptr(ipi_mask);
        cpumask_copy(ipi_mask_ptr, mask);
 
        /*
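The ipi_mask case needs its own helper because cpumask_var_t changes representation with CONFIG_CPUMASK_OFFSTACK: it is a pointer when off-stack masks are enabled and an embedded bitmap otherwise, so neither this_cpu_ptr() nor a plain read fits both layouts. A sketch under that assumption, with a hypothetical per-CPU mask:

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU mask, for illustration only. */
static DEFINE_PER_CPU(cpumask_var_t, example_mask);

static void example_copy_into_local_mask(const struct cpumask *src)
{
        /* this_cpu_cpumask_var_ptr() resolves to a struct cpumask *
         * for either representation of cpumask_var_t. */
        struct cpumask *dst = this_cpu_cpumask_var_ptr(example_mask);

        cpumask_copy(dst, src);
}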
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 700f958652f8..4b4f78c9ba19 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -964,6 +964,7 @@ static void vgetcpu_set_mode(void)
                vgetcpu_mode = VGETCPU_LSL;
 }
 
+#ifdef CONFIG_IA32_EMULATION
 /* May not be __init: called during resume */
 static void syscall32_cpu_init(void)
 {
@@ -975,7 +976,8 @@ static void syscall32_cpu_init(void)
 
        wrmsrl(MSR_CSTAR, ia32_cstar_target);
 }
-#endif
+#endif /* CONFIG_IA32_EMULATION */
+#endif /* CONFIG_X86_64 */
 
 #ifdef CONFIG_X86_32
 void enable_sep_cpu(void)
@@ -1198,9 +1200,9 @@ DEFINE_PER_CPU(int, debug_stack_usage);
 
 int is_debug_stack(unsigned long addr)
 {
-       return __get_cpu_var(debug_stack_usage) ||
-               (addr <= __get_cpu_var(debug_stack_addr) &&
-                addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
+       return __this_cpu_read(debug_stack_usage) ||
+               (addr <= __this_cpu_read(debug_stack_addr) &&
+                addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
 }
 NOKPROBE_SYMBOL(is_debug_stack);
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 5ac2d1fb28bc..4cfba4371a71 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -83,7 +83,7 @@ static DEFINE_MUTEX(mce_inject_mutex);
 static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
 {
        int cpu = smp_processor_id();
-       struct mce *m = &__get_cpu_var(injectm);
+       struct mce *m = this_cpu_ptr(&injectm);
        if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
                return NMI_DONE;
        cpumask_clear_cpu(cpu, mce_inject_cpumask);
@@ -97,7 +97,7 @@ static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
 static void mce_irq_ipi(void *info)
 {
        int cpu = smp_processor_id();
-       struct mce *m = &__get_cpu_var(injectm);
+       struct mce *m = this_cpu_ptr(&injectm);
 
        if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
                        m->inject_flags & MCJ_EXCEPTION) {
@@ -109,7 +109,7 @@ static void mce_irq_ipi(void *info)
 /* Inject mce on current CPU */
 static int raise_local(void)
 {
-       struct mce *m = &__get_cpu_var(injectm);
+       struct mce *m = this_cpu_ptr(&injectm);
        int context = MCJ_CTX(m->inject_flags);
        int ret = 0;
        int cpu = m->extcpu;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bd9ccda8087f..61a9668cebfd 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -400,7 +400,7 @@ static u64 mce_rdmsrl(u32 msr)
 
                if (offset < 0)
                        return 0;
-               return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
+               return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
        }
 
        if (rdmsrl_safe(msr, &v)) {
@@ -422,7 +422,7 @@ static void mce_wrmsrl(u32 msr, u64 v)
                int offset = msr_to_offset(msr);
 
                if (offset >= 0)
-                       *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
+                       *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
                return;
        }
        wrmsrl(msr, v);
@@ -478,7 +478,7 @@ static DEFINE_PER_CPU(struct mce_ring, mce_ring);
 /* Runs with CPU affinity in workqueue */
 static int mce_ring_empty(void)
 {
-       struct mce_ring *r = &__get_cpu_var(mce_ring);
+       struct mce_ring *r = this_cpu_ptr(&mce_ring);
 
        return r->start == r->end;
 }
@@ -490,7 +490,7 @@ static int mce_ring_get(unsigned long *pfn)
 
        *pfn = 0;
        get_cpu();
-       r = &__get_cpu_var(mce_ring);
+       r = this_cpu_ptr(&mce_ring);
        if (r->start == r->end)
                goto out;
        *pfn = r->ring[r->start];
@@ -504,7 +504,7 @@ out:
 /* Always runs in MCE context with preempt off */
 static int mce_ring_add(unsigned long pfn)
 {
-       struct mce_ring *r = &__get_cpu_var(mce_ring);
+       struct mce_ring *r = this_cpu_ptr(&mce_ring);
        unsigned next;
 
        next = (r->end + 1) % MCE_RING_SIZE;
@@ -526,7 +526,7 @@ int mce_available(struct cpuinfo_x86 *c)
 static void mce_schedule_work(void)
 {
        if (!mce_ring_empty())
-               schedule_work(&__get_cpu_var(mce_work));
+               schedule_work(this_cpu_ptr(&mce_work));
 }
 
 DEFINE_PER_CPU(struct irq_work, mce_irq_work);
@@ -551,7 +551,7 @@ static void mce_report_event(struct pt_regs *regs)
                return;
        }
 
-       irq_work_queue(&__get_cpu_var(mce_irq_work));
+       irq_work_queue(this_cpu_ptr(&mce_irq_work));
 }
 
 /*
@@ -1045,7 +1045,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 
        mce_gather_info(&m, regs);
 
-       final = &__get_cpu_var(mces_seen);
+       final = this_cpu_ptr(&mces_seen);
        *final = m;
 
        memset(valid_banks, 0, sizeof(valid_banks));
@@ -1278,22 +1278,22 @@ static unsigned long (*mce_adjust_timer)(unsigned long interval) =
 
 static int cmc_error_seen(void)
 {
-       unsigned long *v = &__get_cpu_var(mce_polled_error);
+       unsigned long *v = this_cpu_ptr(&mce_polled_error);
 
        return test_and_clear_bit(0, v);
 }
 
 static void mce_timer_fn(unsigned long data)
 {
-       struct timer_list *t = &__get_cpu_var(mce_timer);
+       struct timer_list *t = this_cpu_ptr(&mce_timer);
        unsigned long iv;
        int notify;
 
        WARN_ON(smp_processor_id() != data);
 
-       if (mce_available(__this_cpu_ptr(&cpu_info))) {
+       if (mce_available(this_cpu_ptr(&cpu_info))) {
                machine_check_poll(MCP_TIMESTAMP,
-                               &__get_cpu_var(mce_poll_banks));
+                               this_cpu_ptr(&mce_poll_banks));
                mce_intel_cmci_poll();
        }
 
@@ -1323,7 +1323,7 @@ static void mce_timer_fn(unsigned long data)
  */
 void mce_timer_kick(unsigned long interval)
 {
-       struct timer_list *t = &__get_cpu_var(mce_timer);
+       struct timer_list *t = this_cpu_ptr(&mce_timer);
        unsigned long when = jiffies + interval;
        unsigned long iv = __this_cpu_read(mce_next_interval);
 
@@ -1659,7 +1659,7 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
 
 static void __mcheck_cpu_init_timer(void)
 {
-       struct timer_list *t = &__get_cpu_var(mce_timer);
+       struct timer_list *t = this_cpu_ptr(&mce_timer);
        unsigned int cpu = smp_processor_id();
 
        setup_timer(t, mce_timer_fn, cpu);
@@ -1702,8 +1702,8 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
        __mcheck_cpu_init_generic();
        __mcheck_cpu_init_vendor(c);
        __mcheck_cpu_init_timer();
-       INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
-       init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
+       INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work);
+       init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb);
 }
 
 /*
@@ -1955,7 +1955,7 @@ static struct miscdevice mce_chrdev_device = {
 static void __mce_disable_bank(void *arg)
 {
        int bank = *((int *)arg);
-       __clear_bit(bank, __get_cpu_var(mce_poll_banks));
+       __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
        cmci_disable_bank(bank);
 }
 
@@ -2065,7 +2065,7 @@ static void mce_syscore_shutdown(void)
 static void mce_syscore_resume(void)
 {
        __mcheck_cpu_init_generic();
-       __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
+       __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
 }
 
 static struct syscore_ops mce_syscore_ops = {
@@ -2080,7 +2080,7 @@ static struct syscore_ops mce_syscore_ops = {
 
 static void mce_cpu_restart(void *data)
 {
-       if (!mce_available(__this_cpu_ptr(&cpu_info)))
+       if (!mce_available(raw_cpu_ptr(&cpu_info)))
                return;
        __mcheck_cpu_init_generic();
        __mcheck_cpu_init_timer();
@@ -2096,14 +2096,14 @@ static void mce_restart(void)
 /* Toggle features for corrected errors */
 static void mce_disable_cmci(void *data)
 {
-       if (!mce_available(__this_cpu_ptr(&cpu_info)))
+       if (!mce_available(raw_cpu_ptr(&cpu_info)))
                return;
        cmci_clear();
 }
 
 static void mce_enable_ce(void *all)
 {
-       if (!mce_available(__this_cpu_ptr(&cpu_info)))
+       if (!mce_available(raw_cpu_ptr(&cpu_info)))
                return;
        cmci_reenable();
        cmci_recheck();
@@ -2336,7 +2336,7 @@ static void mce_disable_cpu(void *h)
        unsigned long action = *(unsigned long *)h;
        int i;
 
-       if (!mce_available(__this_cpu_ptr(&cpu_info)))
+       if (!mce_available(raw_cpu_ptr(&cpu_info)))
                return;
 
        if (!(action & CPU_TASKS_FROZEN))
@@ -2354,7 +2354,7 @@ static void mce_reenable_cpu(void *h)
        unsigned long action = *(unsigned long *)h;
        int i;
 
-       if (!mce_available(__this_cpu_ptr(&cpu_info)))
+       if (!mce_available(raw_cpu_ptr(&cpu_info)))
                return;
 
        if (!(action & CPU_TASKS_FROZEN))
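Two details of the conversion show up in this file. First, per-CPU arrays such as mce_poll_banks (an mce_banks_t bitmap) are passed to this_cpu_ptr() without a leading '&', since an array name already decays to a pointer. Second, __this_cpu_ptr() is replaced by raw_cpu_ptr(), the renamed form of the same preemption-unchecked access. A sketch of the array case, with a hypothetical per-CPU bitmap:

#include <linux/bitops.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU bitmap, for illustration only. */
typedef unsigned long example_banks_t[BITS_TO_LONGS(64)];
static DEFINE_PER_CPU(example_banks_t, example_banks);

static void example_clear_bank(int bank)
{
        /* The array name decays to a pointer, so no '&' is needed. */
        __clear_bit(bank, this_cpu_ptr(example_banks));
}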
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 1e49f8f41276..5d4999f95aec 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -310,7 +310,7 @@ static void amd_threshold_interrupt(void)
                         * event.
                         */
                        machine_check_poll(MCP_TIMESTAMP,
-                                       &__get_cpu_var(mce_poll_banks));
+                                       this_cpu_ptr(&mce_poll_banks));
 
                        if (high & MASK_OVERFLOW_HI) {
                                rdmsrl(address, m.misc);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 3bdb95ae8c43..b3c97bafc123 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -86,7 +86,7 @@ void mce_intel_cmci_poll(void)
 {
        if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
                return;
-       machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+       machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
 }
 
 void mce_intel_hcpu_update(unsigned long cpu)
@@ -145,7 +145,7 @@ static void cmci_storm_disable_banks(void)
        u64 val;
 
        raw_spin_lock_irqsave(&cmci_discover_lock, flags);
-       owned = __get_cpu_var(mce_banks_owned);
+       owned = this_cpu_ptr(mce_banks_owned);
        for_each_set_bit(bank, owned, MAX_NR_BANKS) {
                rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
                val &= ~MCI_CTL2_CMCI_EN;
@@ -195,7 +195,7 @@ static void intel_threshold_interrupt(void)
 {
        if (cmci_storm_detect())
                return;
-       machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+       machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
        mce_notify_irq();
 }
 
@@ -206,7 +206,7 @@ static void intel_threshold_interrupt(void)
  */
 static void cmci_discover(int banks)
 {
-       unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+       unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
        unsigned long flags;
        int i;
        int bios_wrong_thresh = 0;
@@ -228,7 +228,7 @@ static void cmci_discover(int banks)
                /* Already owned by someone else? */
                if (val & MCI_CTL2_CMCI_EN) {
                        clear_bit(i, owned);
-                       __clear_bit(i, __get_cpu_var(mce_poll_banks));
+                       __clear_bit(i, this_cpu_ptr(mce_poll_banks));
                        continue;
                }
 
@@ -252,7 +252,7 @@ static void cmci_discover(int banks)
                /* Did the enable bit stick? -- the bank supports CMCI */
                if (val & MCI_CTL2_CMCI_EN) {
                        set_bit(i, owned);
-                       __clear_bit(i, __get_cpu_var(mce_poll_banks));
+                       __clear_bit(i, this_cpu_ptr(mce_poll_banks));
                        /*
                         * We are able to set thresholds for some banks that
                         * had a threshold of 0. This means the BIOS has not
@@ -263,7 +263,7 @@ static void cmci_discover(int banks)
                            (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
                                bios_wrong_thresh = 1;
                } else {
-                       WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+                       WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks)));
                }
        }
        raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
@@ -284,10 +284,10 @@ void cmci_recheck(void)
        unsigned long flags;
        int banks;
 
-       if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
+       if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
                return;
        local_irq_save(flags);
-       machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+       machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
        local_irq_restore(flags);
 }
 
@@ -296,12 +296,12 @@ static void __cmci_disable_bank(int bank)
 {
        u64 val;
 
-       if (!test_bit(bank, __get_cpu_var(mce_banks_owned)))
+       if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
                return;
        rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
        val &= ~MCI_CTL2_CMCI_EN;
        wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
-       __clear_bit(bank, __get_cpu_var(mce_banks_owned));
+       __clear_bit(bank, this_cpu_ptr(mce_banks_owned));
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 16c73022306e..1b8299dd3d91 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -494,7 +494,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
 
 void x86_pmu_disable_all(void)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx;
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -512,7 +512,7 @@ void x86_pmu_disable_all(void)
 
 static void x86_pmu_disable(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        if (!x86_pmu_initialized())
                return;
@@ -529,7 +529,7 @@ static void x86_pmu_disable(struct pmu *pmu)
 
 void x86_pmu_enable_all(int added)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx;
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -876,7 +876,7 @@ static void x86_pmu_start(struct perf_event *event, int flags);
 
 static void x86_pmu_enable(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct perf_event *event;
        struct hw_perf_event *hwc;
        int i, added = cpuc->n_added;
@@ -1030,7 +1030,7 @@ void x86_pmu_enable_event(struct perf_event *event)
  */
 static int x86_pmu_add(struct perf_event *event, int flags)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc;
        int assign[X86_PMC_IDX_MAX];
        int n, n0, ret;
@@ -1081,7 +1081,7 @@ out:
 
 static void x86_pmu_start(struct perf_event *event, int flags)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx = event->hw.idx;
 
        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
@@ -1160,7 +1160,7 @@ void perf_event_print_debug(void)
 
 void x86_pmu_stop(struct perf_event *event, int flags)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
 
        if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
@@ -1182,7 +1182,7 @@ void x86_pmu_stop(struct perf_event *event, int flags)
 
 static void x86_pmu_del(struct perf_event *event, int flags)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int i;
 
        /*
@@ -1237,7 +1237,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
        int idx, handled = 0;
        u64 val;
 
-       cpuc = &__get_cpu_var(cpu_hw_events);
+       cpuc = this_cpu_ptr(&cpu_hw_events);
 
        /*
         * Some chipsets need to unmask the LVTPC in a particular spot
@@ -1646,7 +1646,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
  */
 static int x86_pmu_commit_txn(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int assign[X86_PMC_IDX_MAX];
        int n, ret;
 
@@ -2005,7 +2005,7 @@ static unsigned long get_segment_base(unsigned int segment)
                if (idx > GDT_ENTRIES)
                        return 0;
 
-               desc = __this_cpu_ptr(&gdt_page.gdt[0]);
+               desc = raw_cpu_ptr(gdt_page.gdt);
        }
 
        return get_desc_base(desc + idx);
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index beeb7cc07044..28926311aac1 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -699,7 +699,7 @@ __init int amd_pmu_init(void)
 
 void amd_pmu_enable_virt(void)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        cpuc->perf_ctr_virt_mask = 0;
 
@@ -711,7 +711,7 @@ EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
 
 void amd_pmu_disable_virt(void)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        /*
         * We only mask out the Host-only bit so that host-only counting works
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 3851def5057c..a73947c53b65 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1174,7 +1174,7 @@ static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
 
 static void intel_pmu_disable_all(void)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
@@ -1187,7 +1187,7 @@ static void intel_pmu_disable_all(void)
 
 static void intel_pmu_enable_all(int added)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        intel_pmu_pebs_enable_all();
        intel_pmu_lbr_enable_all();
@@ -1221,7 +1221,7 @@ static void intel_pmu_enable_all(int added)
  */
 static void intel_pmu_nhm_workaround(void)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        static const unsigned long nhm_magic[4] = {
                0x4300B5,
                0x4300D2,
@@ -1320,7 +1320,7 @@ static inline bool event_is_checkpointed(struct perf_event *event)
 static void intel_pmu_disable_event(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
                intel_pmu_disable_bts();
@@ -1384,7 +1384,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 static void intel_pmu_enable_event(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
                if (!__this_cpu_read(cpu_hw_events.enabled))
@@ -1478,7 +1478,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        u64 status;
        int handled;
 
-       cpuc = &__get_cpu_var(cpu_hw_events);
+       cpuc = this_cpu_ptr(&cpu_hw_events);
 
        /*
         * No known reason to not always do late ACK,
@@ -1910,7 +1910,7 @@ EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
 
 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
 
        arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
@@ -1931,7 +1931,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
 
 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
        int idx;
 
@@ -1965,7 +1965,7 @@ static void core_pmu_enable_event(struct perf_event *event)
 
 static void core_pmu_enable_all(int added)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx;
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index b1553d05a5cb..46211bcc813e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -474,7 +474,7 @@ void intel_pmu_enable_bts(u64 config)
 
 void intel_pmu_disable_bts(void)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        unsigned long debugctlmsr;
 
        if (!cpuc->ds)
@@ -491,7 +491,7 @@ void intel_pmu_disable_bts(void)
 
 int intel_pmu_drain_bts_buffer(void)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct debug_store *ds = cpuc->ds;
        struct bts_record {
                u64     from;
@@ -669,7 +669,7 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 
 void intel_pmu_pebs_enable(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
 
        hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
@@ -684,7 +684,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 
 void intel_pmu_pebs_disable(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
 
        cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
@@ -702,7 +702,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 
 void intel_pmu_pebs_enable_all(void)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        if (cpuc->pebs_enabled)
                wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
@@ -710,7 +710,7 @@ void intel_pmu_pebs_enable_all(void)
 
 void intel_pmu_pebs_disable_all(void)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        if (cpuc->pebs_enabled)
                wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
@@ -718,7 +718,7 @@ void intel_pmu_pebs_disable_all(void)
 
 static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        unsigned long from = cpuc->lbr_entries[0].from;
        unsigned long old_to, to = cpuc->lbr_entries[0].to;
        unsigned long ip = regs->ip;
@@ -829,7 +829,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
         * We cast to the biggest pebs_record but are careful not to
         * unconditionally access the 'extra' entries.
         */
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct pebs_record_hsw *pebs = __pebs;
        struct perf_sample_data data;
        struct pt_regs regs;
@@ -916,7 +916,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 
 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct debug_store *ds = cpuc->ds;
        struct perf_event *event = cpuc->events[0]; /* PMC0 only */
        struct pebs_record_core *at, *top;
@@ -957,7 +957,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 
 static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct debug_store *ds = cpuc->ds;
        struct perf_event *event = NULL;
        void *at, *top;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 4af10617de33..45fa730a5283 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -133,7 +133,7 @@ static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
 static void __intel_pmu_lbr_enable(void)
 {
        u64 debugctl;
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        if (cpuc->lbr_sel)
                wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config);
@@ -183,7 +183,7 @@ void intel_pmu_lbr_reset(void)
 
 void intel_pmu_lbr_enable(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        if (!x86_pmu.lbr_nr)
                return;
@@ -203,7 +203,7 @@ void intel_pmu_lbr_enable(struct perf_event *event)
 
 void intel_pmu_lbr_disable(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        if (!x86_pmu.lbr_nr)
                return;
@@ -220,7 +220,7 @@ void intel_pmu_lbr_disable(struct perf_event *event)
 
 void intel_pmu_lbr_enable_all(void)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        if (cpuc->lbr_users)
                __intel_pmu_lbr_enable();
@@ -228,7 +228,7 @@ void intel_pmu_lbr_enable_all(void)
 
 void intel_pmu_lbr_disable_all(void)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        if (cpuc->lbr_users)
                __intel_pmu_lbr_disable();
@@ -332,7 +332,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 
 void intel_pmu_lbr_read(void)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
        if (!cpuc->lbr_users)
                return;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 619f7699487a..d64f275fe274 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -135,7 +135,7 @@ static inline u64 rapl_scale(u64 v)
         * or use ldexp(count, -32).
         * Watts = Joules/Time delta
         */
-       return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit);
+       return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
 }
 
 static u64 rapl_event_update(struct perf_event *event)
@@ -187,7 +187,7 @@ static void rapl_stop_hrtimer(struct rapl_pmu *pmu)
 
 static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
 {
-       struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+       struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
        struct perf_event *event;
        unsigned long flags;
 
@@ -234,7 +234,7 @@ static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
 
 static void rapl_pmu_event_start(struct perf_event *event, int mode)
 {
-       struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+       struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
        unsigned long flags;
 
        spin_lock_irqsave(&pmu->lock, flags);
@@ -244,7 +244,7 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode)
 
 static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 {
-       struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+       struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
        struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;
 
@@ -278,7 +278,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 
 static int rapl_pmu_event_add(struct perf_event *event, int mode)
 {
-       struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+       struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
        struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;
 
@@ -696,7 +696,7 @@ static int __init rapl_pmu_init(void)
                return -1;
        }
 
-       pmu = __get_cpu_var(rapl_pmu);
+       pmu = __this_cpu_read(rapl_pmu);
 
        pr_info("RAPL PMU detected, hw unit 2^-%d Joules,"
                " API unit is 2^-32 Joules,"
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
index 838fa8772c62..5b0c232d1ee6 100644
--- a/arch/x86/kernel/cpu/perf_event_knc.c
+++ b/arch/x86/kernel/cpu/perf_event_knc.c
@@ -217,7 +217,7 @@ static int knc_pmu_handle_irq(struct pt_regs *regs)
        int bit, loops;
        u64 status;
 
-       cpuc = &__get_cpu_var(cpu_hw_events);
+       cpuc = this_cpu_ptr(&cpu_hw_events);
 
        knc_pmu_disable_all();
 
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 5d466b7d8609..f2e56783af3d 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -915,7 +915,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
 
 static void p4_pmu_disable_all(void)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx;
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -984,7 +984,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
 
 static void p4_pmu_enable_all(int added)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx;
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -1004,7 +1004,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
        int idx, handled = 0;
        u64 val;
 
-       cpuc = &__get_cpu_var(cpu_hw_events);
+       cpuc = this_cpu_ptr(&cpu_hw_events);
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                int overflow;
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index a618fcd2c07d..f5ab56d14287 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -237,7 +237,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,
                ced->max_nr_ranges++;
 
        /* If crashk_low_res is not 0, another range split possible */
-       if (crashk_low_res.end != 0)
+       if (crashk_low_res.end)
                ced->max_nr_ranges++;
 }
 
@@ -335,9 +335,11 @@ static int elf_header_exclude_ranges(struct crash_elf_data *ced,
        if (ret)
                return ret;
 
-       ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
-       if (ret)
-               return ret;
+       if (crashk_low_res.end) {
+               ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
+               if (ret)
+                       return ret;
+       }
 
        /* Exclude GART region */
        if (ced->gart_end) {
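A reading of the crash.c hunks, which are a fix rather than accessor churn: exclude_mem_range() works on an inclusive [start, end] range, so when no low crash region is reserved and crashk_low_res is still zeroed, the old unconditional call would ask it to carve the single byte at physical address 0 out of the ELF header ranges. The guard pattern the second hunk introduces, restated with a comment:

        /* Skip the exclusion when the resource was never reserved,
         * instead of handing exclude_mem_range() a zero [start, end]. */
        if (crashk_low_res.end) {
                ret = exclude_mem_range(cmem, crashk_low_res.start,
                                        crashk_low_res.end);
                if (ret)
                        return ret;
        }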
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 4b0e1dfa2226..b553ed89e5f5 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -449,12 +449,11 @@ sysenter_audit:
        jnz syscall_trace_entry
        addl $4,%esp
        CFI_ADJUST_CFA_OFFSET -4
-       /* %esi already in 8(%esp)         6th arg: 4th syscall arg */
-       /* %edx already in 4(%esp)         5th arg: 3rd syscall arg */
-       /* %ecx already in 0(%esp)         4th arg: 2nd syscall arg */
-       movl %ebx,%ecx                  /* 3rd arg: 1st syscall arg */
-       movl %eax,%edx                  /* 2nd arg: syscall number */
-       movl $AUDIT_ARCH_I386,%eax      /* 1st arg: audit arch */
+       movl %esi,4(%esp)               /* 5th arg: 4th syscall arg */
+       movl %edx,(%esp)                /* 4th arg: 3rd syscall arg */
+       /* %ecx already in %ecx            3rd arg: 2nd syscall arg */
+       movl %ebx,%edx                  /* 2nd arg: 1st syscall arg */
+       /* %eax already in %eax            1st arg: syscall number */
        call __audit_syscall_entry
        pushl_cfi %ebx
        movl PT_EAX(%esp),%eax          /* reload syscall number */
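The entry_32.S hunk retargets the sysenter audit stub at a changed __audit_syscall_entry() calling convention: AUDIT_ARCH_I386 is no longer passed in by the arch stub, and the syscall number moves up to the first argument. Assuming the usual i386 regparm(3) convention (first three arguments in %eax, %edx, %ecx, the rest on the stack), the prototype the new code appears to target is:

/* Assumed post-change prototype; the previous version took a leading
 * 'int arch' argument, which the audit core now supplies itself. */
extern void __audit_syscall_entry(int major, unsigned long a0,
                                  unsigned long a1, unsigned long a2,
                                  unsigned long a3);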
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 5f9cf20cdb68..3d5fb509bdeb 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -108,7 +108,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
        int i;
 
        for (i = 0; i < HBP_NUM; i++) {
-               struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+               struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
 
                if (!*slot) {
                        *slot = bp;
@@ -122,7 +122,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
        set_debugreg(info->address, i);
        __this_cpu_write(cpu_debugreg[i], info->address);
 
-       dr7 = &__get_cpu_var(cpu_dr7);
+       dr7 = this_cpu_ptr(&cpu_dr7);
        *dr7 |= encode_dr7(i, info->len, info->type);
 
        set_debugreg(*dr7, 7);
@@ -146,7 +146,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
        int i;
 
        for (i = 0; i < HBP_NUM; i++) {
-               struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+               struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
 
                if (*slot == bp) {
                        *slot = NULL;
@@ -157,7 +157,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
        if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
                return;
 
-       dr7 = &__get_cpu_var(cpu_dr7);
+       dr7 = this_cpu_ptr(&cpu_dr7);
        *dr7 &= ~__encode_dr7(i, info->len, info->type);
 
        set_debugreg(*dr7, 7);
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 4d1c746892eb..e4b503d5558c 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c | |||
@@ -52,13 +52,13 @@ static inline void stack_overflow_check(struct pt_regs *regs) | |||
52 | regs->sp <= curbase + THREAD_SIZE) | 52 | regs->sp <= curbase + THREAD_SIZE) |
53 | return; | 53 | return; |
54 | 54 | ||
55 | irq_stack_top = (u64)__get_cpu_var(irq_stack_union.irq_stack) + | 55 | irq_stack_top = (u64)this_cpu_ptr(irq_stack_union.irq_stack) + |
56 | STACK_TOP_MARGIN; | 56 | STACK_TOP_MARGIN; |
57 | irq_stack_bottom = (u64)__get_cpu_var(irq_stack_ptr); | 57 | irq_stack_bottom = (u64)__this_cpu_read(irq_stack_ptr); |
58 | if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom) | 58 | if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom) |
59 | return; | 59 | return; |
60 | 60 | ||
61 | oist = &__get_cpu_var(orig_ist); | 61 | oist = this_cpu_ptr(&orig_ist); |
62 | estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN; | 62 | estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN; |
63 | estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1]; | 63 | estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1]; |
64 | if (regs->sp >= estack_top && regs->sp <= estack_bottom) | 64 | if (regs->sp >= estack_top && regs->sp <= estack_bottom) |
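Note the two different replacements inside this single irq_64.c hunk: where the old code needed the address of a per-CPU object, __get_cpu_var() becomes this_cpu_ptr(); where it only needed the value stored in a per-CPU pointer variable, __this_cpu_read() is used instead, which compiles to a single %gs-relative load. A sketch under the arch/x86 per-CPU declarations (irq_stack_union's irq_stack member is an array, so it decays to a pointer without &):

    u64 top    = (u64)this_cpu_ptr(irq_stack_union.irq_stack); /* address of this CPU's IRQ stack */
    u64 bottom = (u64)__this_cpu_read(irq_stack_ptr);          /* value held in the per-CPU pointer */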
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index 9642b9b33655..ca05f86481aa 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <asm/setup.h> | 26 | #include <asm/setup.h> |
27 | #include <asm/crash.h> | 27 | #include <asm/crash.h> |
28 | #include <asm/efi.h> | 28 | #include <asm/efi.h> |
29 | #include <asm/kexec-bzimage64.h> | ||
29 | 30 | ||
30 | #define MAX_ELFCOREHDR_STR_LEN 30 /* elfcorehdr=0x<64bit-value> */ | 31 | #define MAX_ELFCOREHDR_STR_LEN 30 /* elfcorehdr=0x<64bit-value> */ |
31 | 32 | ||
@@ -267,7 +268,7 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params, | |||
267 | return ret; | 268 | return ret; |
268 | } | 269 | } |
269 | 270 | ||
270 | int bzImage64_probe(const char *buf, unsigned long len) | 271 | static int bzImage64_probe(const char *buf, unsigned long len) |
271 | { | 272 | { |
272 | int ret = -ENOEXEC; | 273 | int ret = -ENOEXEC; |
273 | struct setup_header *header; | 274 | struct setup_header *header; |
@@ -325,10 +326,10 @@ int bzImage64_probe(const char *buf, unsigned long len) | |||
325 | return ret; | 326 | return ret; |
326 | } | 327 | } |
327 | 328 | ||
328 | void *bzImage64_load(struct kimage *image, char *kernel, | 329 | static void *bzImage64_load(struct kimage *image, char *kernel, |
329 | unsigned long kernel_len, char *initrd, | 330 | unsigned long kernel_len, char *initrd, |
330 | unsigned long initrd_len, char *cmdline, | 331 | unsigned long initrd_len, char *cmdline, |
331 | unsigned long cmdline_len) | 332 | unsigned long cmdline_len) |
332 | { | 333 | { |
333 | 334 | ||
334 | struct setup_header *header; | 335 | struct setup_header *header; |
@@ -514,7 +515,7 @@ out_free_params: | |||
514 | } | 515 | } |
515 | 516 | ||
516 | /* This cleanup function is called after various segments have been loaded */ | 517 | /* This cleanup function is called after various segments have been loaded */ |
517 | int bzImage64_cleanup(void *loader_data) | 518 | static int bzImage64_cleanup(void *loader_data) |
518 | { | 519 | { |
519 | struct bzimage64_data *ldata = loader_data; | 520 | struct bzimage64_data *ldata = loader_data; |
520 | 521 | ||
@@ -528,7 +529,7 @@ int bzImage64_cleanup(void *loader_data) | |||
528 | } | 529 | } |
529 | 530 | ||
530 | #ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG | 531 | #ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG |
531 | int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len) | 532 | static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len) |
532 | { | 533 | { |
533 | bool trusted; | 534 | bool trusted; |
534 | int ret; | 535 | int ret; |
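Marking bzImage64_probe/_load/_cleanup/_verify_sig static is safe because other code reaches them only through the loader's ops table, whose declaration the newly included asm/kexec-bzimage64.h now provides to other translation units. A sketch of that table, assuming the definition at the bottom of this same file:

    struct kexec_file_ops kexec_bzImage64_ops = {
            .probe      = bzImage64_probe,
            .load       = bzImage64_load,
            .cleanup    = bzImage64_cleanup,
    #ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
            .verify_sig = bzImage64_verify_sig,
    #endif
    };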
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 3dd8e2c4d74a..f6945bef2cd1 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
36 | #include <linux/kprobes.h> | 36 | #include <linux/kprobes.h> |
37 | #include <linux/debugfs.h> | 37 | #include <linux/debugfs.h> |
38 | #include <linux/nmi.h> | ||
38 | #include <asm/timer.h> | 39 | #include <asm/timer.h> |
39 | #include <asm/cpu.h> | 40 | #include <asm/cpu.h> |
40 | #include <asm/traps.h> | 41 | #include <asm/traps.h> |
@@ -243,9 +244,9 @@ u32 kvm_read_and_reset_pf_reason(void) | |||
243 | { | 244 | { |
244 | u32 reason = 0; | 245 | u32 reason = 0; |
245 | 246 | ||
246 | if (__get_cpu_var(apf_reason).enabled) { | 247 | if (__this_cpu_read(apf_reason.enabled)) { |
247 | reason = __get_cpu_var(apf_reason).reason; | 248 | reason = __this_cpu_read(apf_reason.reason); |
248 | __get_cpu_var(apf_reason).reason = 0; | 249 | __this_cpu_write(apf_reason.reason, 0); |
249 | } | 250 | } |
250 | 251 | ||
251 | return reason; | 252 | return reason; |
@@ -318,7 +319,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val) | |||
318 | * there's no need for lock or memory barriers. | 319 | * there's no need for lock or memory barriers. |
319 | * An optimization barrier is implied in apic write. | 320 | * An optimization barrier is implied in apic write. |
320 | */ | 321 | */ |
321 | if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi))) | 322 | if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi))) |
322 | return; | 323 | return; |
323 | apic_write(APIC_EOI, APIC_EOI_ACK); | 324 | apic_write(APIC_EOI, APIC_EOI_ACK); |
324 | } | 325 | } |
@@ -329,13 +330,13 @@ void kvm_guest_cpu_init(void) | |||
329 | return; | 330 | return; |
330 | 331 | ||
331 | if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { | 332 | if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { |
332 | u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason)); | 333 | u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason)); |
333 | 334 | ||
334 | #ifdef CONFIG_PREEMPT | 335 | #ifdef CONFIG_PREEMPT |
335 | pa |= KVM_ASYNC_PF_SEND_ALWAYS; | 336 | pa |= KVM_ASYNC_PF_SEND_ALWAYS; |
336 | #endif | 337 | #endif |
337 | wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED); | 338 | wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED); |
338 | __get_cpu_var(apf_reason).enabled = 1; | 339 | __this_cpu_write(apf_reason.enabled, 1); |
339 | printk(KERN_INFO"KVM setup async PF for cpu %d\n", | 340 | printk(KERN_INFO"KVM setup async PF for cpu %d\n", |
340 | smp_processor_id()); | 341 | smp_processor_id()); |
341 | } | 342 | } |
@@ -344,8 +345,8 @@ void kvm_guest_cpu_init(void) | |||
344 | unsigned long pa; | 345 | unsigned long pa; |
345 | /* Size alignment is implied but just to make it explicit. */ | 346 | /* Size alignment is implied but just to make it explicit. */ |
346 | BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4); | 347 | BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4); |
347 | __get_cpu_var(kvm_apic_eoi) = 0; | 348 | __this_cpu_write(kvm_apic_eoi, 0); |
348 | pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi)) | 349 | pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi)) |
349 | | KVM_MSR_ENABLED; | 350 | | KVM_MSR_ENABLED; |
350 | wrmsrl(MSR_KVM_PV_EOI_EN, pa); | 351 | wrmsrl(MSR_KVM_PV_EOI_EN, pa); |
351 | } | 352 | } |
@@ -356,11 +357,11 @@ void kvm_guest_cpu_init(void) | |||
356 | 357 | ||
357 | static void kvm_pv_disable_apf(void) | 358 | static void kvm_pv_disable_apf(void) |
358 | { | 359 | { |
359 | if (!__get_cpu_var(apf_reason).enabled) | 360 | if (!__this_cpu_read(apf_reason.enabled)) |
360 | return; | 361 | return; |
361 | 362 | ||
362 | wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); | 363 | wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); |
363 | __get_cpu_var(apf_reason).enabled = 0; | 364 | __this_cpu_write(apf_reason.enabled, 0); |
364 | 365 | ||
365 | printk(KERN_INFO"Unregister pv shared memory for cpu %d\n", | 366 | printk(KERN_INFO"Unregister pv shared memory for cpu %d\n", |
366 | smp_processor_id()); | 367 | smp_processor_id()); |
@@ -499,6 +500,13 @@ void __init kvm_guest_init(void) | |||
499 | #else | 500 | #else |
500 | kvm_guest_cpu_init(); | 501 | kvm_guest_cpu_init(); |
501 | #endif | 502 | #endif |
503 | |||
504 | /* | ||
505 | * Hard lockup detection is enabled by default. Disable it, as guests | ||
506 | * can get false positives too easily, for example if the host is | ||
507 | * overcommitted. | ||
508 | */ | ||
509 | watchdog_enable_hardlockup_detector(false); | ||
502 | } | 510 | } |
503 | 511 | ||
504 | static noinline uint32_t __kvm_cpuid_base(void) | 512 | static noinline uint32_t __kvm_cpuid_base(void) |
@@ -716,7 +724,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want) | |||
716 | if (in_nmi()) | 724 | if (in_nmi()) |
717 | return; | 725 | return; |
718 | 726 | ||
719 | w = &__get_cpu_var(klock_waiting); | 727 | w = this_cpu_ptr(&klock_waiting); |
720 | cpu = smp_processor_id(); | 728 | cpu = smp_processor_id(); |
721 | start = spin_time_start(); | 729 | start = spin_time_start(); |
722 | 730 | ||
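Two distinct changes land in kvm.c. First, the per-CPU conversion: scalar loads and stores of apf_reason fields become __this_cpu_read()/__this_cpu_write(), each a single %gs-prefixed instruction, while call sites that genuinely need an address (slow_virt_to_phys(), __test_and_clear_bit()) use this_cpu_ptr(). Second, kvm_guest_init() now disables the hard lockup detector, since an overcommitted host can stall a vCPU long enough to trip it spuriously. A sketch of the read/modify/write pattern, assuming apf_reason is DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason):

    if (__this_cpu_read(apf_reason.enabled)) {          /* one %gs-relative load  */
            reason = __this_cpu_read(apf_reason.reason);
            __this_cpu_write(apf_reason.reason, 0);     /* one %gs-relative store */
    }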
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 29576c244699..749b0e423419 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -1445,12 +1445,12 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch) | |||
1445 | { | 1445 | { |
1446 | #ifdef CONFIG_X86_64 | 1446 | #ifdef CONFIG_X86_64 |
1447 | if (arch == AUDIT_ARCH_X86_64) { | 1447 | if (arch == AUDIT_ARCH_X86_64) { |
1448 | audit_syscall_entry(arch, regs->orig_ax, regs->di, | 1448 | audit_syscall_entry(regs->orig_ax, regs->di, |
1449 | regs->si, regs->dx, regs->r10); | 1449 | regs->si, regs->dx, regs->r10); |
1450 | } else | 1450 | } else |
1451 | #endif | 1451 | #endif |
1452 | { | 1452 | { |
1453 | audit_syscall_entry(arch, regs->orig_ax, regs->bx, | 1453 | audit_syscall_entry(regs->orig_ax, regs->bx, |
1454 | regs->cx, regs->dx, regs->si); | 1454 | regs->cx, regs->dx, regs->si); |
1455 | } | 1455 | } |
1456 | } | 1456 | } |
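With arch gone from the audit entry hook, do_audit_syscall_entry() keeps its arch parameter only to select the right register set per ABI; audit recovers the arch internally from the task state. A sketch of the slimmed-down wrapper, assuming the v3.18-rc1 include/linux/audit.h (the dummy-context fast path predates this series; consult the tree for the exact body):

    static inline void audit_syscall_entry(int major, unsigned long a0,
                                           unsigned long a1,
                                           unsigned long a2,
                                           unsigned long a3)
    {
            if (unlikely(!audit_dummy_context()))
                    __audit_syscall_entry(major, a0, a1, a2, a3);
    }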