commit db65bcfd9563a7531c3dd46c350565705be1fd84
Merge: b47dcbdc5161d3d5756f430191e2840d9b855492 f114040e3ea6e07372334ade75d1ee0775c355e1
Author:    H. Peter Anvin <hpa@linux.intel.com>  2014-10-24 16:26:37 -0400
Committer: H. Peter Anvin <hpa@linux.intel.com>  2014-10-24 16:26:37 -0400
Tree:      ff7b068472764e36ab02a739917e206dd11d6b46 /arch/x86

    Merge tag 'v3.18-rc1' into x86/urgent

    Reason:
    Need to apply audit patch on top of v3.18-rc1.

    Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

Diffstat (limited to 'arch/x86'):
 49 files changed, 325 insertions(+), 514 deletions(-)
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 711de084ab57..8ffba18395c8 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -198,12 +198,12 @@ sysexit_from_sys_call:
 
 #ifdef CONFIG_AUDITSYSCALL
 	.macro auditsys_entry_common
-	movl %esi,%r9d			/* 6th arg: 4th syscall arg */
-	movl %edx,%r8d			/* 5th arg: 3rd syscall arg */
-	/* (already in %ecx)		   4th arg: 2nd syscall arg */
-	movl %ebx,%edx			/* 3rd arg: 1st syscall arg */
-	movl %eax,%esi			/* 2nd arg: syscall number */
-	movl $AUDIT_ARCH_I386,%edi	/* 1st arg: audit arch */
+	movl %esi,%r8d			/* 5th arg: 4th syscall arg */
+	movl %ecx,%r9d			/* swap with edx */
+	movl %edx,%ecx			/* 4th arg: 3rd syscall arg */
+	movl %r9d,%edx			/* 3rd arg: 2nd syscall arg */
+	movl %ebx,%esi			/* 2nd arg: 1st syscall arg */
+	movl %eax,%edi			/* 1st arg: syscall number */
 	call __audit_syscall_entry
 	movl RAX-ARGOFFSET(%rsp),%eax	/* reload syscall number */
 	cmpq $(IA32_NR_syscalls-1),%rax
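The register reshuffle above is consistent with __audit_syscall_entry() losing its leading audit-arch argument in v3.18-rc1 (the "audit patch" this merge prepares for): the syscall number moves up to the first parameter and every syscall argument shifts down one slot. A minimal C sketch of the implied prototype change; parameter names are illustrative, not copied from the kernel sources:

/* Hypothetical before/after view of the callee. Under the x86-64 ABI the
 * six integer arguments travel in %rdi, %rsi, %rdx, %rcx, %r8, %r9, which
 * is exactly the renumbering the assembly performs. */
#ifdef OLD_AUDIT_ABI
void __audit_syscall_entry(int arch, int major, unsigned long a0,
			   unsigned long a1, unsigned long a2,
			   unsigned long a3);
#else
void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
			   unsigned long a2, unsigned long a3);
#endif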
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 3bf000fab0ae..d55a210a49bf 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -6,6 +6,7 @@ genhdr-y += unistd_x32.h
 
 generic-y += clkdev.h
 generic-y += cputime.h
+generic-y += dma-contiguous.h
 generic-y += early_ioremap.h
 generic-y += mcs_spinlock.h
 generic-y += scatterlist.h
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 4b528a970bd4..61fd18b83b6c 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -97,11 +97,11 @@ extern void hw_breakpoint_restore(void);
 DECLARE_PER_CPU(int, debug_stack_usage);
 static inline void debug_stack_usage_inc(void)
 {
-	__get_cpu_var(debug_stack_usage)++;
+	__this_cpu_inc(debug_stack_usage);
 }
 static inline void debug_stack_usage_dec(void)
 {
-	__get_cpu_var(debug_stack_usage)--;
+	__this_cpu_dec(debug_stack_usage);
 }
 int is_debug_stack(unsigned long addr);
 void debug_stack_set_zero(void);
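Nearly every hunk in the rest of this merge is this same mechanical conversion away from __get_cpu_var(). A minimal single-threaded userspace model of the expression mapping, assuming the usual semantics; the stand-in macros below only illustrate the rewrite, not real per-CPU addressing:

#include <stdio.h>

static int debug_stack_usage;			/* DEFINE_PER_CPU(int, ...) stand-in */

#define __get_cpu_var(var)	(var)		/* old: yields an lvalue */
#define __this_cpu_inc(var)	((var)++)	/* new: dedicated operation */
#define __this_cpu_dec(var)	((var)--)
#define __this_cpu_read(var)	(var)
#define this_cpu_ptr(ptr)	(ptr)		/* new: pointer, not lvalue */

int main(void)
{
	__get_cpu_var(debug_stack_usage)++;	/* old spelling */
	__this_cpu_inc(debug_stack_usage);	/* new spelling */
	printf("%d\n", __this_cpu_read(debug_stack_usage));	/* prints 2 */
	return 0;
}

Presumably the point of the dedicated operations is that the kernel can emit a single segment-relative instruction instead of first computing the per-CPU address; the diff itself only shows the spelling change.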
diff --git a/arch/x86/include/asm/dma-contiguous.h b/arch/x86/include/asm/dma-contiguous.h
deleted file mode 100644
index b4b38bacb404..000000000000
--- a/arch/x86/include/asm/dma-contiguous.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef ASMX86_DMA_CONTIGUOUS_H
-#define ASMX86_DMA_CONTIGUOUS_H
-
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-
-static inline void
-dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
-
-#endif
-#endif
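Deleting this header works together with the Kbuild hunk above: once "generic-y += dma-contiguous.h" is listed, kbuild generates a one-line wrapper in the generated-headers directory so that <asm/dma-contiguous.h> resolves to the asm-generic copy. Roughly, the generated wrapper is just:

/* include/generated/asm/dma-contiguous.h (approximate shape) */
#include <asm-generic/dma-contiguous.h>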
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 53cdfb2857ab..4421b5da409d 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -27,7 +27,6 @@
 #include <asm/insn.h>
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
-#define ARCH_SUPPORTS_KPROBES_ON_FTRACE
 
 struct pt_regs;
 struct kprobe;
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 85e13ccf15c4..d725382c2ae0 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -189,7 +189,7 @@ static inline int p4_ht_thread(int cpu)
 {
 #ifdef CONFIG_SMP
 	if (smp_num_siblings == 2)
-		return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
+		return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map));
 #endif
 	return 0;
 }
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index c63e925fd6b7..a00ad8f2a657 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -164,7 +164,7 @@ struct uv_hub_info_s {
 };
 
 DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
-#define uv_hub_info		(&__get_cpu_var(__uv_hub_info))
+#define uv_hub_info		this_cpu_ptr(&__uv_hub_info)
 #define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))
 
 /*
@@ -601,16 +601,16 @@ struct uv_hub_nmi_s {
 
 struct uv_cpu_nmi_s {
 	struct uv_hub_nmi_s	*hub;
-	atomic_t		state;
-	atomic_t		pinging;
+	int			state;
+	int			pinging;
 	int			queries;
 	int			pings;
 };
 
-DECLARE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
-#define uv_cpu_nmi			(__get_cpu_var(__uv_cpu_nmi))
+DECLARE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
+
 #define uv_hub_nmi			(uv_cpu_nmi.hub)
-#define uv_cpu_nmi_per(cpu)		(per_cpu(__uv_cpu_nmi, cpu))
+#define uv_cpu_nmi_per(cpu)		(per_cpu(uv_cpu_nmi, cpu))
 #define uv_hub_nmi_per(cpu)		(uv_cpu_nmi_per(cpu).hub)
 
 /* uv_cpu_nmi_states */
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index af5b08ab3b71..5972b108f15a 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -146,7 +146,7 @@ static inline int is_apbt_capable(void)
 static int __init apbt_clockevent_register(void)
 {
 	struct sfi_timer_table_entry *mtmr;
-	struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev);
+	struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev);
 
 	mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
 	if (mtmr == NULL) {
@@ -200,7 +200,7 @@ void apbt_setup_secondary_clock(void)
 	if (!cpu)
 		return;
 
-	adev = &__get_cpu_var(cpu_apbt_dev);
+	adev = this_cpu_ptr(&cpu_apbt_dev);
 	if (!adev->timer) {
 		adev->timer = dw_apb_clockevent_init(cpu, adev->name,
 			APBT_CLOCKEVENT_RATING, adev_virt_addr(adev),
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 24b5894396a0..ba6cc041edb1 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -561,7 +561,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
  */
 static void setup_APIC_timer(void)
 {
-	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
 
 	if (this_cpu_has(X86_FEATURE_ARAT)) {
 		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
@@ -696,7 +696,7 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
 
 static int __init calibrate_APIC_clock(void)
 {
-	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
 	void (*real_handler)(struct clock_event_device *dev);
 	unsigned long deltaj;
 	long delta, deltatsc;
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 6ce600f9bc78..e658f21681c8 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -42,7 +42,7 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
 	 * We are to modify mask, so we need an own copy
 	 * and be sure it's manipulated with irq off.
 	 */
-	ipi_mask_ptr = __raw_get_cpu_var(ipi_mask);
+	ipi_mask_ptr = this_cpu_cpumask_var_ptr(ipi_mask);
 	cpumask_copy(ipi_mask_ptr, mask);
 
 	/*
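cpumask_var_t needs its own accessor because its representation depends on CONFIG_CPUMASK_OFFSTACK: with it set, the per-CPU slot holds a pointer to separately allocated storage; without it, the slot embeds the mask itself. A sketch of how the helper plausibly reconciles the two cases, reconstructed from memory of the generic definition, so treat it as an assumption rather than the actual macro:

#ifdef CONFIG_CPUMASK_OFFSTACK
/* the per-CPU slot stores a pointer -- read it out */
#define this_cpu_cpumask_var_ptr(x)	this_cpu_read(x)
#else
/* the per-CPU slot stores the mask itself -- take its address */
#define this_cpu_cpumask_var_ptr(x)	this_cpu_ptr(x)
#endif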
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 700f958652f8..4b4f78c9ba19 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -964,6 +964,7 @@ static void vgetcpu_set_mode(void)
 		vgetcpu_mode = VGETCPU_LSL;
 }
 
+#ifdef CONFIG_IA32_EMULATION
 /* May not be __init: called during resume */
 static void syscall32_cpu_init(void)
 {
@@ -975,7 +976,8 @@ static void syscall32_cpu_init(void)
 
 	wrmsrl(MSR_CSTAR, ia32_cstar_target);
 }
-#endif
+#endif /* CONFIG_IA32_EMULATION */
+#endif /* CONFIG_X86_64 */
 
 #ifdef CONFIG_X86_32
 void enable_sep_cpu(void)
@@ -1198,9 +1200,9 @@ DEFINE_PER_CPU(int, debug_stack_usage);
 
 int is_debug_stack(unsigned long addr)
 {
-	return __get_cpu_var(debug_stack_usage) ||
-		(addr <= __get_cpu_var(debug_stack_addr) &&
-		 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
+	return __this_cpu_read(debug_stack_usage) ||
+		(addr <= __this_cpu_read(debug_stack_addr) &&
+		 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
 }
 NOKPROBE_SYMBOL(is_debug_stack);
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 5ac2d1fb28bc..4cfba4371a71 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -83,7 +83,7 @@ static DEFINE_MUTEX(mce_inject_mutex);
 static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();
-	struct mce *m = &__get_cpu_var(injectm);
+	struct mce *m = this_cpu_ptr(&injectm);
 	if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
 		return NMI_DONE;
 	cpumask_clear_cpu(cpu, mce_inject_cpumask);
@@ -97,7 +97,7 @@ static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
 static void mce_irq_ipi(void *info)
 {
 	int cpu = smp_processor_id();
-	struct mce *m = &__get_cpu_var(injectm);
+	struct mce *m = this_cpu_ptr(&injectm);
 
 	if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
 			m->inject_flags & MCJ_EXCEPTION) {
@@ -109,7 +109,7 @@ static void mce_irq_ipi(void *info)
 /* Inject mce on current CPU */
 static int raise_local(void)
 {
-	struct mce *m = &__get_cpu_var(injectm);
+	struct mce *m = this_cpu_ptr(&injectm);
 	int context = MCJ_CTX(m->inject_flags);
 	int ret = 0;
 	int cpu = m->extcpu;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bd9ccda8087f..61a9668cebfd 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -400,7 +400,7 @@ static u64 mce_rdmsrl(u32 msr)
 
 		if (offset < 0)
 			return 0;
-		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
+		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
 	}
 
 	if (rdmsrl_safe(msr, &v)) {
@@ -422,7 +422,7 @@ static void mce_wrmsrl(u32 msr, u64 v)
 		int offset = msr_to_offset(msr);
 
 		if (offset >= 0)
-			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
+			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
 		return;
 	}
 	wrmsrl(msr, v);
@@ -478,7 +478,7 @@ static DEFINE_PER_CPU(struct mce_ring, mce_ring);
 /* Runs with CPU affinity in workqueue */
 static int mce_ring_empty(void)
 {
-	struct mce_ring *r = &__get_cpu_var(mce_ring);
+	struct mce_ring *r = this_cpu_ptr(&mce_ring);
 
 	return r->start == r->end;
 }
@@ -490,7 +490,7 @@ static int mce_ring_get(unsigned long *pfn)
 
 	*pfn = 0;
 	get_cpu();
-	r = &__get_cpu_var(mce_ring);
+	r = this_cpu_ptr(&mce_ring);
 	if (r->start == r->end)
 		goto out;
 	*pfn = r->ring[r->start];
@@ -504,7 +504,7 @@ out:
 /* Always runs in MCE context with preempt off */
 static int mce_ring_add(unsigned long pfn)
 {
-	struct mce_ring *r = &__get_cpu_var(mce_ring);
+	struct mce_ring *r = this_cpu_ptr(&mce_ring);
 	unsigned next;
 
 	next = (r->end + 1) % MCE_RING_SIZE;
@@ -526,7 +526,7 @@ int mce_available(struct cpuinfo_x86 *c)
 static void mce_schedule_work(void)
 {
 	if (!mce_ring_empty())
-		schedule_work(&__get_cpu_var(mce_work));
+		schedule_work(this_cpu_ptr(&mce_work));
 }
 
 DEFINE_PER_CPU(struct irq_work, mce_irq_work);
@@ -551,7 +551,7 @@ static void mce_report_event(struct pt_regs *regs)
 		return;
 	}
 
-	irq_work_queue(&__get_cpu_var(mce_irq_work));
+	irq_work_queue(this_cpu_ptr(&mce_irq_work));
 }
 
 /*
@@ -1045,7 +1045,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 
 	mce_gather_info(&m, regs);
 
-	final = &__get_cpu_var(mces_seen);
+	final = this_cpu_ptr(&mces_seen);
 	*final = m;
 
 	memset(valid_banks, 0, sizeof(valid_banks));
@@ -1278,22 +1278,22 @@ static unsigned long (*mce_adjust_timer)(unsigned long interval) =
 
 static int cmc_error_seen(void)
 {
-	unsigned long *v = &__get_cpu_var(mce_polled_error);
+	unsigned long *v = this_cpu_ptr(&mce_polled_error);
 
 	return test_and_clear_bit(0, v);
 }
 
 static void mce_timer_fn(unsigned long data)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned long iv;
 	int notify;
 
 	WARN_ON(smp_processor_id() != data);
 
-	if (mce_available(__this_cpu_ptr(&cpu_info))) {
+	if (mce_available(this_cpu_ptr(&cpu_info))) {
 		machine_check_poll(MCP_TIMESTAMP,
-				&__get_cpu_var(mce_poll_banks));
+				this_cpu_ptr(&mce_poll_banks));
 		mce_intel_cmci_poll();
 	}
 
@@ -1323,7 +1323,7 @@ static void mce_timer_fn(unsigned long data)
  */
 void mce_timer_kick(unsigned long interval)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned long when = jiffies + interval;
 	unsigned long iv = __this_cpu_read(mce_next_interval);
 
@@ -1659,7 +1659,7 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
 
 static void __mcheck_cpu_init_timer(void)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
+	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned int cpu = smp_processor_id();
 
 	setup_timer(t, mce_timer_fn, cpu);
@@ -1702,8 +1702,8 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_vendor(c);
 	__mcheck_cpu_init_timer();
-	INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
-	init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
+	INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work);
+	init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb);
 }
 
 /*
@@ -1955,7 +1955,7 @@ static struct miscdevice mce_chrdev_device = {
 static void __mce_disable_bank(void *arg)
 {
 	int bank = *((int *)arg);
-	__clear_bit(bank, __get_cpu_var(mce_poll_banks));
+	__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
 	cmci_disable_bank(bank);
 }
 
@@ -2065,7 +2065,7 @@ static void mce_syscore_shutdown(void)
 static void mce_syscore_resume(void)
 {
 	__mcheck_cpu_init_generic();
-	__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
+	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
 }
 
 static struct syscore_ops mce_syscore_ops = {
@@ -2080,7 +2080,7 @@ static struct syscore_ops mce_syscore_ops = {
 
 static void mce_cpu_restart(void *data)
 {
-	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_timer();
@@ -2096,14 +2096,14 @@ static void mce_restart(void)
 /* Toggle features for corrected errors */
 static void mce_disable_cmci(void *data)
 {
-	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 	cmci_clear();
 }
 
 static void mce_enable_ce(void *all)
 {
-	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 	cmci_reenable();
 	cmci_recheck();
@@ -2336,7 +2336,7 @@ static void mce_disable_cpu(void *h)
 	unsigned long action = *(unsigned long *)h;
 	int i;
 
-	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 
 	if (!(action & CPU_TASKS_FROZEN))
@@ -2354,7 +2354,7 @@ static void mce_reenable_cpu(void *h)
 	unsigned long action = *(unsigned long *)h;
 	int i;
 
-	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 
 	if (!(action & CPU_TASKS_FROZEN))
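Note the two different replacements for __this_cpu_ptr() in this file: this_cpu_ptr() in paths that run pinned to a CPU, raw_cpu_ptr() in paths such as mce_syscore_resume() where no such guarantee is asserted. Assuming the usual distinction (this_cpu_ptr() may carry a preemption debug check, raw_cpu_ptr() never does), a userspace model with stand-in macros:

#include <assert.h>

static int preemption_disabled = 1;	/* preempt-count stand-in */

#define raw_cpu_ptr(ptr)	(ptr)	/* never checked */
#define this_cpu_ptr(ptr)	(assert(preemption_disabled), (ptr))

struct cpu_info { int initialized; };
static struct cpu_info cpu_info;	/* per-CPU variable stand-in */

int main(void)
{
	struct cpu_info *a = this_cpu_ptr(&cpu_info);	/* checked access */
	struct cpu_info *b = raw_cpu_ptr(&cpu_info);	/* unchecked access */
	return a != b;	/* same object in this model: exits 0 */
}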
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 1e49f8f41276..5d4999f95aec 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -310,7 +310,7 @@ static void amd_threshold_interrupt(void)
 			 * event.
 			 */
 			machine_check_poll(MCP_TIMESTAMP,
-					&__get_cpu_var(mce_poll_banks));
+					this_cpu_ptr(&mce_poll_banks));
 
 			if (high & MASK_OVERFLOW_HI) {
 				rdmsrl(address, m.misc);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 3bdb95ae8c43..b3c97bafc123 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -86,7 +86,7 @@ void mce_intel_cmci_poll(void)
 {
 	if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
 		return;
-	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
 }
 
 void mce_intel_hcpu_update(unsigned long cpu)
@@ -145,7 +145,7 @@ static void cmci_storm_disable_banks(void)
 	u64 val;
 
 	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
-	owned = __get_cpu_var(mce_banks_owned);
+	owned = this_cpu_ptr(mce_banks_owned);
 	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
 		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
 		val &= ~MCI_CTL2_CMCI_EN;
@@ -195,7 +195,7 @@ static void intel_threshold_interrupt(void)
 {
 	if (cmci_storm_detect())
 		return;
-	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
 	mce_notify_irq();
 }
 
@@ -206,7 +206,7 @@ static void intel_threshold_interrupt(void)
  */
 static void cmci_discover(int banks)
 {
-	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+	unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
 	unsigned long flags;
 	int i;
 	int bios_wrong_thresh = 0;
@@ -228,7 +228,7 @@ static void cmci_discover(int banks)
 		/* Already owned by someone else? */
 		if (val & MCI_CTL2_CMCI_EN) {
 			clear_bit(i, owned);
-			__clear_bit(i, __get_cpu_var(mce_poll_banks));
+			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
 			continue;
 		}
 
@@ -252,7 +252,7 @@ static void cmci_discover(int banks)
 		/* Did the enable bit stick? -- the bank supports CMCI */
 		if (val & MCI_CTL2_CMCI_EN) {
 			set_bit(i, owned);
-			__clear_bit(i, __get_cpu_var(mce_poll_banks));
+			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
 			/*
 			 * We are able to set thresholds for some banks that
 			 * had a threshold of 0. This means the BIOS has not
@@ -263,7 +263,7 @@ static void cmci_discover(int banks)
 			    (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
 				bios_wrong_thresh = 1;
 		} else {
-			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+			WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks)));
 		}
 	}
 	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
@@ -284,10 +284,10 @@ void cmci_recheck(void)
 	unsigned long flags;
 	int banks;
 
-	if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
+	if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
 		return;
 	local_irq_save(flags);
-	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
 	local_irq_restore(flags);
 }
 
@@ -296,12 +296,12 @@ static void __cmci_disable_bank(int bank)
 {
 	u64 val;
 
-	if (!test_bit(bank, __get_cpu_var(mce_banks_owned)))
+	if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
 		return;
 	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
 	val &= ~MCI_CTL2_CMCI_EN;
 	wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
-	__clear_bit(bank, __get_cpu_var(mce_banks_owned));
+	__clear_bit(bank, this_cpu_ptr(mce_banks_owned));
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 16c73022306e..1b8299dd3d91 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -494,7 +494,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
 
 void x86_pmu_disable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -512,7 +512,7 @@ void x86_pmu_disable_all(void)
 
 static void x86_pmu_disable(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (!x86_pmu_initialized())
 		return;
@@ -529,7 +529,7 @@ static void x86_pmu_disable(struct pmu *pmu)
 
 void x86_pmu_enable_all(int added)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -876,7 +876,7 @@ static void x86_pmu_start(struct perf_event *event, int flags);
 
 static void x86_pmu_enable(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct perf_event *event;
 	struct hw_perf_event *hwc;
 	int i, added = cpuc->n_added;
@@ -1030,7 +1030,7 @@ void x86_pmu_enable_event(struct perf_event *event)
  */
 static int x86_pmu_add(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc;
 	int assign[X86_PMC_IDX_MAX];
 	int n, n0, ret;
@@ -1081,7 +1081,7 @@ out:
 
 static void x86_pmu_start(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx = event->hw.idx;
 
 	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
@@ -1160,7 +1160,7 @@ void perf_event_print_debug(void)
 
 void x86_pmu_stop(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 
 	if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
@@ -1182,7 +1182,7 @@ void x86_pmu_stop(struct perf_event *event, int flags)
 
 static void x86_pmu_del(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int i;
 
 	/*
@@ -1237,7 +1237,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
 	int idx, handled = 0;
 	u64 val;
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
+	cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	/*
 	 * Some chipsets need to unmask the LVTPC in a particular spot
@@ -1646,7 +1646,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
  */
 static int x86_pmu_commit_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int assign[X86_PMC_IDX_MAX];
 	int n, ret;
 
@@ -2005,7 +2005,7 @@ static unsigned long get_segment_base(unsigned int segment)
 		if (idx > GDT_ENTRIES)
 			return 0;
 
-		desc = __this_cpu_ptr(&gdt_page.gdt[0]);
+		desc = raw_cpu_ptr(gdt_page.gdt);
 	}
 
 	return get_desc_base(desc + idx);
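The perf hunks here and in the files below all take a pointer to the per-CPU cpu_hw_events, so the rewrite is purely syntactic: the address-of operator moves inside the macro argument. Complementing the scalar-operation sketch earlier, a stand-in model of just this pointer form:

struct cpu_hw_events { int enabled; };
static struct cpu_hw_events cpu_hw_events;	/* DEFINE_PER_CPU stand-in */

#define __get_cpu_var(var)	(var)		/* old: lvalue, caller adds & */
#define this_cpu_ptr(ptr)	(ptr)		/* new: takes the address */

struct cpu_hw_events *old_way(void) { return &__get_cpu_var(cpu_hw_events); }
struct cpu_hw_events *new_way(void) { return this_cpu_ptr(&cpu_hw_events); }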
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index beeb7cc07044..28926311aac1 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -699,7 +699,7 @@ __init int amd_pmu_init(void)
 
 void amd_pmu_enable_virt(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	cpuc->perf_ctr_virt_mask = 0;
 
@@ -711,7 +711,7 @@ EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
 
 void amd_pmu_disable_virt(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	/*
 	 * We only mask out the Host-only bit so that host-only counting works
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 3851def5057c..a73947c53b65 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1174,7 +1174,7 @@ static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
 
 static void intel_pmu_disable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
@@ -1187,7 +1187,7 @@ static void intel_pmu_disable_all(void)
 
 static void intel_pmu_enable_all(int added)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	intel_pmu_pebs_enable_all();
 	intel_pmu_lbr_enable_all();
@@ -1221,7 +1221,7 @@ static void intel_pmu_enable_all(int added)
  */
 static void intel_pmu_nhm_workaround(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	static const unsigned long nhm_magic[4] = {
 		0x4300B5,
 		0x4300D2,
@@ -1320,7 +1320,7 @@ static inline bool event_is_checkpointed(struct perf_event *event)
 static void intel_pmu_disable_event(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
 		intel_pmu_disable_bts();
@@ -1384,7 +1384,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 static void intel_pmu_enable_event(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
 		if (!__this_cpu_read(cpu_hw_events.enabled))
@@ -1478,7 +1478,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	u64 status;
 	int handled;
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
+	cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	/*
 	 * No known reason to not always do late ACK,
@@ -1910,7 +1910,7 @@ EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
 
 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
 
 	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
@@ -1931,7 +1931,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
 
 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
 	int idx;
 
@@ -1965,7 +1965,7 @@ static void core_pmu_enable_event(struct perf_event *event)
 
 static void core_pmu_enable_all(int added)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index b1553d05a5cb..46211bcc813e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -474,7 +474,7 @@ void intel_pmu_enable_bts(u64 config)
 
 void intel_pmu_disable_bts(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	unsigned long debugctlmsr;
 
 	if (!cpuc->ds)
@@ -491,7 +491,7 @@ void intel_pmu_disable_bts(void)
 
 int intel_pmu_drain_bts_buffer(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct debug_store *ds = cpuc->ds;
 	struct bts_record {
 		u64	from;
@@ -669,7 +669,7 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 
 void intel_pmu_pebs_enable(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 
 	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
@@ -684,7 +684,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 
 void intel_pmu_pebs_disable(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 
 	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
@@ -702,7 +702,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 
 void intel_pmu_pebs_enable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (cpuc->pebs_enabled)
 		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
@@ -710,7 +710,7 @@ void intel_pmu_pebs_enable_all(void)
 
 void intel_pmu_pebs_disable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (cpuc->pebs_enabled)
 		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
@@ -718,7 +718,7 @@ void intel_pmu_pebs_disable_all(void)
 
 static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	unsigned long from = cpuc->lbr_entries[0].from;
 	unsigned long old_to, to = cpuc->lbr_entries[0].to;
 	unsigned long ip = regs->ip;
@@ -829,7 +829,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 	 * We cast to the biggest pebs_record but are careful not to
 	 * unconditionally access the 'extra' entries.
 	 */
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct pebs_record_hsw *pebs = __pebs;
 	struct perf_sample_data data;
 	struct pt_regs regs;
@@ -916,7 +916,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 
 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct debug_store *ds = cpuc->ds;
 	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
 	struct pebs_record_core *at, *top;
@@ -957,7 +957,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 
 static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct debug_store *ds = cpuc->ds;
 	struct perf_event *event = NULL;
 	void *at, *top;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 4af10617de33..45fa730a5283 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -133,7 +133,7 @@ static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
 static void __intel_pmu_lbr_enable(void)
 {
 	u64 debugctl;
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (cpuc->lbr_sel)
 		wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config);
@@ -183,7 +183,7 @@ void intel_pmu_lbr_reset(void)
 
 void intel_pmu_lbr_enable(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (!x86_pmu.lbr_nr)
 		return;
@@ -203,7 +203,7 @@ void intel_pmu_lbr_enable(struct perf_event *event)
 
 void intel_pmu_lbr_disable(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (!x86_pmu.lbr_nr)
 		return;
@@ -220,7 +220,7 @@ void intel_pmu_lbr_disable(struct perf_event *event)
 
 void intel_pmu_lbr_enable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (cpuc->lbr_users)
 		__intel_pmu_lbr_enable();
@@ -228,7 +228,7 @@ void intel_pmu_lbr_enable_all(void)
 
 void intel_pmu_lbr_disable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (cpuc->lbr_users)
 		__intel_pmu_lbr_disable();
@@ -332,7 +332,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 
 void intel_pmu_lbr_read(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (!cpuc->lbr_users)
 		return;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 619f7699487a..d64f275fe274 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -135,7 +135,7 @@ static inline u64 rapl_scale(u64 v)
 	 * or use ldexp(count, -32).
 	 * Watts = Joules/Time delta
 	 */
-	return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit);
+	return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
 }
 
 static u64 rapl_event_update(struct perf_event *event)
@@ -187,7 +187,7 @@ static void rapl_stop_hrtimer(struct rapl_pmu *pmu)
 
 static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
 {
-	struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
 	struct perf_event *event;
 	unsigned long flags;
 
@@ -234,7 +234,7 @@ static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
 
 static void rapl_pmu_event_start(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
 	unsigned long flags;
 
 	spin_lock_irqsave(&pmu->lock, flags);
@@ -244,7 +244,7 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode)
 
 static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 
@@ -278,7 +278,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 
 static int rapl_pmu_event_add(struct perf_event *event, int mode)
 {
-	struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 
@@ -696,7 +696,7 @@ static int __init rapl_pmu_init(void)
 		return -1;
 	}
 
-	pmu = __get_cpu_var(rapl_pmu);
+	pmu = __this_cpu_read(rapl_pmu);
 
 	pr_info("RAPL PMU detected, hw unit 2^-%d Joules,"
 		" API unit is 2^-32 Joules,"
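rapl_pmu differs from the other conversions above: it is a per-CPU pointer, so the new code reads the pointer (or even a field through it) with __this_cpu_read() instead of forming an lvalue first. A userspace model with stand-in macros, for illustration only:

#include <stdio.h>

struct rapl_pmu { int hw_unit; };
static struct rapl_pmu *rapl_pmu;	/* DEFINE_PER_CPU(struct rapl_pmu *, ...) stand-in */

#define __get_cpu_var(var)	(var)
#define __this_cpu_read(expr)	(expr)

int main(void)
{
	static struct rapl_pmu instance = { .hw_unit = 32 };
	rapl_pmu = &instance;
	printf("%d %d\n",
	       __get_cpu_var(rapl_pmu)->hw_unit,	/* old spelling */
	       __this_cpu_read(rapl_pmu->hw_unit));	/* new spelling */
	return 0;
}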
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
index 838fa8772c62..5b0c232d1ee6 100644
--- a/arch/x86/kernel/cpu/perf_event_knc.c
+++ b/arch/x86/kernel/cpu/perf_event_knc.c
@@ -217,7 +217,7 @@ static int knc_pmu_handle_irq(struct pt_regs *regs)
 	int bit, loops;
 	u64 status;
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
+	cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	knc_pmu_disable_all();
 
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 5d466b7d8609..f2e56783af3d 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -915,7 +915,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
 
 static void p4_pmu_disable_all(void)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -984,7 +984,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
 
 static void p4_pmu_enable_all(int added)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -1004,7 +1004,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 	int idx, handled = 0;
 	u64 val;
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
+	cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		int overflow;
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index a618fcd2c07d..f5ab56d14287 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
@@ -237,7 +237,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced, | |||
237 | ced->max_nr_ranges++; | 237 | ced->max_nr_ranges++; |
238 | 238 | ||
239 | /* If crashk_low_res is not 0, another range split possible */ | 239 | /* If crashk_low_res is not 0, another range split possible */ |
240 | if (crashk_low_res.end != 0) | 240 | if (crashk_low_res.end) |
241 | ced->max_nr_ranges++; | 241 | ced->max_nr_ranges++; |
242 | } | 242 | } |
243 | 243 | ||
@@ -335,9 +335,11 @@ static int elf_header_exclude_ranges(struct crash_elf_data *ced, | |||
335 | if (ret) | 335 | if (ret) |
336 | return ret; | 336 | return ret; |
337 | 337 | ||
338 | ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end); | 338 | if (crashk_low_res.end) { |
339 | if (ret) | 339 | ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end); |
340 | return ret; | 340 | if (ret) |
341 | return ret; | ||
342 | } | ||
341 | 343 | ||
342 | /* Exclude GART region */ | 344 | /* Exclude GART region */ |
343 | if (ced->gart_end) { | 345 | if (ced->gart_end) { |
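
The crash.c change makes the low crash-kernel exclusion conditional:
when crashk_low_res was never reserved its end stays 0, and the empty
range must not be passed to exclude_mem_range(). A hedged sketch of the
guard, with demo_exclude() as a hypothetical stand-in:

    #include <linux/ioport.h>

    int demo_exclude(resource_size_t start, resource_size_t end);

    /* Only exclude the low range if it was actually reserved. */
    static int demo_exclude_low_range(const struct resource *res)
    {
            if (!res->end)          /* never reserved: nothing to do */
                    return 0;
            return demo_exclude(res->start, res->end);
    }
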
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 4b0e1dfa2226..b553ed89e5f5 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -449,12 +449,11 @@ sysenter_audit: | |||
449 | jnz syscall_trace_entry | 449 | jnz syscall_trace_entry |
450 | addl $4,%esp | 450 | addl $4,%esp |
451 | CFI_ADJUST_CFA_OFFSET -4 | 451 | CFI_ADJUST_CFA_OFFSET -4 |
452 | /* %esi already in 8(%esp) 6th arg: 4th syscall arg */ | 452 | movl %esi,4(%esp) /* 5th arg: 4th syscall arg */ |
453 | /* %edx already in 4(%esp) 5th arg: 3rd syscall arg */ | 453 | movl %edx,(%esp) /* 4th arg: 3rd syscall arg */ |
454 | /* %ecx already in 0(%esp) 4th arg: 2nd syscall arg */ | 454 | /* %ecx already in %ecx 3rd arg: 2nd syscall arg */ |
455 | movl %ebx,%ecx /* 3rd arg: 1st syscall arg */ | 455 | movl %ebx,%edx /* 2nd arg: 1st syscall arg */ |
456 | movl %eax,%edx /* 2nd arg: syscall number */ | 456 | /* %eax already in %eax 1st arg: syscall number */ |
457 | movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */ | ||
458 | call __audit_syscall_entry | 457 | call __audit_syscall_entry |
459 | pushl_cfi %ebx | 458 | pushl_cfi %ebx |
460 | movl PT_EAX(%esp),%eax /* reload syscall number */ | 459 | movl PT_EAX(%esp),%eax /* reload syscall number */ |
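
This stub and the ia32entry.S one shrink by one argument for the same
reason: __audit_syscall_entry() no longer takes the audit arch as its
first parameter, so every remaining argument moves down one slot. In C
terms the interface change is roughly the following (a sketch, not the
verbatim kernel prototypes):

    /* Before (sketch): the caller supplied the audit arch.
     *
     *   void __audit_syscall_entry(int arch, int major,
     *                              unsigned long a1, unsigned long a2,
     *                              unsigned long a3, unsigned long a4);
     *
     * After: the audit core derives the arch itself (compare the
     * syscall_get_arch() helper added for UML later in this diff),
     * so the syscall number becomes the first argument.
     */
    void __audit_syscall_entry(int major,
                               unsigned long a1, unsigned long a2,
                               unsigned long a3, unsigned long a4);
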
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 5f9cf20cdb68..3d5fb509bdeb 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c | |||
@@ -108,7 +108,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
108 | int i; | 108 | int i; |
109 | 109 | ||
110 | for (i = 0; i < HBP_NUM; i++) { | 110 | for (i = 0; i < HBP_NUM; i++) { |
111 | struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); | 111 | struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); |
112 | 112 | ||
113 | if (!*slot) { | 113 | if (!*slot) { |
114 | *slot = bp; | 114 | *slot = bp; |
@@ -122,7 +122,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
122 | set_debugreg(info->address, i); | 122 | set_debugreg(info->address, i); |
123 | __this_cpu_write(cpu_debugreg[i], info->address); | 123 | __this_cpu_write(cpu_debugreg[i], info->address); |
124 | 124 | ||
125 | dr7 = &__get_cpu_var(cpu_dr7); | 125 | dr7 = this_cpu_ptr(&cpu_dr7); |
126 | *dr7 |= encode_dr7(i, info->len, info->type); | 126 | *dr7 |= encode_dr7(i, info->len, info->type); |
127 | 127 | ||
128 | set_debugreg(*dr7, 7); | 128 | set_debugreg(*dr7, 7); |
@@ -146,7 +146,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) | |||
146 | int i; | 146 | int i; |
147 | 147 | ||
148 | for (i = 0; i < HBP_NUM; i++) { | 148 | for (i = 0; i < HBP_NUM; i++) { |
149 | struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); | 149 | struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); |
150 | 150 | ||
151 | if (*slot == bp) { | 151 | if (*slot == bp) { |
152 | *slot = NULL; | 152 | *slot = NULL; |
@@ -157,7 +157,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) | |||
157 | if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) | 157 | if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) |
158 | return; | 158 | return; |
159 | 159 | ||
160 | dr7 = &__get_cpu_var(cpu_dr7); | 160 | dr7 = this_cpu_ptr(&cpu_dr7); |
161 | *dr7 &= ~__encode_dr7(i, info->len, info->type); | 161 | *dr7 &= ~__encode_dr7(i, info->len, info->type); |
162 | 162 | ||
163 | set_debugreg(*dr7, 7); | 163 | set_debugreg(*dr7, 7); |
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 4d1c746892eb..e4b503d5558c 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c | |||
@@ -52,13 +52,13 @@ static inline void stack_overflow_check(struct pt_regs *regs) | |||
52 | regs->sp <= curbase + THREAD_SIZE) | 52 | regs->sp <= curbase + THREAD_SIZE) |
53 | return; | 53 | return; |
54 | 54 | ||
55 | irq_stack_top = (u64)__get_cpu_var(irq_stack_union.irq_stack) + | 55 | irq_stack_top = (u64)this_cpu_ptr(irq_stack_union.irq_stack) + |
56 | STACK_TOP_MARGIN; | 56 | STACK_TOP_MARGIN; |
57 | irq_stack_bottom = (u64)__get_cpu_var(irq_stack_ptr); | 57 | irq_stack_bottom = (u64)__this_cpu_read(irq_stack_ptr); |
58 | if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom) | 58 | if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom) |
59 | return; | 59 | return; |
60 | 60 | ||
61 | oist = &__get_cpu_var(orig_ist); | 61 | oist = this_cpu_ptr(&orig_ist); |
62 | estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN; | 62 | estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN; |
63 | estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1]; | 63 | estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1]; |
64 | if (regs->sp >= estack_top && regs->sp <= estack_bottom) | 64 | if (regs->sp >= estack_top && regs->sp <= estack_bottom) |
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index 9642b9b33655..ca05f86481aa 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <asm/setup.h> | 26 | #include <asm/setup.h> |
27 | #include <asm/crash.h> | 27 | #include <asm/crash.h> |
28 | #include <asm/efi.h> | 28 | #include <asm/efi.h> |
29 | #include <asm/kexec-bzimage64.h> | ||
29 | 30 | ||
30 | #define MAX_ELFCOREHDR_STR_LEN 30 /* elfcorehdr=0x<64bit-value> */ | 31 | #define MAX_ELFCOREHDR_STR_LEN 30 /* elfcorehdr=0x<64bit-value> */ |
31 | 32 | ||
@@ -267,7 +268,7 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params, | |||
267 | return ret; | 268 | return ret; |
268 | } | 269 | } |
269 | 270 | ||
270 | int bzImage64_probe(const char *buf, unsigned long len) | 271 | static int bzImage64_probe(const char *buf, unsigned long len) |
271 | { | 272 | { |
272 | int ret = -ENOEXEC; | 273 | int ret = -ENOEXEC; |
273 | struct setup_header *header; | 274 | struct setup_header *header; |
@@ -325,10 +326,10 @@ int bzImage64_probe(const char *buf, unsigned long len) | |||
325 | return ret; | 326 | return ret; |
326 | } | 327 | } |
327 | 328 | ||
328 | void *bzImage64_load(struct kimage *image, char *kernel, | 329 | static void *bzImage64_load(struct kimage *image, char *kernel, |
329 | unsigned long kernel_len, char *initrd, | 330 | unsigned long kernel_len, char *initrd, |
330 | unsigned long initrd_len, char *cmdline, | 331 | unsigned long initrd_len, char *cmdline, |
331 | unsigned long cmdline_len) | 332 | unsigned long cmdline_len) |
332 | { | 333 | { |
333 | 334 | ||
334 | struct setup_header *header; | 335 | struct setup_header *header; |
@@ -514,7 +515,7 @@ out_free_params: | |||
514 | } | 515 | } |
515 | 516 | ||
516 | /* This cleanup function is called after various segments have been loaded */ | 517 | /* This cleanup function is called after various segments have been loaded */ |
517 | int bzImage64_cleanup(void *loader_data) | 518 | static int bzImage64_cleanup(void *loader_data) |
518 | { | 519 | { |
519 | struct bzimage64_data *ldata = loader_data; | 520 | struct bzimage64_data *ldata = loader_data; |
520 | 521 | ||
@@ -528,7 +529,7 @@ int bzImage64_cleanup(void *loader_data) | |||
528 | } | 529 | } |
529 | 530 | ||
530 | #ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG | 531 | #ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG |
531 | int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len) | 532 | static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len) |
532 | { | 533 | { |
533 | bool trusted; | 534 | bool trusted; |
534 | int ret; | 535 | int ret; |
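
The bzImage64 callbacks can become static because nothing calls them by
name from other files any more; the new <asm/kexec-bzimage64.h> include
suggests they are reached through an ops table instead. A sketch of
that registration pattern (struct and field names here are assumptions,
not the kernel's exact definitions):

    struct demo_kexec_file_ops {
            int (*probe)(const char *buf, unsigned long len);
            int (*cleanup)(void *loader_data);
    };

    static int demo_probe(const char *buf, unsigned long len)
    {
            (void)buf; (void)len;
            return -1;      /* the real probe validates setup_header */
    }

    static int demo_cleanup(void *loader_data)
    {
            (void)loader_data;
            return 0;
    }

    /* Only the table itself needs external linkage. */
    const struct demo_kexec_file_ops demo_bzImage64_ops = {
            .probe   = demo_probe,
            .cleanup = demo_cleanup,
    };
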
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 3dd8e2c4d74a..f6945bef2cd1 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
36 | #include <linux/kprobes.h> | 36 | #include <linux/kprobes.h> |
37 | #include <linux/debugfs.h> | 37 | #include <linux/debugfs.h> |
38 | #include <linux/nmi.h> | ||
38 | #include <asm/timer.h> | 39 | #include <asm/timer.h> |
39 | #include <asm/cpu.h> | 40 | #include <asm/cpu.h> |
40 | #include <asm/traps.h> | 41 | #include <asm/traps.h> |
@@ -243,9 +244,9 @@ u32 kvm_read_and_reset_pf_reason(void) | |||
243 | { | 244 | { |
244 | u32 reason = 0; | 245 | u32 reason = 0; |
245 | 246 | ||
246 | if (__get_cpu_var(apf_reason).enabled) { | 247 | if (__this_cpu_read(apf_reason.enabled)) { |
247 | reason = __get_cpu_var(apf_reason).reason; | 248 | reason = __this_cpu_read(apf_reason.reason); |
248 | __get_cpu_var(apf_reason).reason = 0; | 249 | __this_cpu_write(apf_reason.reason, 0); |
249 | } | 250 | } |
250 | 251 | ||
251 | return reason; | 252 | return reason; |
@@ -318,7 +319,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val) | |||
318 | * there's no need for lock or memory barriers. | 319 | * there's no need for lock or memory barriers. |
319 | * An optimization barrier is implied in apic write. | 320 | * An optimization barrier is implied in apic write. |
320 | */ | 321 | */ |
321 | if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi))) | 322 | if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi))) |
322 | return; | 323 | return; |
323 | apic_write(APIC_EOI, APIC_EOI_ACK); | 324 | apic_write(APIC_EOI, APIC_EOI_ACK); |
324 | } | 325 | } |
@@ -329,13 +330,13 @@ void kvm_guest_cpu_init(void) | |||
329 | return; | 330 | return; |
330 | 331 | ||
331 | if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { | 332 | if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) { |
332 | u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason)); | 333 | u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason)); |
333 | 334 | ||
334 | #ifdef CONFIG_PREEMPT | 335 | #ifdef CONFIG_PREEMPT |
335 | pa |= KVM_ASYNC_PF_SEND_ALWAYS; | 336 | pa |= KVM_ASYNC_PF_SEND_ALWAYS; |
336 | #endif | 337 | #endif |
337 | wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED); | 338 | wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED); |
338 | __get_cpu_var(apf_reason).enabled = 1; | 339 | __this_cpu_write(apf_reason.enabled, 1); |
339 | printk(KERN_INFO"KVM setup async PF for cpu %d\n", | 340 | printk(KERN_INFO"KVM setup async PF for cpu %d\n", |
340 | smp_processor_id()); | 341 | smp_processor_id()); |
341 | } | 342 | } |
@@ -344,8 +345,8 @@ void kvm_guest_cpu_init(void) | |||
344 | unsigned long pa; | 345 | unsigned long pa; |
345 | /* Size alignment is implied but just to make it explicit. */ | 346 | /* Size alignment is implied but just to make it explicit. */ |
346 | BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4); | 347 | BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4); |
347 | __get_cpu_var(kvm_apic_eoi) = 0; | 348 | __this_cpu_write(kvm_apic_eoi, 0); |
348 | pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi)) | 349 | pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi)) |
349 | | KVM_MSR_ENABLED; | 350 | | KVM_MSR_ENABLED; |
350 | wrmsrl(MSR_KVM_PV_EOI_EN, pa); | 351 | wrmsrl(MSR_KVM_PV_EOI_EN, pa); |
351 | } | 352 | } |
@@ -356,11 +357,11 @@ void kvm_guest_cpu_init(void) | |||
356 | 357 | ||
357 | static void kvm_pv_disable_apf(void) | 358 | static void kvm_pv_disable_apf(void) |
358 | { | 359 | { |
359 | if (!__get_cpu_var(apf_reason).enabled) | 360 | if (!__this_cpu_read(apf_reason.enabled)) |
360 | return; | 361 | return; |
361 | 362 | ||
362 | wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); | 363 | wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); |
363 | __get_cpu_var(apf_reason).enabled = 0; | 364 | __this_cpu_write(apf_reason.enabled, 0); |
364 | 365 | ||
365 | printk(KERN_INFO"Unregister pv shared memory for cpu %d\n", | 366 | printk(KERN_INFO"Unregister pv shared memory for cpu %d\n", |
366 | smp_processor_id()); | 367 | smp_processor_id()); |
@@ -499,6 +500,13 @@ void __init kvm_guest_init(void) | |||
499 | #else | 500 | #else |
500 | kvm_guest_cpu_init(); | 501 | kvm_guest_cpu_init(); |
501 | #endif | 502 | #endif |
503 | |||
504 | /* | ||
505 | * Hard lockup detection is enabled by default. Disable it, as guests | ||
506 | * can get false positives too easily, for example if the host is | ||
507 | * overcommitted. | ||
508 | */ | ||
509 | watchdog_enable_hardlockup_detector(false); | ||
502 | } | 510 | } |
503 | 511 | ||
504 | static noinline uint32_t __kvm_cpuid_base(void) | 512 | static noinline uint32_t __kvm_cpuid_base(void) |
@@ -716,7 +724,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want) | |||
716 | if (in_nmi()) | 724 | if (in_nmi()) |
717 | return; | 725 | return; |
718 | 726 | ||
719 | w = &__get_cpu_var(klock_waiting); | 727 | w = this_cpu_ptr(&klock_waiting); |
720 | cpu = smp_processor_id(); | 728 | cpu = smp_processor_id(); |
721 | start = spin_time_start(); | 729 | start = spin_time_start(); |
722 | 730 | ||
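
Worth noting in the kvm.c hunks: the this_cpu ops operate on structure
members directly (apf_reason.enabled, apf_reason.reason), while
slow_virt_to_phys() still needs a real pointer, hence this_cpu_ptr(). A
compressed sketch with a hypothetical per-CPU struct:

    #include <linux/percpu.h>
    #include <linux/types.h>

    struct demo_apf {
            u32 reason;
            u32 enabled;
    };

    static DEFINE_PER_CPU(struct demo_apf, demo_apf_reason);

    static u32 demo_read_and_reset(void)
    {
            u32 reason = 0;

            if (__this_cpu_read(demo_apf_reason.enabled)) {
                    reason = __this_cpu_read(demo_apf_reason.reason);
                    __this_cpu_write(demo_apf_reason.reason, 0);
            }
            return reason;
    }
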
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 29576c244699..749b0e423419 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -1445,12 +1445,12 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch) | |||
1445 | { | 1445 | { |
1446 | #ifdef CONFIG_X86_64 | 1446 | #ifdef CONFIG_X86_64 |
1447 | if (arch == AUDIT_ARCH_X86_64) { | 1447 | if (arch == AUDIT_ARCH_X86_64) { |
1448 | audit_syscall_entry(arch, regs->orig_ax, regs->di, | 1448 | audit_syscall_entry(regs->orig_ax, regs->di, |
1449 | regs->si, regs->dx, regs->r10); | 1449 | regs->si, regs->dx, regs->r10); |
1450 | } else | 1450 | } else |
1451 | #endif | 1451 | #endif |
1452 | { | 1452 | { |
1453 | audit_syscall_entry(arch, regs->orig_ax, regs->bx, | 1453 | audit_syscall_entry(regs->orig_ax, regs->bx, |
1454 | regs->cx, regs->dx, regs->si); | 1454 | regs->cx, regs->dx, regs->si); |
1455 | } | 1455 | } |
1456 | } | 1456 | } |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index f7f6a4a157a6..65510f624dfe 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -670,7 +670,7 @@ static int svm_hardware_enable(void) | |||
670 | 670 | ||
671 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { | 671 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { |
672 | wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); | 672 | wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); |
673 | __get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT; | 673 | __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT); |
674 | } | 674 | } |
675 | 675 | ||
676 | 676 | ||
@@ -1313,8 +1313,8 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
1313 | rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); | 1313 | rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); |
1314 | 1314 | ||
1315 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR) && | 1315 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR) && |
1316 | svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) { | 1316 | svm->tsc_ratio != __this_cpu_read(current_tsc_ratio)) { |
1317 | __get_cpu_var(current_tsc_ratio) = svm->tsc_ratio; | 1317 | __this_cpu_write(current_tsc_ratio, svm->tsc_ratio); |
1318 | wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio); | 1318 | wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio); |
1319 | } | 1319 | } |
1320 | } | 1320 | } |
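
The svm.c hunks also show why this per-CPU variable exists at all:
current_tsc_ratio mirrors the last value written to MSR_AMD64_TSC_RATIO
on this CPU so that vcpu load can skip a redundant wrmsrl(). The idiom
reduced to a sketch (demo names):

    #include <linux/percpu.h>
    #include <asm/msr.h>

    static DEFINE_PER_CPU(u64, demo_tsc_ratio);

    /* Program the MSR only when the wanted ratio differs from what
     * this CPU last wrote. */
    static void demo_set_tsc_ratio(u64 want)
    {
            if (want != __this_cpu_read(demo_tsc_ratio)) {
                    __this_cpu_write(demo_tsc_ratio, want);
                    wrmsrl(MSR_AMD64_TSC_RATIO, want);
            }
    }
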
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 04fa1b8298c8..0acac81f198b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -472,6 +472,7 @@ struct vcpu_vmx { | |||
472 | int gs_ldt_reload_needed; | 472 | int gs_ldt_reload_needed; |
473 | int fs_reload_needed; | 473 | int fs_reload_needed; |
474 | u64 msr_host_bndcfgs; | 474 | u64 msr_host_bndcfgs; |
475 | unsigned long vmcs_host_cr4; /* May not match real cr4 */ | ||
475 | } host_state; | 476 | } host_state; |
476 | struct { | 477 | struct { |
477 | int vm86_active; | 478 | int vm86_active; |
@@ -1626,7 +1627,7 @@ static void reload_tss(void) | |||
1626 | /* | 1627 | /* |
1627 | * VT restores TR but not its size. Useless. | 1628 | * VT restores TR but not its size. Useless. |
1628 | */ | 1629 | */ |
1629 | struct desc_ptr *gdt = &__get_cpu_var(host_gdt); | 1630 | struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); |
1630 | struct desc_struct *descs; | 1631 | struct desc_struct *descs; |
1631 | 1632 | ||
1632 | descs = (void *)gdt->address; | 1633 | descs = (void *)gdt->address; |
@@ -1672,7 +1673,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) | |||
1672 | 1673 | ||
1673 | static unsigned long segment_base(u16 selector) | 1674 | static unsigned long segment_base(u16 selector) |
1674 | { | 1675 | { |
1675 | struct desc_ptr *gdt = &__get_cpu_var(host_gdt); | 1676 | struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); |
1676 | struct desc_struct *d; | 1677 | struct desc_struct *d; |
1677 | unsigned long table_base; | 1678 | unsigned long table_base; |
1678 | unsigned long v; | 1679 | unsigned long v; |
@@ -1802,7 +1803,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx) | |||
1802 | */ | 1803 | */ |
1803 | if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded) | 1804 | if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded) |
1804 | stts(); | 1805 | stts(); |
1805 | load_gdt(&__get_cpu_var(host_gdt)); | 1806 | load_gdt(this_cpu_ptr(&host_gdt)); |
1806 | } | 1807 | } |
1807 | 1808 | ||
1808 | static void vmx_load_host_state(struct vcpu_vmx *vmx) | 1809 | static void vmx_load_host_state(struct vcpu_vmx *vmx) |
@@ -1832,7 +1833,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
1832 | } | 1833 | } |
1833 | 1834 | ||
1834 | if (vmx->loaded_vmcs->cpu != cpu) { | 1835 | if (vmx->loaded_vmcs->cpu != cpu) { |
1835 | struct desc_ptr *gdt = &__get_cpu_var(host_gdt); | 1836 | struct desc_ptr *gdt = this_cpu_ptr(&host_gdt); |
1836 | unsigned long sysenter_esp; | 1837 | unsigned long sysenter_esp; |
1837 | 1838 | ||
1838 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); | 1839 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); |
@@ -2771,7 +2772,7 @@ static int hardware_enable(void) | |||
2771 | ept_sync_global(); | 2772 | ept_sync_global(); |
2772 | } | 2773 | } |
2773 | 2774 | ||
2774 | native_store_gdt(&__get_cpu_var(host_gdt)); | 2775 | native_store_gdt(this_cpu_ptr(&host_gdt)); |
2775 | 2776 | ||
2776 | return 0; | 2777 | return 0; |
2777 | } | 2778 | } |
@@ -4267,11 +4268,16 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) | |||
4267 | u32 low32, high32; | 4268 | u32 low32, high32; |
4268 | unsigned long tmpl; | 4269 | unsigned long tmpl; |
4269 | struct desc_ptr dt; | 4270 | struct desc_ptr dt; |
4271 | unsigned long cr4; | ||
4270 | 4272 | ||
4271 | vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */ | 4273 | vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */ |
4272 | vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */ | ||
4273 | vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ | 4274 | vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ |
4274 | 4275 | ||
4276 | /* Save the most likely value for this task's CR4 in the VMCS. */ | ||
4277 | cr4 = read_cr4(); | ||
4278 | vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ | ||
4279 | vmx->host_state.vmcs_host_cr4 = cr4; | ||
4280 | |||
4275 | vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ | 4281 | vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ |
4276 | #ifdef CONFIG_X86_64 | 4282 | #ifdef CONFIG_X86_64 |
4277 | /* | 4283 | /* |
@@ -7514,7 +7520,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) | |||
7514 | static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | 7520 | static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) |
7515 | { | 7521 | { |
7516 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 7522 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
7517 | unsigned long debugctlmsr; | 7523 | unsigned long debugctlmsr, cr4; |
7518 | 7524 | ||
7519 | /* Record the guest's net vcpu time for enforced NMI injections. */ | 7525 | /* Record the guest's net vcpu time for enforced NMI injections. */ |
7520 | if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) | 7526 | if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) |
@@ -7540,6 +7546,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
7540 | if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) | 7546 | if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) |
7541 | vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); | 7547 | vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); |
7542 | 7548 | ||
7549 | cr4 = read_cr4(); | ||
7550 | if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) { | ||
7551 | vmcs_writel(HOST_CR4, cr4); | ||
7552 | vmx->host_state.vmcs_host_cr4 = cr4; | ||
7553 | } | ||
7554 | |||
7543 | /* When single-stepping over STI and MOV SS, we must clear the | 7555 | /* When single-stepping over STI and MOV SS, we must clear the |
7544 | * corresponding interruptibility bits in the guest state. Otherwise | 7556 | * corresponding interruptibility bits in the guest state. Otherwise |
7545 | * vmentry fails as it then expects bit 14 (BS) in pending debug | 7557 | * vmentry fails as it then expects bit 14 (BS) in pending debug |
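
The CR4 hunks apply the same shadow-and-compare idea to VMCS state:
HOST_CR4 is seeded once in vmx_set_constant_host_state() and rewritten
on vmentry only when the live CR4 has drifted, saving a VMWRITE on the
common path. The general shape of the optimization (demo names, trivial
sink):

    /* Keep a shadow of the last value pushed to an expensive sink
     * and skip no-op updates. demo_sink_write() stands in for
     * vmcs_writel(HOST_CR4, ...). */
    struct demo_shadow {
            unsigned long cached;
    };

    static void demo_sink_write(unsigned long v)
    {
            (void)v;        /* the expensive write in the real code */
    }

    static void demo_update(struct demo_shadow *s, unsigned long live)
    {
            if (live != s->cached) {
                    demo_sink_write(live);
                    s->cached = live;
            }
    }
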
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5430e4b0af29..34c8f94331f8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1559,7 +1559,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1559 | 1559 | ||
1560 | /* Keep irq disabled to prevent changes to the clock */ | 1560 | /* Keep irq disabled to prevent changes to the clock */ |
1561 | local_irq_save(flags); | 1561 | local_irq_save(flags); |
1562 | this_tsc_khz = __get_cpu_var(cpu_tsc_khz); | 1562 | this_tsc_khz = __this_cpu_read(cpu_tsc_khz); |
1563 | if (unlikely(this_tsc_khz == 0)) { | 1563 | if (unlikely(this_tsc_khz == 0)) { |
1564 | local_irq_restore(flags); | 1564 | local_irq_restore(flags); |
1565 | kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); | 1565 | kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); |
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index baff1da354e0..af78e50ca6ce 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -86,6 +86,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, | |||
86 | pgprot_t prot; | 86 | pgprot_t prot; |
87 | int retval; | 87 | int retval; |
88 | void __iomem *ret_addr; | 88 | void __iomem *ret_addr; |
89 | int ram_region; | ||
89 | 90 | ||
90 | /* Don't allow wraparound or zero size */ | 91 | /* Don't allow wraparound or zero size */ |
91 | last_addr = phys_addr + size - 1; | 92 | last_addr = phys_addr + size - 1; |
@@ -108,12 +109,23 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, | |||
108 | /* | 109 | /* |
109 | * Don't allow anybody to remap normal RAM that we're using.. | 110 | * Don't allow anybody to remap normal RAM that we're using.. |
110 | */ | 111 | */ |
111 | pfn = phys_addr >> PAGE_SHIFT; | 112 | /* First check if the whole region can be identified as RAM or not */
112 | last_pfn = last_addr >> PAGE_SHIFT; | 113 | ram_region = region_is_ram(phys_addr, size); |
113 | if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, | 114 | if (ram_region > 0) { |
114 | __ioremap_check_ram) == 1) | 115 | WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n", |
116 | (unsigned long int)phys_addr, | ||
117 | (unsigned long int)last_addr); | ||
115 | return NULL; | 118 | return NULL; |
119 | } | ||
116 | 120 | ||
121 | /* If it could not be identified (-1), check page by page */ | ||
122 | if (ram_region < 0) { | ||
123 | pfn = phys_addr >> PAGE_SHIFT; | ||
124 | last_pfn = last_addr >> PAGE_SHIFT; | ||
125 | if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, | ||
126 | __ioremap_check_ram) == 1) | ||
127 | return NULL; | ||
128 | } | ||
117 | /* | 129 | /* |
118 | * Mappings have to be page-aligned | 130 | * Mappings have to be page-aligned |
119 | */ | 131 | */ |
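
region_is_ram() is used as a tri-state here: a positive result means
the whole region is RAM (refuse the mapping), zero means none of it is,
and a negative result means it could not be classified, which is the
only case that still pays for the page-by-page walk. A sketch of
consuming such a result (the demo_* helpers are hypothetical):

    int demo_region_is_ram(unsigned long phys, unsigned long size);
    int demo_page_walk_finds_ram(unsigned long phys, unsigned long size);

    /* Returns 1 if the region may be remapped, 0 if it is RAM. */
    static int demo_may_ioremap(unsigned long phys, unsigned long size)
    {
            int ram = demo_region_is_ram(phys, size);

            if (ram > 0)                    /* whole region is RAM */
                    return 0;
            if (ram < 0)                    /* unknown: slow path */
                    return !demo_page_walk_finds_ram(phys, size);
            return 1;                       /* definitely not RAM */
    }
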
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index dd89a13f1051..b4f2e7e9e907 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c | |||
@@ -140,7 +140,7 @@ static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context); | |||
140 | 140 | ||
141 | bool kmemcheck_active(struct pt_regs *regs) | 141 | bool kmemcheck_active(struct pt_regs *regs) |
142 | { | 142 | { |
143 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 143 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
144 | 144 | ||
145 | return data->balance > 0; | 145 | return data->balance > 0; |
146 | } | 146 | } |
@@ -148,7 +148,7 @@ bool kmemcheck_active(struct pt_regs *regs) | |||
148 | /* Save an address that needs to be shown/hidden */ | 148 | /* Save an address that needs to be shown/hidden */ |
149 | static void kmemcheck_save_addr(unsigned long addr) | 149 | static void kmemcheck_save_addr(unsigned long addr) |
150 | { | 150 | { |
151 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 151 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
152 | 152 | ||
153 | BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr)); | 153 | BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr)); |
154 | data->addr[data->n_addrs++] = addr; | 154 | data->addr[data->n_addrs++] = addr; |
@@ -156,7 +156,7 @@ static void kmemcheck_save_addr(unsigned long addr) | |||
156 | 156 | ||
157 | static unsigned int kmemcheck_show_all(void) | 157 | static unsigned int kmemcheck_show_all(void) |
158 | { | 158 | { |
159 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 159 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
160 | unsigned int i; | 160 | unsigned int i; |
161 | unsigned int n; | 161 | unsigned int n; |
162 | 162 | ||
@@ -169,7 +169,7 @@ static unsigned int kmemcheck_show_all(void) | |||
169 | 169 | ||
170 | static unsigned int kmemcheck_hide_all(void) | 170 | static unsigned int kmemcheck_hide_all(void) |
171 | { | 171 | { |
172 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 172 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
173 | unsigned int i; | 173 | unsigned int i; |
174 | unsigned int n; | 174 | unsigned int n; |
175 | 175 | ||
@@ -185,7 +185,7 @@ static unsigned int kmemcheck_hide_all(void) | |||
185 | */ | 185 | */ |
186 | void kmemcheck_show(struct pt_regs *regs) | 186 | void kmemcheck_show(struct pt_regs *regs) |
187 | { | 187 | { |
188 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 188 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
189 | 189 | ||
190 | BUG_ON(!irqs_disabled()); | 190 | BUG_ON(!irqs_disabled()); |
191 | 191 | ||
@@ -226,7 +226,7 @@ void kmemcheck_show(struct pt_regs *regs) | |||
226 | */ | 226 | */ |
227 | void kmemcheck_hide(struct pt_regs *regs) | 227 | void kmemcheck_hide(struct pt_regs *regs) |
228 | { | 228 | { |
229 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 229 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
230 | int n; | 230 | int n; |
231 | 231 | ||
232 | BUG_ON(!irqs_disabled()); | 232 | BUG_ON(!irqs_disabled()); |
@@ -528,7 +528,7 @@ static void kmemcheck_access(struct pt_regs *regs, | |||
528 | const uint8_t *insn_primary; | 528 | const uint8_t *insn_primary; |
529 | unsigned int size; | 529 | unsigned int size; |
530 | 530 | ||
531 | struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); | 531 | struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); |
532 | 532 | ||
533 | /* Recursive fault -- ouch. */ | 533 | /* Recursive fault -- ouch. */ |
534 | if (data->busy) { | 534 | if (data->busy) { |
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index d221374d5ce8..1a883705a12a 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c | |||
@@ -463,6 +463,42 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) | |||
463 | return true; | 463 | return true; |
464 | } | 464 | } |
465 | 465 | ||
466 | static void __init numa_clear_kernel_node_hotplug(void) | ||
467 | { | ||
468 | int i, nid; | ||
469 | nodemask_t numa_kernel_nodes = NODE_MASK_NONE; | ||
470 | unsigned long start, end; | ||
471 | struct memblock_region *r; | ||
472 | |||
473 | /* | ||
474 | * At this time, all memory regions reserved by memblock are | ||
475 | * used by the kernel. Setting the nid in memblock.reserved will | ||
476 | * mark all the nodes the kernel resides in. | ||
477 | */ | ||
478 | for (i = 0; i < numa_meminfo.nr_blks; i++) { | ||
479 | struct numa_memblk *mb = &numa_meminfo.blk[i]; | ||
480 | |||
481 | memblock_set_node(mb->start, mb->end - mb->start, | ||
482 | &memblock.reserved, mb->nid); | ||
483 | } | ||
484 | |||
485 | /* Mark all kernel nodes. */ | ||
486 | for_each_memblock(reserved, r) | ||
487 | node_set(r->nid, numa_kernel_nodes); | ||
488 | |||
489 | /* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */ | ||
490 | for (i = 0; i < numa_meminfo.nr_blks; i++) { | ||
491 | nid = numa_meminfo.blk[i].nid; | ||
492 | if (!node_isset(nid, numa_kernel_nodes)) | ||
493 | continue; | ||
494 | |||
495 | start = numa_meminfo.blk[i].start; | ||
496 | end = numa_meminfo.blk[i].end; | ||
497 | |||
498 | memblock_clear_hotplug(start, end - start); | ||
499 | } | ||
500 | } | ||
501 | |||
466 | static int __init numa_register_memblks(struct numa_meminfo *mi) | 502 | static int __init numa_register_memblks(struct numa_meminfo *mi) |
467 | { | 503 | { |
468 | unsigned long uninitialized_var(pfn_align); | 504 | unsigned long uninitialized_var(pfn_align); |
@@ -481,6 +517,15 @@ static int __init numa_register_memblks(struct numa_meminfo *mi) | |||
481 | } | 517 | } |
482 | 518 | ||
483 | /* | 519 | /* |
520 | * At very early boot, the kernel has to use some memory, such as | ||
521 | * for loading the kernel image. We cannot prevent this anyway. So any | ||
522 | * node the kernel resides in should be un-hotpluggable. | ||
523 | * | ||
524 | * And by the time we get here, allocating node data won't fail. | ||
525 | */ | ||
526 | numa_clear_kernel_node_hotplug(); | ||
527 | |||
528 | /* | ||
484 | * If sections array is gonna be used for pfn -> nid mapping, check | 529 | * If sections array is gonna be used for pfn -> nid mapping, check |
485 | * whether its granularity is fine enough. | 530 | * whether its granularity is fine enough. |
486 | */ | 531 | */ |
@@ -548,41 +593,6 @@ static void __init numa_init_array(void) | |||
548 | } | 593 | } |
549 | } | 594 | } |
550 | 595 | ||
551 | static void __init numa_clear_kernel_node_hotplug(void) | ||
552 | { | ||
553 | int i, nid; | ||
554 | nodemask_t numa_kernel_nodes = NODE_MASK_NONE; | ||
555 | unsigned long start, end; | ||
556 | struct memblock_region *r; | ||
557 | |||
558 | /* | ||
559 | * At this time, all memory regions reserved by memblock are | ||
560 | * used by the kernel. Set the nid in memblock.reserved will | ||
561 | * mark out all the nodes the kernel resides in. | ||
562 | */ | ||
563 | for (i = 0; i < numa_meminfo.nr_blks; i++) { | ||
564 | struct numa_memblk *mb = &numa_meminfo.blk[i]; | ||
565 | memblock_set_node(mb->start, mb->end - mb->start, | ||
566 | &memblock.reserved, mb->nid); | ||
567 | } | ||
568 | |||
569 | /* Mark all kernel nodes. */ | ||
570 | for_each_memblock(reserved, r) | ||
571 | node_set(r->nid, numa_kernel_nodes); | ||
572 | |||
573 | /* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */ | ||
574 | for (i = 0; i < numa_meminfo.nr_blks; i++) { | ||
575 | nid = numa_meminfo.blk[i].nid; | ||
576 | if (!node_isset(nid, numa_kernel_nodes)) | ||
577 | continue; | ||
578 | |||
579 | start = numa_meminfo.blk[i].start; | ||
580 | end = numa_meminfo.blk[i].end; | ||
581 | |||
582 | memblock_clear_hotplug(start, end - start); | ||
583 | } | ||
584 | } | ||
585 | |||
586 | static int __init numa_init(int (*init_func)(void)) | 596 | static int __init numa_init(int (*init_func)(void)) |
587 | { | 597 | { |
588 | int i; | 598 | int i; |
@@ -637,15 +647,6 @@ static int __init numa_init(int (*init_func)(void)) | |||
637 | } | 647 | } |
638 | numa_init_array(); | 648 | numa_init_array(); |
639 | 649 | ||
640 | /* | ||
641 | * At very early time, the kernel have to use some memory such as | ||
642 | * loading the kernel image. We cannot prevent this anyway. So any | ||
643 | * node the kernel resides in should be un-hotpluggable. | ||
644 | * | ||
645 | * And when we come here, numa_init() won't fail. | ||
646 | */ | ||
647 | numa_clear_kernel_node_hotplug(); | ||
648 | |||
649 | return 0; | 650 | return 0; |
650 | } | 651 | } |
651 | 652 | ||
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index d56cd1f515bd..3f627345d51c 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c | |||
@@ -182,12 +182,17 @@ struct jit_context { | |||
182 | bool seen_ld_abs; | 182 | bool seen_ld_abs; |
183 | }; | 183 | }; |
184 | 184 | ||
185 | /* maximum number of bytes emitted while JITing one eBPF insn */ | ||
186 | #define BPF_MAX_INSN_SIZE 128 | ||
187 | #define BPF_INSN_SAFETY 64 | ||
188 | |||
185 | static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, | 189 | static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, |
186 | int oldproglen, struct jit_context *ctx) | 190 | int oldproglen, struct jit_context *ctx) |
187 | { | 191 | { |
188 | struct bpf_insn *insn = bpf_prog->insnsi; | 192 | struct bpf_insn *insn = bpf_prog->insnsi; |
189 | int insn_cnt = bpf_prog->len; | 193 | int insn_cnt = bpf_prog->len; |
190 | u8 temp[64]; | 194 | bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0); |
195 | u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; | ||
191 | int i; | 196 | int i; |
192 | int proglen = 0; | 197 | int proglen = 0; |
193 | u8 *prog = temp; | 198 | u8 *prog = temp; |
@@ -225,7 +230,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, | |||
225 | EMIT2(0x31, 0xc0); /* xor eax, eax */ | 230 | EMIT2(0x31, 0xc0); /* xor eax, eax */ |
226 | EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */ | 231 | EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */ |
227 | 232 | ||
228 | if (ctx->seen_ld_abs) { | 233 | if (seen_ld_abs) { |
229 | /* r9d : skb->len - skb->data_len (headlen) | 234 | /* r9d : skb->len - skb->data_len (headlen) |
230 | * r10 : skb->data | 235 | * r10 : skb->data |
231 | */ | 236 | */ |
@@ -685,7 +690,7 @@ xadd: if (is_imm8(insn->off)) | |||
685 | case BPF_JMP | BPF_CALL: | 690 | case BPF_JMP | BPF_CALL: |
686 | func = (u8 *) __bpf_call_base + imm32; | 691 | func = (u8 *) __bpf_call_base + imm32; |
687 | jmp_offset = func - (image + addrs[i]); | 692 | jmp_offset = func - (image + addrs[i]); |
688 | if (ctx->seen_ld_abs) { | 693 | if (seen_ld_abs) { |
689 | EMIT2(0x41, 0x52); /* push %r10 */ | 694 | EMIT2(0x41, 0x52); /* push %r10 */ |
690 | EMIT2(0x41, 0x51); /* push %r9 */ | 695 | EMIT2(0x41, 0x51); /* push %r9 */ |
691 | /* need to adjust jmp offset, since | 696 | /* need to adjust jmp offset, since |
@@ -699,7 +704,7 @@ xadd: if (is_imm8(insn->off)) | |||
699 | return -EINVAL; | 704 | return -EINVAL; |
700 | } | 705 | } |
701 | EMIT1_off32(0xE8, jmp_offset); | 706 | EMIT1_off32(0xE8, jmp_offset); |
702 | if (ctx->seen_ld_abs) { | 707 | if (seen_ld_abs) { |
703 | EMIT2(0x41, 0x59); /* pop %r9 */ | 708 | EMIT2(0x41, 0x59); /* pop %r9 */ |
704 | EMIT2(0x41, 0x5A); /* pop %r10 */ | 709 | EMIT2(0x41, 0x5A); /* pop %r10 */ |
705 | } | 710 | } |
@@ -804,7 +809,8 @@ emit_jmp: | |||
804 | goto common_load; | 809 | goto common_load; |
805 | case BPF_LD | BPF_ABS | BPF_W: | 810 | case BPF_LD | BPF_ABS | BPF_W: |
806 | func = CHOOSE_LOAD_FUNC(imm32, sk_load_word); | 811 | func = CHOOSE_LOAD_FUNC(imm32, sk_load_word); |
807 | common_load: ctx->seen_ld_abs = true; | 812 | common_load: |
813 | ctx->seen_ld_abs = seen_ld_abs = true; | ||
808 | jmp_offset = func - (image + addrs[i]); | 814 | jmp_offset = func - (image + addrs[i]); |
809 | if (!func || !is_simm32(jmp_offset)) { | 815 | if (!func || !is_simm32(jmp_offset)) { |
810 | pr_err("unsupported bpf func %d addr %p image %p\n", | 816 | pr_err("unsupported bpf func %d addr %p image %p\n", |
@@ -878,6 +884,11 @@ common_load: ctx->seen_ld_abs = true; | |||
878 | } | 884 | } |
879 | 885 | ||
880 | ilen = prog - temp; | 886 | ilen = prog - temp; |
887 | if (ilen > BPF_MAX_INSN_SIZE) { | ||
888 | pr_err("bpf_jit_compile fatal insn size error\n"); | ||
889 | return -EFAULT; | ||
890 | } | ||
891 | |||
881 | if (image) { | 892 | if (image) { |
882 | if (unlikely(proglen + ilen > oldproglen)) { | 893 | if (unlikely(proglen + ilen > oldproglen)) { |
883 | pr_err("bpf_jit_compile fatal error\n"); | 894 | pr_err("bpf_jit_compile fatal error\n"); |
@@ -934,9 +945,11 @@ void bpf_int_jit_compile(struct bpf_prog *prog) | |||
934 | goto out; | 945 | goto out; |
935 | } | 946 | } |
936 | if (image) { | 947 | if (image) { |
937 | if (proglen != oldproglen) | 948 | if (proglen != oldproglen) { |
938 | pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", | 949 | pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", |
939 | proglen, oldproglen); | 950 | proglen, oldproglen); |
951 | goto out; | ||
952 | } | ||
940 | break; | 953 | break; |
941 | } | 954 | } |
942 | if (proglen == oldproglen) { | 955 | if (proglen == oldproglen) { |
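
The JIT change bounds how much code a single eBPF instruction may emit:
each instruction is encoded into a scratch buffer that carries
BPF_INSN_SAFETY bytes of slack (so the stores themselves stay in
bounds), and the resulting length is checked against BPF_MAX_INSN_SIZE
before the bytes are committed to the image. Reduced to a sketch:

    #include <stdint.h>
    #include <string.h>

    #define DEMO_MAX_INSN_SIZE 128
    #define DEMO_INSN_SAFETY   64

    /* Encode one instruction into scratch space, verify its length,
     * then commit it. The encoder is assumed never to produce more
     * than MAX + SAFETY bytes, so writing temp first is safe. */
    static int demo_emit(uint8_t *image, int *proglen,
                         const uint8_t *enc, int ilen)
    {
            uint8_t temp[DEMO_MAX_INSN_SIZE + DEMO_INSN_SAFETY];

            memcpy(temp, enc, ilen);        /* stand-in for EMIT*() */
            if (ilen > DEMO_MAX_INSN_SIZE)
                    return -1;              /* encoding bug: abort */
            if (image)
                    memcpy(image + *proglen, temp, ilen);
            *proglen += ilen;
            return 0;
    }
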
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 379e8bd0deea..1d2e6392f5fa 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -64,11 +64,11 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, | |||
64 | static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs) | 64 | static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs) |
65 | { | 65 | { |
66 | if (ctr_running) | 66 | if (ctr_running) |
67 | model->check_ctrs(regs, &__get_cpu_var(cpu_msrs)); | 67 | model->check_ctrs(regs, this_cpu_ptr(&cpu_msrs)); |
68 | else if (!nmi_enabled) | 68 | else if (!nmi_enabled) |
69 | return NMI_DONE; | 69 | return NMI_DONE; |
70 | else | 70 | else |
71 | model->stop(&__get_cpu_var(cpu_msrs)); | 71 | model->stop(this_cpu_ptr(&cpu_msrs)); |
72 | return NMI_HANDLED; | 72 | return NMI_HANDLED; |
73 | } | 73 | } |
74 | 74 | ||
@@ -91,7 +91,7 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs) | |||
91 | 91 | ||
92 | static void nmi_cpu_start(void *dummy) | 92 | static void nmi_cpu_start(void *dummy) |
93 | { | 93 | { |
94 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | 94 | struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs); |
95 | if (!msrs->controls) | 95 | if (!msrs->controls) |
96 | WARN_ON_ONCE(1); | 96 | WARN_ON_ONCE(1); |
97 | else | 97 | else |
@@ -111,7 +111,7 @@ static int nmi_start(void) | |||
111 | 111 | ||
112 | static void nmi_cpu_stop(void *dummy) | 112 | static void nmi_cpu_stop(void *dummy) |
113 | { | 113 | { |
114 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); | 114 | struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs); |
115 | if (!msrs->controls) | 115 | if (!msrs->controls) |
116 | WARN_ON_ONCE(1); | 116 | WARN_ON_ONCE(1); |
117 | else | 117 | else |
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 98ab13058f89..ad1d91f475ab 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c | |||
@@ -372,7 +372,7 @@ static unsigned int get_stagger(void) | |||
372 | { | 372 | { |
373 | #ifdef CONFIG_SMP | 373 | #ifdef CONFIG_SMP |
374 | int cpu = smp_processor_id(); | 374 | int cpu = smp_processor_id(); |
375 | return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map)); | 375 | return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map)); |
376 | #endif | 376 | #endif |
377 | return 0; | 377 | return 0; |
378 | } | 378 | } |
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c index c89c93320c12..c6b146e67116 100644 --- a/arch/x86/platform/uv/uv_nmi.c +++ b/arch/x86/platform/uv/uv_nmi.c | |||
@@ -63,8 +63,8 @@ | |||
63 | 63 | ||
64 | static struct uv_hub_nmi_s **uv_hub_nmi_list; | 64 | static struct uv_hub_nmi_s **uv_hub_nmi_list; |
65 | 65 | ||
66 | DEFINE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi); | 66 | DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi); |
67 | EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_nmi); | 67 | EXPORT_PER_CPU_SYMBOL_GPL(uv_cpu_nmi); |
68 | 68 | ||
69 | static unsigned long nmi_mmr; | 69 | static unsigned long nmi_mmr; |
70 | static unsigned long nmi_mmr_clear; | 70 | static unsigned long nmi_mmr_clear; |
@@ -215,7 +215,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi) | |||
215 | int nmi = 0; | 215 | int nmi = 0; |
216 | 216 | ||
217 | local64_inc(&uv_nmi_count); | 217 | local64_inc(&uv_nmi_count); |
218 | uv_cpu_nmi.queries++; | 218 | this_cpu_inc(uv_cpu_nmi.queries); |
219 | 219 | ||
220 | do { | 220 | do { |
221 | nmi = atomic_read(&hub_nmi->in_nmi); | 221 | nmi = atomic_read(&hub_nmi->in_nmi); |
@@ -293,7 +293,7 @@ static void uv_nmi_nr_cpus_ping(void) | |||
293 | int cpu; | 293 | int cpu; |
294 | 294 | ||
295 | for_each_cpu(cpu, uv_nmi_cpu_mask) | 295 | for_each_cpu(cpu, uv_nmi_cpu_mask) |
296 | atomic_set(&uv_cpu_nmi_per(cpu).pinging, 1); | 296 | uv_cpu_nmi_per(cpu).pinging = 1; |
297 | 297 | ||
298 | apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI); | 298 | apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI); |
299 | } | 299 | } |
@@ -304,8 +304,8 @@ static void uv_nmi_cleanup_mask(void) | |||
304 | int cpu; | 304 | int cpu; |
305 | 305 | ||
306 | for_each_cpu(cpu, uv_nmi_cpu_mask) { | 306 | for_each_cpu(cpu, uv_nmi_cpu_mask) { |
307 | atomic_set(&uv_cpu_nmi_per(cpu).pinging, 0); | 307 | uv_cpu_nmi_per(cpu).pinging = 0; |
308 | atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_OUT); | 308 | uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT; |
309 | cpumask_clear_cpu(cpu, uv_nmi_cpu_mask); | 309 | cpumask_clear_cpu(cpu, uv_nmi_cpu_mask); |
310 | } | 310 | } |
311 | } | 311 | } |
@@ -328,7 +328,7 @@ static int uv_nmi_wait_cpus(int first) | |||
328 | int loop_delay = uv_nmi_loop_delay; | 328 | int loop_delay = uv_nmi_loop_delay; |
329 | 329 | ||
330 | for_each_cpu(j, uv_nmi_cpu_mask) { | 330 | for_each_cpu(j, uv_nmi_cpu_mask) { |
331 | if (atomic_read(&uv_cpu_nmi_per(j).state)) { | 331 | if (uv_cpu_nmi_per(j).state) { |
332 | cpumask_clear_cpu(j, uv_nmi_cpu_mask); | 332 | cpumask_clear_cpu(j, uv_nmi_cpu_mask); |
333 | if (++k >= n) | 333 | if (++k >= n) |
334 | break; | 334 | break; |
@@ -359,7 +359,7 @@ static int uv_nmi_wait_cpus(int first) | |||
359 | static void uv_nmi_wait(int master) | 359 | static void uv_nmi_wait(int master) |
360 | { | 360 | { |
361 | /* indicate this cpu is in */ | 361 | /* indicate this cpu is in */ |
362 | atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_IN); | 362 | this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN); |
363 | 363 | ||
364 | /* if not the first cpu in (the master), then we are a slave cpu */ | 364 | /* if not the first cpu in (the master), then we are a slave cpu */ |
365 | if (!master) | 365 | if (!master) |
@@ -419,7 +419,7 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs) | |||
419 | "UV:%sNMI process trace for CPU %d\n", dots, cpu); | 419 | "UV:%sNMI process trace for CPU %d\n", dots, cpu); |
420 | show_regs(regs); | 420 | show_regs(regs); |
421 | } | 421 | } |
422 | atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE); | 422 | this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE); |
423 | } | 423 | } |
424 | 424 | ||
425 | /* Trigger a slave cpu to dump its state */ | 425 |
@@ -427,20 +427,20 @@ static void uv_nmi_trigger_dump(int cpu) | |||
427 | { | 427 | { |
428 | int retry = uv_nmi_trigger_delay; | 428 | int retry = uv_nmi_trigger_delay; |
429 | 429 | ||
430 | if (atomic_read(&uv_cpu_nmi_per(cpu).state) != UV_NMI_STATE_IN) | 430 | if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN) |
431 | return; | 431 | return; |
432 | 432 | ||
433 | atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP); | 433 | uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP; |
434 | do { | 434 | do { |
435 | cpu_relax(); | 435 | cpu_relax(); |
436 | udelay(10); | 436 | udelay(10); |
437 | if (atomic_read(&uv_cpu_nmi_per(cpu).state) | 437 | if (uv_cpu_nmi_per(cpu).state |
438 | != UV_NMI_STATE_DUMP) | 438 | != UV_NMI_STATE_DUMP) |
439 | return; | 439 | return; |
440 | } while (--retry > 0); | 440 | } while (--retry > 0); |
441 | 441 | ||
442 | pr_crit("UV: CPU %d stuck in process dump function\n", cpu); | 442 | pr_crit("UV: CPU %d stuck in process dump function\n", cpu); |
443 | atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP_DONE); | 443 | uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE; |
444 | } | 444 | } |
445 | 445 | ||
446 | /* Wait until all cpus ready to exit */ | 446 | /* Wait until all cpus ready to exit */ |
@@ -488,7 +488,7 @@ static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master) | |||
488 | } else { | 488 | } else { |
489 | while (!atomic_read(&uv_nmi_slave_continue)) | 489 | while (!atomic_read(&uv_nmi_slave_continue)) |
490 | cpu_relax(); | 490 | cpu_relax(); |
491 | while (atomic_read(&uv_cpu_nmi.state) != UV_NMI_STATE_DUMP) | 491 | while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP) |
492 | cpu_relax(); | 492 | cpu_relax(); |
493 | uv_nmi_dump_state_cpu(cpu, regs); | 493 | uv_nmi_dump_state_cpu(cpu, regs); |
494 | } | 494 | } |
@@ -615,7 +615,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs) | |||
615 | local_irq_save(flags); | 615 | local_irq_save(flags); |
616 | 616 | ||
617 | /* If not a UV System NMI, ignore */ | 617 | /* If not a UV System NMI, ignore */ |
618 | if (!atomic_read(&uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) { | 618 | if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) { |
619 | local_irq_restore(flags); | 619 | local_irq_restore(flags); |
620 | return NMI_DONE; | 620 | return NMI_DONE; |
621 | } | 621 | } |
@@ -639,7 +639,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs) | |||
639 | uv_call_kgdb_kdb(cpu, regs, master); | 639 | uv_call_kgdb_kdb(cpu, regs, master); |
640 | 640 | ||
641 | /* Clear per_cpu "in nmi" flag */ | 641 | /* Clear per_cpu "in nmi" flag */ |
642 | atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_OUT); | 642 | this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT); |
643 | 643 | ||
644 | /* Clear MMR NMI flag on each hub */ | 644 | /* Clear MMR NMI flag on each hub */ |
645 | uv_clear_nmi(cpu); | 645 | uv_clear_nmi(cpu); |
@@ -666,16 +666,16 @@ static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs) | |||
666 | { | 666 | { |
667 | int ret; | 667 | int ret; |
668 | 668 | ||
669 | uv_cpu_nmi.queries++; | 669 | this_cpu_inc(uv_cpu_nmi.queries); |
670 | if (!atomic_read(&uv_cpu_nmi.pinging)) { | 670 | if (!this_cpu_read(uv_cpu_nmi.pinging)) { |
671 | local64_inc(&uv_nmi_ping_misses); | 671 | local64_inc(&uv_nmi_ping_misses); |
672 | return NMI_DONE; | 672 | return NMI_DONE; |
673 | } | 673 | } |
674 | 674 | ||
675 | uv_cpu_nmi.pings++; | 675 | this_cpu_inc(uv_cpu_nmi.pings); |
676 | local64_inc(&uv_nmi_ping_count); | 676 | local64_inc(&uv_nmi_ping_count); |
677 | ret = uv_handle_nmi(reason, regs); | 677 | ret = uv_handle_nmi(reason, regs); |
678 | atomic_set(&uv_cpu_nmi.pinging, 0); | 678 | this_cpu_write(uv_cpu_nmi.pinging, 0); |
679 | return ret; | 679 | return ret; |
680 | } | 680 | } |
681 | 681 | ||
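
Two access patterns coexist in uv_nmi.c after the conversion: a CPU
touches its own uv_cpu_nmi via this_cpu_read()/this_cpu_write()/
this_cpu_inc(), while the master CPU reaches other CPUs' copies through
plain per-CPU dereferences (uv_cpu_nmi_per()), which is why the fields
no longer need to be atomic_t. In sketch form (demo names):

    #include <linux/percpu.h>

    struct demo_cpu_nmi {
            int state;
            int pinging;
    };

    static DEFINE_PER_CPU(struct demo_cpu_nmi, demo_nmi);

    static void demo_local_and_remote(int other_cpu)
    {
            /* Local CPU: this_cpu ops on its own instance. */
            this_cpu_write(demo_nmi.state, 1);
            this_cpu_inc(demo_nmi.pinging);

            /* Master CPU: plain access to another CPU's instance. */
            per_cpu(demo_nmi, other_cpu).state = 0;
    }
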
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c index 5c86786bbfd2..a244237f3cfa 100644 --- a/arch/x86/platform/uv/uv_time.c +++ b/arch/x86/platform/uv/uv_time.c | |||
@@ -365,7 +365,7 @@ __setup("uvrtcevt", uv_enable_evt_rtc); | |||
365 | 365 | ||
366 | static __init void uv_rtc_register_clockevents(struct work_struct *dummy) | 366 | static __init void uv_rtc_register_clockevents(struct work_struct *dummy) |
367 | { | 367 | { |
368 | struct clock_event_device *ced = &__get_cpu_var(cpu_ced); | 368 | struct clock_event_device *ced = this_cpu_ptr(&cpu_ced); |
369 | 369 | ||
370 | *ced = clock_event_device_uv; | 370 | *ced = clock_event_device_uv; |
371 | ced->cpumask = cpumask_of(smp_processor_id()); | 371 | ced->cpumask = cpumask_of(smp_processor_id()); |
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 899dd2454256..f52e033557c9 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile | |||
@@ -18,8 +18,9 @@ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE | |||
18 | 18 | ||
19 | targets += kexec-purgatory.c | 19 | targets += kexec-purgatory.c |
20 | 20 | ||
21 | CMD_BIN2C = $(objtree)/scripts/basic/bin2c | ||
21 | quiet_cmd_bin2c = BIN2C $@ | 22 | quiet_cmd_bin2c = BIN2C $@ |
22 | cmd_bin2c = cat $(obj)/purgatory.ro | $(objtree)/scripts/basic/bin2c kexec_purgatory > $(obj)/kexec-purgatory.c | 23 | cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@ |
23 | 24 | ||
24 | $(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE | 25 | $(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE |
25 | $(call if_changed,bin2c) | 26 | $(call if_changed,bin2c) |
diff --git a/arch/x86/um/asm/ptrace.h b/arch/x86/um/asm/ptrace.h index 54f8102ccde5..e59eef20647b 100644 --- a/arch/x86/um/asm/ptrace.h +++ b/arch/x86/um/asm/ptrace.h | |||
@@ -47,8 +47,6 @@ struct user_desc; | |||
47 | 47 | ||
48 | #ifdef CONFIG_X86_32 | 48 | #ifdef CONFIG_X86_32 |
49 | 49 | ||
50 | #define HOST_AUDIT_ARCH AUDIT_ARCH_I386 | ||
51 | |||
52 | extern int ptrace_get_thread_area(struct task_struct *child, int idx, | 50 | extern int ptrace_get_thread_area(struct task_struct *child, int idx, |
53 | struct user_desc __user *user_desc); | 51 | struct user_desc __user *user_desc); |
54 | 52 | ||
@@ -57,8 +55,6 @@ extern int ptrace_set_thread_area(struct task_struct *child, int idx, | |||
57 | 55 | ||
58 | #else | 56 | #else |
59 | 57 | ||
60 | #define HOST_AUDIT_ARCH AUDIT_ARCH_X86_64 | ||
61 | |||
62 | #define PT_REGS_R8(r) UPT_R8(&(r)->regs) | 58 | #define PT_REGS_R8(r) UPT_R8(&(r)->regs) |
63 | #define PT_REGS_R9(r) UPT_R9(&(r)->regs) | 59 | #define PT_REGS_R9(r) UPT_R9(&(r)->regs) |
64 | #define PT_REGS_R10(r) UPT_R10(&(r)->regs) | 60 | #define PT_REGS_R10(r) UPT_R10(&(r)->regs) |
diff --git a/arch/x86/um/asm/syscall.h b/arch/x86/um/asm/syscall.h new file mode 100644 index 000000000000..9fe77b7b5a0e --- /dev/null +++ b/arch/x86/um/asm/syscall.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef __UM_ASM_SYSCALL_H | ||
2 | #define __UM_ASM_SYSCALL_H | ||
3 | |||
4 | #include <uapi/linux/audit.h> | ||
5 | |||
6 | static inline int syscall_get_arch(void) | ||
7 | { | ||
8 | #ifdef CONFIG_X86_32 | ||
9 | return AUDIT_ARCH_I386; | ||
10 | #else | ||
11 | return AUDIT_ARCH_X86_64; | ||
12 | #endif | ||
13 | } | ||
14 | |||
15 | #endif /* __UM_ASM_SYSCALL_H */ | ||
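
This new header replaces the HOST_AUDIT_ARCH defines removed from
ptrace.h above: instead of each caller carrying a constant, the arch is
asked for where it is needed. A hedged usage sketch
(demo_audit_report() is hypothetical):

    #include <asm/syscall.h>

    void demo_audit_report(int arch, unsigned long nr,
                           unsigned long a1, unsigned long a2,
                           unsigned long a3, unsigned long a4);

    /* The arch is computed at the point of use instead of being
     * threaded through as HOST_AUDIT_ARCH. */
    static void demo_audit_entry(unsigned long nr, unsigned long a1,
                                 unsigned long a2, unsigned long a3,
                                 unsigned long a4)
    {
            demo_audit_report(syscall_get_arch(), nr, a1, a2, a3, a4);
    }
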
diff --git a/arch/x86/um/checksum_32.S b/arch/x86/um/checksum_32.S index 8d0c420465cc..fa4b8b9841ff 100644 --- a/arch/x86/um/checksum_32.S +++ b/arch/x86/um/checksum_32.S | |||
@@ -214,242 +214,3 @@ csum_partial: | |||
214 | ret | 214 | ret |
215 | 215 | ||
216 | #endif | 216 | #endif |
217 | |||
218 | /* | ||
219 | unsigned int csum_partial_copy_generic (const char *src, char *dst, | ||
220 | int len, int sum, int *src_err_ptr, int *dst_err_ptr) | ||
221 | */ | ||
222 | |||
223 | /* | ||
224 | * Copy from ds while checksumming, otherwise like csum_partial | ||
225 | * | ||
226 | * The macros SRC and DST specify the type of access for the instruction. | ||
227 | * thus we can call a custom exception handler for all access types. | ||
228 | * | ||
229 | * FIXME: could someone double-check whether I haven't mixed up some SRC and | ||
230 | * DST definitions? It's damn hard to trigger all cases. I hope I got | ||
231 | * them all but there's no guarantee. | ||
232 | */ | ||
233 | |||
234 | #define SRC(y...) \ | ||
235 | 9999: y; \ | ||
236 | _ASM_EXTABLE(9999b, 6001f) | ||
237 | |||
238 | #define DST(y...) \ | ||
239 | 9999: y; \ | ||
240 | _ASM_EXTABLE(9999b, 6002f) | ||
241 | |||
242 | .align 4 | ||
243 | |||
244 | #ifndef CONFIG_X86_USE_PPRO_CHECKSUM | ||
245 | |||
246 | #define ARGBASE 16 | ||
247 | #define FP 12 | ||
248 | |||
249 | csum_partial_copy_generic_i386: | ||
250 | subl $4,%esp | ||
251 | pushl %edi | ||
252 | pushl %esi | ||
253 | pushl %ebx | ||
254 | movl ARGBASE+16(%esp),%eax # sum | ||
255 | movl ARGBASE+12(%esp),%ecx # len | ||
256 | movl ARGBASE+4(%esp),%esi # src | ||
257 | movl ARGBASE+8(%esp),%edi # dst | ||
258 | |||
259 | testl $2, %edi # Check alignment. | ||
260 | jz 2f # Jump if alignment is ok. | ||
261 | subl $2, %ecx # Alignment uses up two bytes. | ||
262 | jae 1f # Jump if we had at least two bytes. | ||
263 | addl $2, %ecx # ecx was < 2. Deal with it. | ||
264 | jmp 4f | ||
265 | SRC(1: movw (%esi), %bx ) | ||
266 | addl $2, %esi | ||
267 | DST( movw %bx, (%edi) ) | ||
268 | addl $2, %edi | ||
269 | addw %bx, %ax | ||
270 | adcl $0, %eax | ||
271 | 2: | ||
272 | movl %ecx, FP(%esp) | ||
273 | shrl $5, %ecx | ||
274 | jz 2f | ||
275 | testl %esi, %esi | ||
276 | SRC(1: movl (%esi), %ebx ) | ||
277 | SRC( movl 4(%esi), %edx ) | ||
278 | adcl %ebx, %eax | ||
279 | DST( movl %ebx, (%edi) ) | ||
280 | adcl %edx, %eax | ||
281 | DST( movl %edx, 4(%edi) ) | ||
282 | |||
283 | SRC( movl 8(%esi), %ebx ) | ||
284 | SRC( movl 12(%esi), %edx ) | ||
285 | adcl %ebx, %eax | ||
286 | DST( movl %ebx, 8(%edi) ) | ||
287 | adcl %edx, %eax | ||
288 | DST( movl %edx, 12(%edi) ) | ||
289 | |||
290 | SRC( movl 16(%esi), %ebx ) | ||
291 | SRC( movl 20(%esi), %edx ) | ||
292 | adcl %ebx, %eax | ||
293 | DST( movl %ebx, 16(%edi) ) | ||
294 | adcl %edx, %eax | ||
295 | DST( movl %edx, 20(%edi) ) | ||
296 | |||
297 | SRC( movl 24(%esi), %ebx ) | ||
298 | SRC( movl 28(%esi), %edx ) | ||
299 | adcl %ebx, %eax | ||
300 | DST( movl %ebx, 24(%edi) ) | ||
301 | adcl %edx, %eax | ||
302 | DST( movl %edx, 28(%edi) ) | ||
303 | |||
304 | lea 32(%esi), %esi | ||
305 | lea 32(%edi), %edi | ||
306 | dec %ecx | ||
307 | jne 1b | ||
308 | adcl $0, %eax | ||
309 | 2: movl FP(%esp), %edx | ||
310 | movl %edx, %ecx | ||
311 | andl $0x1c, %edx | ||
312 | je 4f | ||
313 | shrl $2, %edx # This clears CF | ||
314 | SRC(3: movl (%esi), %ebx ) | ||
315 | adcl %ebx, %eax | ||
316 | DST( movl %ebx, (%edi) ) | ||
317 | lea 4(%esi), %esi | ||
318 | lea 4(%edi), %edi | ||
319 | dec %edx | ||
320 | jne 3b | ||
321 | adcl $0, %eax | ||
322 | 4: andl $3, %ecx | ||
323 | jz 7f | ||
324 | cmpl $2, %ecx | ||
325 | jb 5f | ||
326 | SRC( movw (%esi), %cx ) | ||
327 | leal 2(%esi), %esi | ||
328 | DST( movw %cx, (%edi) ) | ||
329 | leal 2(%edi), %edi | ||
330 | je 6f | ||
331 | shll $16,%ecx | ||
332 | SRC(5: movb (%esi), %cl ) | ||
333 | DST( movb %cl, (%edi) ) | ||
334 | 6: addl %ecx, %eax | ||
335 | adcl $0, %eax | ||
336 | 7: | ||
337 | 5000: | ||
338 | |||
339 | # Exception handler: | ||
340 | .section .fixup, "ax" | ||
341 | |||
342 | 6001: | ||
343 | movl ARGBASE+20(%esp), %ebx # src_err_ptr | ||
344 | movl $-EFAULT, (%ebx) | ||
345 | |||
346 | # zero the complete destination - computing the rest | ||
347 | # is too much work | ||
348 | movl ARGBASE+8(%esp), %edi # dst | ||
349 | movl ARGBASE+12(%esp), %ecx # len | ||
350 | xorl %eax,%eax | ||
351 | rep ; stosb | ||
352 | |||
353 | jmp 5000b | ||
354 | |||
355 | 6002: | ||
356 | movl ARGBASE+24(%esp), %ebx # dst_err_ptr | ||
357 | movl $-EFAULT,(%ebx) | ||
358 | jmp 5000b | ||
359 | |||
360 | .previous | ||
361 | |||
362 | popl %ebx | ||
363 | popl %esi | ||
364 | popl %edi | ||
365 | popl %ecx # equivalent to addl $4,%esp | ||
366 | ret | ||
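For context, a hedged sketch of the caller-side contract implied by the 6001 fixup: a fault while reading src deposits -EFAULT through src_err_ptr and leaves the whole destination zeroed, so callers only need to test the error slot afterwards. The wrapper and its name are illustrative, not a real call site from this patch:

	#include <linux/types.h>

	__wsum csum_partial_copy_generic_i386(const void *src, void *dst,
					      int len, __wsum sum,
					      int *src_err_ptr, int *dst_err_ptr);

	/* Illustrative wrapper (assumed helper, not from this tree). */
	static __wsum copy_and_csum(const void *src, void *dst, int len,
				    __wsum sum, int *err_ptr)
	{
		*err_ptr = 0;
		sum = csum_partial_copy_generic_i386(src, dst, len, sum,
						     err_ptr, NULL);
		/* on a source fault: *err_ptr == -EFAULT, dst[0..len) zeroed */
		return sum;
	}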
367 | |||
368 | #else | ||
369 | |||
370 | /* Version for PentiumII/PPro */ | ||
371 | |||
372 | #define ROUND1(x) \ | ||
373 | SRC(movl x(%esi), %ebx ) ; \ | ||
374 | addl %ebx, %eax ; \ | ||
375 | DST(movl %ebx, x(%edi) ) ; | ||
376 | |||
377 | #define ROUND(x) \ | ||
378 | SRC(movl x(%esi), %ebx ) ; \ | ||
379 | adcl %ebx, %eax ; \ | ||
380 | DST(movl %ebx, x(%edi) ) ; | ||
381 | |||
382 | #define ARGBASE 12 | ||
383 | |||
384 | csum_partial_copy_generic_i386: | ||
385 | pushl %ebx | ||
386 | pushl %edi | ||
387 | pushl %esi | ||
388 | movl ARGBASE+4(%esp),%esi # src | ||
389 | movl ARGBASE+8(%esp),%edi # dst | ||
390 | movl ARGBASE+12(%esp),%ecx # len | ||
391 | movl ARGBASE+16(%esp),%eax # sum | ||
392 | # movl %ecx, %edx | ||
393 | movl %ecx, %ebx # ebx = len | ||
394 | movl %esi, %edx | ||
395 | shrl $6, %ecx # ecx = number of full 64-byte blocks | ||
396 | andl $0x3c, %ebx # ebx = remainder (len % 64), rounded down to dwords | ||
397 | negl %ebx | ||
398 | subl %ebx, %esi # advance src/dst by the remainder, so the | ||
399 | subl %ebx, %edi # negative-offset ROUNDs below cover it first | ||
400 | lea -1(%esi),%edx # edx = 32-byte-aligned read-ahead cursor | ||
401 | andl $-32,%edx | ||
402 | lea 3f(%ebx,%ebx), %ebx # each ROUND is 8 bytes of code... | ||
403 | testl %esi, %esi # ...and clear CF for the first adcl | ||
404 | jmp *%ebx # enter the unrolled loop part-way: remainder first | ||
405 | 1: addl $64,%esi | ||
406 | addl $64,%edi | ||
407 | SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) # touch cache lines ahead (prefetch) | ||
408 | ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52) | ||
409 | ROUND (-48) ROUND(-44) ROUND(-40) ROUND(-36) | ||
410 | ROUND (-32) ROUND(-28) ROUND(-24) ROUND(-20) | ||
411 | ROUND (-16) ROUND(-12) ROUND(-8) ROUND(-4) | ||
412 | 3: adcl $0,%eax | ||
413 | addl $64, %edx | ||
414 | dec %ecx | ||
415 | jge 1b | ||
416 | 4: movl ARGBASE+12(%esp),%edx #len | ||
417 | andl $3, %edx | ||
418 | jz 7f | ||
419 | cmpl $2, %edx | ||
420 | jb 5f | ||
421 | SRC( movw (%esi), %dx ) | ||
422 | leal 2(%esi), %esi | ||
423 | DST( movw %dx, (%edi) ) | ||
424 | leal 2(%edi), %edi | ||
425 | je 6f | ||
426 | shll $16,%edx | ||
427 | 5: | ||
428 | SRC( movb (%esi), %dl ) | ||
429 | DST( movb %dl, (%edi) ) | ||
430 | 6: addl %edx, %eax | ||
431 | adcl $0, %eax | ||
432 | 7: | ||
433 | .section .fixup, "ax" | ||
434 | 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr | ||
435 | movl $-EFAULT, (%ebx) | ||
436 | # zero the complete destination (computing the rest is too much work) | ||
437 | movl ARGBASE+8(%esp),%edi # dst | ||
438 | movl ARGBASE+12(%esp),%ecx # len | ||
439 | xorl %eax,%eax | ||
440 | rep; stosb | ||
441 | jmp 7b | ||
442 | 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr | ||
443 | movl $-EFAULT, (%ebx) | ||
444 | jmp 7b | ||
445 | .previous | ||
446 | |||
447 | popl %esi | ||
448 | popl %edi | ||
449 | popl %ebx | ||
450 | ret | ||
451 | |||
452 | #undef ROUND | ||
453 | #undef ROUND1 | ||
454 | |||
455 | #endif | ||
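The PentiumII/PPro version above enters its 16-way unrolled loop part-way through via the computed jump (lea 3f(%ebx,%ebx), %ebx; jmp *%ebx), handling len % 64 before looping over full blocks; ROUND1 starts each full block with addl rather than adcl so no stale carry is folded in twice. A hedged C analogue of that remainder-first, jump-into-unrolled-loop pattern (Duff's device), with plain addition standing in for the adcl carry chain and the prefetch byte loads omitted:

	#include <stddef.h>
	#include <stdint.h>

	static uint32_t sum_dwords_unrolled(const uint32_t *p, size_t n,
					    uint32_t acc)
	{
		size_t rem = n % 4;

		switch (rem) {			/* computed entry, like jmp *%ebx */
		case 3: acc += *p++;		/* fall through */
		case 2: acc += *p++;		/* fall through */
		case 1: acc += *p++;		/* fall through */
		case 0: break;
		}
		for (n -= rem; n; n -= 4) {	/* full 4-dword blocks */
			acc += p[0] + p[1] + p[2] + p[3];
			p += 4;
		}
		return acc;
	}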
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index acb0effd8077..1a3f0445432a 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -821,7 +821,7 @@ static void xen_convert_trap_info(const struct desc_ptr *desc, | |||
821 | 821 | ||
822 | void xen_copy_trap_info(struct trap_info *traps) | 822 | void xen_copy_trap_info(struct trap_info *traps) |
823 | { | 823 | { |
824 | const struct desc_ptr *desc = &__get_cpu_var(idt_desc); | 824 | const struct desc_ptr *desc = this_cpu_ptr(&idt_desc); |
825 | 825 | ||
826 | xen_convert_trap_info(desc, traps); | 826 | xen_convert_trap_info(desc, traps); |
827 | } | 827 | } |
@@ -838,7 +838,7 @@ static void xen_load_idt(const struct desc_ptr *desc) | |||
838 | 838 | ||
839 | spin_lock(&lock); | 839 | spin_lock(&lock); |
840 | 840 | ||
841 | __get_cpu_var(idt_desc) = *desc; | 841 | memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc)); |
842 | 842 | ||
843 | xen_convert_trap_info(desc, traps); | 843 | xen_convert_trap_info(desc, traps); |
844 | 844 | ||
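The remaining hunks belong to the tree-wide removal of __get_cpu_var() that arrives with v3.18-rc1: &__get_cpu_var(var) becomes this_cpu_ptr(&var), and direct accesses become this_cpu_* operations. A hedged sketch of the conversion pattern, using a hypothetical per-CPU variable (cpu_stats is illustrative, not from these files):

	#include <linux/percpu.h>

	struct stats { unsigned long flushes; };
	static DEFINE_PER_CPU(struct stats, cpu_stats);	/* hypothetical */

	static void touch_stats(void)
	{
		struct stats *s;

		/* old style, now removed:  s = &__get_cpu_var(cpu_stats); */
		s = this_cpu_ptr(&cpu_stats);	/* caller disables preemption */
		s->flushes++;

		/* preempt-safe single update, no pointer needed: */
		this_cpu_inc(cpu_stats.flushes);
	}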
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c index 0d82003e76ad..ea54a08d8301 100644 --- a/arch/x86/xen/multicalls.c +++ b/arch/x86/xen/multicalls.c | |||
@@ -54,7 +54,7 @@ DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags); | |||
54 | 54 | ||
55 | void xen_mc_flush(void) | 55 | void xen_mc_flush(void) |
56 | { | 56 | { |
57 | struct mc_buffer *b = &__get_cpu_var(mc_buffer); | 57 | struct mc_buffer *b = this_cpu_ptr(&mc_buffer); |
58 | struct multicall_entry *mc; | 58 | struct multicall_entry *mc; |
59 | int ret = 0; | 59 | int ret = 0; |
60 | unsigned long flags; | 60 | unsigned long flags; |
@@ -131,7 +131,7 @@ void xen_mc_flush(void) | |||
131 | 131 | ||
132 | struct multicall_space __xen_mc_entry(size_t args) | 132 | struct multicall_space __xen_mc_entry(size_t args) |
133 | { | 133 | { |
134 | struct mc_buffer *b = &__get_cpu_var(mc_buffer); | 134 | struct mc_buffer *b = this_cpu_ptr(&mc_buffer); |
135 | struct multicall_space ret; | 135 | struct multicall_space ret; |
136 | unsigned argidx = roundup(b->argidx, sizeof(u64)); | 136 | unsigned argidx = roundup(b->argidx, sizeof(u64)); |
137 | 137 | ||
@@ -162,7 +162,7 @@ struct multicall_space __xen_mc_entry(size_t args) | |||
162 | 162 | ||
163 | struct multicall_space xen_mc_extend_args(unsigned long op, size_t size) | 163 | struct multicall_space xen_mc_extend_args(unsigned long op, size_t size) |
164 | { | 164 | { |
165 | struct mc_buffer *b = &__get_cpu_var(mc_buffer); | 165 | struct mc_buffer *b = this_cpu_ptr(&mc_buffer); |
166 | struct multicall_space ret = { NULL, NULL }; | 166 | struct multicall_space ret = { NULL, NULL }; |
167 | 167 | ||
168 | BUG_ON(preemptible()); | 168 | BUG_ON(preemptible()); |
@@ -192,7 +192,7 @@ out: | |||
192 | 192 | ||
193 | void xen_mc_callback(void (*fn)(void *), void *data) | 193 | void xen_mc_callback(void (*fn)(void *), void *data) |
194 | { | 194 | { |
195 | struct mc_buffer *b = &__get_cpu_var(mc_buffer); | 195 | struct mc_buffer *b = this_cpu_ptr(&mc_buffer); |
196 | struct callback *cb; | 196 | struct callback *cb; |
197 | 197 | ||
198 | if (b->cbidx == MC_BATCH) { | 198 | if (b->cbidx == MC_BATCH) { |
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 0ba5f3b967f0..23b45eb9a89c 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c | |||
@@ -109,7 +109,7 @@ static bool xen_pvspin = true; | |||
109 | __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) | 109 | __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) |
110 | { | 110 | { |
111 | int irq = __this_cpu_read(lock_kicker_irq); | 111 | int irq = __this_cpu_read(lock_kicker_irq); |
112 | struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting); | 112 | struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting); |
113 | int cpu = smp_processor_id(); | 113 | int cpu = smp_processor_id(); |
114 | u64 start; | 114 | u64 start; |
115 | unsigned long flags; | 115 | unsigned long flags; |
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 5718b0b58b60..a1d430b112b3 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -80,7 +80,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res) | |||
80 | 80 | ||
81 | BUG_ON(preemptible()); | 81 | BUG_ON(preemptible()); |
82 | 82 | ||
83 | state = &__get_cpu_var(xen_runstate); | 83 | state = this_cpu_ptr(&xen_runstate); |
84 | 84 | ||
85 | /* | 85 | /* |
86 | * The runstate info is always updated by the hypervisor on | 86 | * The runstate info is always updated by the hypervisor on |
@@ -123,7 +123,7 @@ static void do_stolen_accounting(void) | |||
123 | 123 | ||
124 | WARN_ON(state.state != RUNSTATE_running); | 124 | WARN_ON(state.state != RUNSTATE_running); |
125 | 125 | ||
126 | snap = &__get_cpu_var(xen_runstate_snapshot); | 126 | snap = this_cpu_ptr(&xen_runstate_snapshot); |
127 | 127 | ||
128 | /* work out how much time the VCPU has not been runn*ing* */ | 128 | /* work out how much time the VCPU has not been runn*ing* */ |
129 | runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable]; | 129 | runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable]; |
@@ -158,7 +158,7 @@ cycle_t xen_clocksource_read(void) | |||
158 | cycle_t ret; | 158 | cycle_t ret; |
159 | 159 | ||
160 | preempt_disable_notrace(); | 160 | preempt_disable_notrace(); |
161 | src = &__get_cpu_var(xen_vcpu)->time; | 161 | src = &__this_cpu_read(xen_vcpu)->time; |
162 | ret = pvclock_clocksource_read(src); | 162 | ret = pvclock_clocksource_read(src); |
163 | preempt_enable_notrace(); | 163 | preempt_enable_notrace(); |
164 | return ret; | 164 | return ret; |
@@ -397,7 +397,7 @@ static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt. | |||
397 | 397 | ||
398 | static irqreturn_t xen_timer_interrupt(int irq, void *dev_id) | 398 | static irqreturn_t xen_timer_interrupt(int irq, void *dev_id) |
399 | { | 399 | { |
400 | struct clock_event_device *evt = &__get_cpu_var(xen_clock_events).evt; | 400 | struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt); |
401 | irqreturn_t ret; | 401 | irqreturn_t ret; |
402 | 402 | ||
403 | ret = IRQ_NONE; | 403 | ret = IRQ_NONE; |
@@ -460,7 +460,7 @@ void xen_setup_cpu_clockevents(void) | |||
460 | { | 460 | { |
461 | BUG_ON(preemptible()); | 461 | BUG_ON(preemptible()); |
462 | 462 | ||
463 | clockevents_register_device(&__get_cpu_var(xen_clock_events).evt); | 463 | clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt)); |
464 | } | 464 | } |
465 | 465 | ||
466 | void xen_timer_resume(void) | 466 | void xen_timer_resume(void) |
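Note the xen_vcpu case in xen_clocksource_read(): xen_vcpu is a per-CPU *pointer*, so the conversion reads the pointer with __this_cpu_read() and then dereferences it, rather than forming this_cpu_ptr() of a member. A hedged sketch with hypothetical names (my_vcpu and the structs are illustrative only):

	#include <linux/percpu.h>
	#include <linux/types.h>

	struct vtime { u64 system_time; };
	struct vcpu { struct vtime time; };
	static DEFINE_PER_CPU(struct vcpu *, my_vcpu);	/* hypothetical */

	static struct vtime *my_time_info(void)
	{
		/* old: return &__get_cpu_var(my_vcpu)->time; */
		return &__this_cpu_read(my_vcpu)->time;
	}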