Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/amd.c               14
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c     2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c  31
-rw-r--r--  arch/x86/kernel/entry_64.S              14
-rw-r--r--  arch/x86/kernel/head_32.S                9
-rw-r--r--  arch/x86/kernel/microcode_amd.c          8
-rw-r--r--  arch/x86/kernel/ptrace.c                37
-rw-r--r--  arch/x86/kernel/smpboot.c                5
8 files changed, 95 insertions, 25 deletions
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f7e98a2c0d12..1b7d1656a042 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -631,6 +631,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		}
 	}
 
+	/*
+	 * The way access filter has a performance penalty on some workloads.
+	 * Disable it on the affected CPUs.
+	 */
+	if ((c->x86 == 0x15) &&
+	    (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
+		u64 val;
+
+		if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) {
+			val |= 0x1E;
+			wrmsrl_safe(0xc0011021, val);
+		}
+	}
+
 	cpu_detect_cache_sizes(c);
 
 	/* Multi core CPU? */
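
Note: the hunk above is a guarded MSR read-modify-write: read the register with the fault-safe accessor and write it back only when the WAF-disable bits (0x1E) are not already set. For illustration, the same check can be driven from user space through the msr driver; a sketch, assuming root and CONFIG_X86_MSR, with the constant name invented here (only the number 0xc0011021 comes from the patch):

#define _FILE_OFFSET_BITS 64	/* MSR numbers above 2^31 need a 64-bit offset */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	const off_t IC_CFG_MSR = 0xc0011021;	/* hypothetical name for the MSR above */
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDWR);	/* the file offset selects the MSR */

	if (fd < 0 || pread(fd, &val, sizeof(val), IC_CFG_MSR) != sizeof(val))
		return 1;			/* no msr driver, or the read faulted */
	if (!(val & 0x1E)) {			/* WAF still enabled: disable it */
		val |= 0x1E;
		pwrite(fd, &val, sizeof(val), IC_CFG_MSR);
	}
	close(fd);
	return 0;
}

The kernel patch does the equivalent once per core during CPU init; a user-space version would have to repeat it for each /dev/cpu/N/msr node.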
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 698b6ec12e0f..1ac581f38dfa 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -6,7 +6,7 @@
  *
  * Written by Jacob Shin - AMD, Inc.
  *
- * Support: borislav.petkov@amd.com
+ * Maintained by: Borislav Petkov <bp@alien8.de>
  *
  * April 2006
  * - added support for AMD Family 0x10 processors
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 5f88abf07e9c..4f9a3cbfc4a3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -285,34 +285,39 @@ void cmci_clear(void)
 	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
+static long cmci_rediscover_work_func(void *arg)
+{
+	int banks;
+
+	/* Recheck banks in case CPUs don't all have the same */
+	if (cmci_supported(&banks))
+		cmci_discover(banks);
+
+	return 0;
+}
+
 /*
  * After a CPU went down cycle through all the others and rediscover
  * Must run in process context.
  */
 void cmci_rediscover(int dying)
 {
-	int banks;
-	int cpu;
-	cpumask_var_t old;
+	int cpu, banks;
 
 	if (!cmci_supported(&banks))
 		return;
-	if (!alloc_cpumask_var(&old, GFP_KERNEL))
-		return;
-	cpumask_copy(old, &current->cpus_allowed);
 
 	for_each_online_cpu(cpu) {
 		if (cpu == dying)
 			continue;
-		if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
+
+		if (cpu == smp_processor_id()) {
+			cmci_rediscover_work_func(NULL);
 			continue;
-		/* Recheck banks in case CPUs don't all have the same */
-		if (cmci_supported(&banks))
-			cmci_discover(banks);
-	}
+		}
 
-	set_cpus_allowed_ptr(current, old);
-	free_cpumask_var(old);
+		work_on_cpu(cpu, cmci_rediscover_work_func, NULL);
+	}
 }
 
 /*
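
Note: the rewrite drops the old trick of re-binding the current task with set_cpus_allowed_ptr(), which is fragile for bound kthreads and racy against concurrent affinity changes, in favor of work_on_cpu(): queue the function on the target CPU's workqueue and block until it finishes. A minimal sketch of that pattern, as illustrative kernel-style code rather than anything in this patch:

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Runs in a kworker bound to whichever CPU work_on_cpu() was aimed at. */
static long where_am_i(void *arg)
{
	return smp_processor_id();
}

/* Must be called from process context; blocks until the callback returns. */
static void run_on(int cpu)
{
	long ran_on = work_on_cpu(cpu, where_am_i, NULL);

	pr_info("callback executed on CPU %ld\n", ran_on);
}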
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b51b2c7ee51f..1328fe49a3f1 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -995,8 +995,8 @@ END(interrupt)
  */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
-	ASM_CLAC
 	XCPT_FRAME
+	ASM_CLAC
 	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
 	/* 0(%rsp): old_rsp-ARGOFFSET */
@@ -1135,8 +1135,8 @@ END(common_interrupt)
  */
 .macro apicinterrupt num sym do_sym
 ENTRY(\sym)
-	ASM_CLAC
 	INTR_FRAME
+	ASM_CLAC
 	pushq_cfi $~(\num)
 .Lcommon_\sym:
 	interrupt \do_sym
@@ -1190,8 +1190,8 @@ apicinterrupt IRQ_WORK_VECTOR \
  */
 .macro zeroentry sym do_sym
 ENTRY(\sym)
-	ASM_CLAC
 	INTR_FRAME
+	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
 	subq $ORIG_RAX-R15, %rsp
@@ -1208,8 +1208,8 @@ END(\sym)
 
 .macro paranoidzeroentry sym do_sym
 ENTRY(\sym)
-	ASM_CLAC
 	INTR_FRAME
+	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
 	subq $ORIG_RAX-R15, %rsp
@@ -1227,8 +1227,8 @@ END(\sym)
 #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
 .macro paranoidzeroentry_ist sym do_sym ist
 ENTRY(\sym)
-	ASM_CLAC
 	INTR_FRAME
+	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
 	subq $ORIG_RAX-R15, %rsp
@@ -1247,8 +1247,8 @@ END(\sym)
 
 .macro errorentry sym do_sym
 ENTRY(\sym)
-	ASM_CLAC
 	XCPT_FRAME
+	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
 	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
@@ -1266,8 +1266,8 @@ END(\sym)
 	/* error code is on the stack already */
 .macro paranoiderrorentry sym do_sym
 ENTRY(\sym)
-	ASM_CLAC
 	XCPT_FRAME
+	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
 	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
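
Note: all seven hunks make the same mechanical swap: ASM_CLAC now sits after the frame macro (INTR_FRAME or XCPT_FRAME) instead of before it. Those macros emit CFI_STARTPROC and the initial unwind annotations, so the reorder keeps the DWARF annotations anchored at the entry point's first instruction; that is my reading, the diff itself states no rationale. ASM_CLAC is the SMAP "clear AC flag" instruction, emitted as raw bytes so pre-SMAP assemblers still build the file; a sketch of the encoding, modeled on arch/x86/include/asm/smap.h of this era (recalled, not shown in this diff):

/* CLAC is only legal at CPL 0 on a SMAP-capable CPU; the kernel patches
 * it in via alternatives instead of executing it unconditionally. */
static inline void clac_sketch(void)
{
	asm volatile(".byte 0x0f,0x01,0xca" ::: "memory");	/* clac */
}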
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 957a47aec64e..4dac2f68ed4a 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -292,8 +292,8 @@ default_entry:
  * be using the global pages.
  *
  * NOTE! If we are on a 486 we may have no cr4 at all!
- * Specifically, cr4 exists if and only if CPUID exists,
- * which in turn exists if and only if EFLAGS.ID exists.
+ * Specifically, cr4 exists if and only if CPUID exists
+ * and has flags other than the FPU flag set.
  */
 	movl $X86_EFLAGS_ID,%ecx
 	pushl %ecx
@@ -308,6 +308,11 @@ default_entry:
 	testl %ecx,%eax
 	jz 6f			# No ID flag = no CPUID = no CR4
 
+	movl $1,%eax
+	cpuid
+	andl $~1,%edx		# Ignore CPUID.FPU
+	jz 6f			# No flags or only CPUID.FPU = no CR4
+
 	movl pa(mmu_cr4_features),%eax
 	movl %eax,%cr4
 
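
Note: the added instructions tighten the CR4 probe. Even when EFLAGS.ID toggles, a 486-class part may report nothing in CPUID leaf 1 EDX beyond CPUID.FPU (bit 0), so the code masks that bit and still skips the CR4 write when nothing else remains. The same test restated as runnable user-space C, a sketch using GCC's cpuid.h helper:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 1 EDX with the FPU bit (bit 0) masked off, as in the patch */
	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) && (edx & ~1u))
		puts("CR4 assumed to exist");
	else
		puts("no usable CPUID flags: treat as no CR4");
	return 0;
}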
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 7720ff5a9ee2..efdec7cd8e01 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -8,8 +8,8 @@
  *  Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
  *
  *  Maintainers:
- *  Andreas Herrmann <andreas.herrmann3@amd.com>
- *  Borislav Petkov <borislav.petkov@amd.com>
+ *  Andreas Herrmann <herrmann.der.user@googlemail.com>
+ *  Borislav Petkov <bp@alien8.de>
  *
  *  This driver allows to upgrade microcode on F10h AMD
  *  CPUs and later.
@@ -190,6 +190,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
 #define F1XH_MPB_MAX_SIZE 2048
 #define F14H_MPB_MAX_SIZE 1824
 #define F15H_MPB_MAX_SIZE 4096
+#define F16H_MPB_MAX_SIZE 3458
 
 	switch (c->x86) {
 	case 0x14:
@@ -198,6 +199,9 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
 	case 0x15:
 		max_size = F15H_MPB_MAX_SIZE;
 		break;
+	case 0x16:
+		max_size = F16H_MPB_MAX_SIZE;
+		break;
 	default:
 		max_size = F1XH_MPB_MAX_SIZE;
 		break;
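
Note: the two hunks add family 0x16 to the per-family cap that verify_patch_size() enforces on microcode patch sizes; anything above the family's maximum is rejected as malformed. The mapping reads naturally as a helper; a hypothetical refactoring sketch (assuming the #defines above are in scope), not what the patch actually does:

/* Hypothetical helper equivalent to the switch in verify_patch_size() */
static u32 family_max_mpb_size(u8 family)
{
	switch (family) {
	case 0x14: return F14H_MPB_MAX_SIZE;	/* 1824 */
	case 0x15: return F15H_MPB_MAX_SIZE;	/* 4096 */
	case 0x16: return F16H_MPB_MAX_SIZE;	/* 3458 */
	default:   return F1XH_MPB_MAX_SIZE;	/* 2048 */
	}
}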
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index b00b33a18390..974b67e46dd0 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -22,6 +22,7 @@
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/rcupdate.h>
+#include <linux/module.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -166,6 +167,35 @@ static inline bool invalid_selector(u16 value)
 
 #define FLAG_MASK		FLAG_MASK_32
 
+/*
+ * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
+ * when it traps. The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
+ *
+ * Now, if the stack is empty, '&regs->sp' is out of range. In this
+ * case we try to take the previous stack. To always return a non-null
+ * stack pointer we fall back to regs as stack if no previous stack
+ * exists.
+ *
+ * This is valid only for kernel mode traps.
+ */
+unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
+	unsigned long sp = (unsigned long)&regs->sp;
+	struct thread_info *tinfo;
+
+	if (context == (sp & ~(THREAD_SIZE - 1)))
+		return sp;
+
+	tinfo = (struct thread_info *)context;
+	if (tinfo->previous_esp)
+		return tinfo->previous_esp;
+
+	return (unsigned long)regs;
+}
+EXPORT_SYMBOL_GPL(kernel_stack_pointer);
+
 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
 {
 	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
@@ -1511,6 +1541,13 @@ void syscall_trace_leave(struct pt_regs *regs)
 {
 	bool step;
 
+	/*
+	 * We may come here right after calling schedule_user()
+	 * or do_notify_resume(), in which case we can be in RCU
+	 * user mode.
+	 */
+	rcu_user_exit();
+
 	audit_syscall_exit(regs);
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
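
Note: kernel_stack_pointer() relies on THREAD_SIZE being a power of two. Masking any address inside a kernel stack with ~(THREAD_SIZE - 1) yields the stack's base, which on 32-bit x86 is also where thread_info (and its previous_esp link) lives. A stand-alone demo of the arithmetic; the address is made up, and THREAD_SIZE of 8 KiB matches 32-bit x86 of this era:

#include <assert.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL	/* 2 pages, as on 32-bit x86 */

int main(void)
{
	uintptr_t regs = 0xc1234f80UL;		/* hypothetical &pt_regs inside a stack */
	uintptr_t base = regs & ~(THREAD_SIZE - 1);

	assert(base == 0xc1234000UL);		/* that thread's stack base */
	return 0;
}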
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c80a33bc528b..f3e2ec878b8c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -68,6 +68,8 @@
 #include <asm/mwait.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
 #include <asm/setup.h>
 #include <asm/uv/uv.h>
 #include <linux/mc146818rtc.h>
@@ -818,6 +820,9 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
+	/* the FPU context is blank, nobody can own it */
+	__cpu_disable_lazy_restore(cpu);
+
 	err = do_boot_cpu(apicid, cpu, tidle);
 	if (err) {
 		pr_debug("do_boot_cpu failed %d\n", err);
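
Note: the new call clears the to-be-booted CPU's lazy-FPU bookkeeping before do_boot_cpu(), so a CPU coming back online can never believe it still owns some task's FPU state from an earlier online/offline cycle. As recalled from arch/x86/include/asm/fpu-internal.h of this era (not shown in this diff), the helper is just a per-cpu pointer reset:

#include <linux/percpu.h>
#include <linux/sched.h>

/* Declared in fpu-internal.h: the task whose FPU state a CPU last held. */
DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

/* Recalled shape of the helper; lazy FPU restore compares against this
 * pointer to decide whether the registers can be reused without a reload. */
static inline void __cpu_disable_lazy_restore_sketch(unsigned int cpu)
{
	per_cpu(fpu_owner_task, cpu) = NULL;
}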