Diffstat (limited to 'arch/x86')
 arch/x86/boot/compressed/eboot.c       |  2
 arch/x86/boot/header.S                 |  3
 arch/x86/include/asm/Kbuild            |  3
 arch/x86/include/asm/fpu-internal.h    | 15
 arch/x86/include/asm/ptrace.h          | 15
 arch/x86/include/asm/xen/hypercall.h   | 21
 arch/x86/include/asm/xen/hypervisor.h  |  1
 arch/x86/kernel/cpu/amd.c              | 14
 arch/x86/kernel/cpu/mcheck/mce_amd.c   |  2
 arch/x86/kernel/cpu/mcheck/mce_intel.c | 31
 arch/x86/kernel/entry_64.S             | 14
 arch/x86/kernel/head_32.S              |  9
 arch/x86/kernel/microcode_amd.c        |  8
 arch/x86/kernel/ptrace.c               | 37
 arch/x86/kernel/smpboot.c              |  5
 arch/x86/kvm/cpuid.h                   |  3
 arch/x86/kvm/emulate.c                 |  3
 arch/x86/kvm/vmx.c                     | 11
 arch/x86/kvm/x86.c                     |  3
 arch/x86/mm/tlb.c                      |  2
 arch/x86/pci/ce4100.c                  | 13
 arch/x86/platform/ce4100/ce4100.c      | 24
 arch/x86/xen/mmu.c                     | 21
 23 files changed, 192 insertions(+), 68 deletions(-)
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index c760e073963e..e87b0cac14b5 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -12,6 +12,8 @@
 #include <asm/setup.h>
 #include <asm/desc.h>
 
+#undef memcpy			/* Use memcpy from misc.c */
+
 #include "eboot.h"
 
 static efi_system_table_t *sys_table;
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 2a017441b8b2..8c132a625b94 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -476,6 +476,3 @@ die:
 setup_corrupt:
 	.byte	7
 	.string	"No setup signature found...\n"
-
-	.data
-dummy:	.long	0
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 66e5f0ef0523..79fd8a3418f9 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -12,6 +12,7 @@ header-y += mce.h
 header-y += msr-index.h
 header-y += msr.h
 header-y += mtrr.h
+header-y += perf_regs.h
 header-y += posix_types_32.h
 header-y += posix_types_64.h
 header-y += posix_types_x32.h
@@ -19,8 +20,10 @@ header-y += prctl.h
 header-y += processor-flags.h
 header-y += ptrace-abi.h
 header-y += sigcontext32.h
+header-y += svm.h
 header-y += ucontext.h
 header-y += vm86.h
+header-y += vmx.h
 header-y += vsyscall.h
 
 genhdr-y += unistd_32.h
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 831dbb9c6c02..41ab26ea6564 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -399,14 +399,17 @@ static inline void drop_init_fpu(struct task_struct *tsk)
 typedef struct { int preload; } fpu_switch_t;
 
 /*
- * FIXME! We could do a totally lazy restore, but we need to
- * add a per-cpu "this was the task that last touched the FPU
- * on this CPU" variable, and the task needs to have a "I last
- * touched the FPU on this CPU" and check them.
+ * Must be run with preemption disabled: this clears the fpu_owner_task,
+ * on this CPU.
  *
- * We don't do that yet, so "fpu_lazy_restore()" always returns
- * false, but some day..
+ * This will disable any lazy FPU state restore of the current FPU state,
+ * but if the current thread owns the FPU, it will still be saved by.
  */
+static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+{
+	per_cpu(fpu_owner_task, cpu) = NULL;
+}
+
 static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
 {
 	return new == this_cpu_read_stable(fpu_owner_task) &&
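
For context: __cpu_disable_lazy_restore() is the invalidation side of the lazy-restore pair above. A minimal sketch of the check it defeats, assuming the 3.7-era layout where the task also records the last CPU its FPU state lived on (illustrative, not part of the patch):

	/*
	 * Illustrative sketch only: the lazy-restore test presumably
	 * matches both directions -- the CPU remembers its last owner
	 * task and the task remembers its last CPU:
	 *
	 *	return new == this_cpu_read_stable(fpu_owner_task) &&
	 *	       cpu == new->thread.fpu.last_cpu;
	 *
	 * Writing NULL to fpu_owner_task makes the first clause false,
	 * forcing a full FPU restore on the next context switch -- which
	 * is exactly what a freshly onlined CPU with a blank FPU needs.
	 */
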
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index dcfde52979c3..19f16ebaf4fa 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -205,21 +205,14 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
 }
 #endif
 
-/*
- * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
- * when it traps. The previous stack will be directly underneath the saved
- * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
- *
- * This is valid only for kernel mode traps.
- */
-static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
-{
 #ifdef CONFIG_X86_32
-	return (unsigned long)(&regs->sp);
+extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
 #else
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
 	return regs->sp;
-#endif
 }
+#endif
 
 #define GET_IP(regs) ((regs)->ip)
 #define GET_FP(regs) ((regs)->bp)
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 59c226d120cd..c20d1ce62dc6 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -359,18 +359,14 @@ HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
 	return _hypercall4(int, update_va_mapping, va,
 			   new_val.pte, new_val.pte >> 32, flags);
 }
+extern int __must_check xen_event_channel_op_compat(int, void *);
 
 static inline int
 HYPERVISOR_event_channel_op(int cmd, void *arg)
 {
 	int rc = _hypercall2(int, event_channel_op, cmd, arg);
-	if (unlikely(rc == -ENOSYS)) {
-		struct evtchn_op op;
-		op.cmd = cmd;
-		memcpy(&op.u, arg, sizeof(op.u));
-		rc = _hypercall1(int, event_channel_op_compat, &op);
-		memcpy(arg, &op.u, sizeof(op.u));
-	}
+	if (unlikely(rc == -ENOSYS))
+		rc = xen_event_channel_op_compat(cmd, arg);
 	return rc;
 }
 
@@ -386,17 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
 	return _hypercall3(int, console_io, cmd, count, str);
 }
 
+extern int __must_check HYPERVISOR_physdev_op_compat(int, void *);
+
 static inline int
 HYPERVISOR_physdev_op(int cmd, void *arg)
 {
 	int rc = _hypercall2(int, physdev_op, cmd, arg);
-	if (unlikely(rc == -ENOSYS)) {
-		struct physdev_op op;
-		op.cmd = cmd;
-		memcpy(&op.u, arg, sizeof(op.u));
-		rc = _hypercall1(int, physdev_op_compat, &op);
-		memcpy(arg, &op.u, sizeof(op.u));
-	}
+	if (unlikely(rc == -ENOSYS))
+		rc = HYPERVISOR_physdev_op_compat(cmd, arg);
 	return rc;
 }
 
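
The inline compat fallbacks above move out of line behind xen_event_channel_op_compat() and HYPERVISOR_physdev_op_compat(), so the large struct evtchn_op/physdev_op locals no longer sit in every caller. A sketch of what such a helper can look like, reconstructed from the deleted inline code (an assumption -- the real implementation lives outside this diff):

	/* Hypothetical out-of-line helper mirroring the removed inline body. */
	int xen_event_channel_op_compat(int cmd, void *arg)
	{
		struct evtchn_op op;
		int rc;

		op.cmd = cmd;
		memcpy(&op.u, arg, sizeof(op.u));
		rc = _hypercall1(int, event_channel_op_compat, &op);
		memcpy(arg, &op.u, sizeof(op.u));

		return rc;
	}
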
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 66d0fff1ee84..125f344f06a9 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -33,7 +33,6 @@
 #ifndef _ASM_X86_XEN_HYPERVISOR_H
 #define _ASM_X86_XEN_HYPERVISOR_H
 
-/* arch/i386/kernel/setup.c */
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
 
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f7e98a2c0d12..1b7d1656a042 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -631,6 +631,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		}
 	}
 
+	/*
+	 * The way access filter has a performance penalty on some workloads.
+	 * Disable it on the affected CPUs.
+	 */
+	if ((c->x86 == 0x15) &&
+	    (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
+		u64 val;
+
+		if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) {
+			val |= 0x1E;
+			wrmsrl_safe(0xc0011021, val);
+		}
+	}
+
 	cpu_detect_cache_sizes(c);
 
 	/* Multi core CPU? */
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 698b6ec12e0f..1ac581f38dfa 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -6,7 +6,7 @@
  *
  *  Written by Jacob Shin - AMD, Inc.
  *
- *  Support: borislav.petkov@amd.com
+ *  Maintained by: Borislav Petkov <bp@alien8.de>
  *
  *  April 2006
  *     - added support for AMD Family 0x10 processors
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 5f88abf07e9c..4f9a3cbfc4a3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -285,34 +285,39 @@ void cmci_clear(void)
 	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
+static long cmci_rediscover_work_func(void *arg)
+{
+	int banks;
+
+	/* Recheck banks in case CPUs don't all have the same */
+	if (cmci_supported(&banks))
+		cmci_discover(banks);
+
+	return 0;
+}
+
 /*
  * After a CPU went down cycle through all the others and rediscover
  * Must run in process context.
  */
 void cmci_rediscover(int dying)
 {
-	int banks;
-	int cpu;
-	cpumask_var_t old;
+	int cpu, banks;
 
 	if (!cmci_supported(&banks))
 		return;
-	if (!alloc_cpumask_var(&old, GFP_KERNEL))
-		return;
-	cpumask_copy(old, &current->cpus_allowed);
 
 	for_each_online_cpu(cpu) {
 		if (cpu == dying)
 			continue;
-		if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
+
+		if (cpu == smp_processor_id()) {
+			cmci_rediscover_work_func(NULL);
 			continue;
-		/* Recheck banks in case CPUs don't all have the same */
-		if (cmci_supported(&banks))
-			cmci_discover(banks);
-	}
+		}
 
-	set_cpus_allowed_ptr(current, old);
-	free_cpumask_var(old);
+		work_on_cpu(cpu, cmci_rediscover_work_func, NULL);
+	}
 }
 
 /*
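
work_on_cpu() queues the callback on the target CPU's workqueue and sleeps until it completes, so the rediscover loop no longer rewrites its own affinity mask (the old set_cpus_allowed_ptr() dance, which can fail silently). A minimal usage sketch, assuming the 3.7-era API; probe_fn and probe_cpu2 are hypothetical names:

	/* Illustration: run a function on CPU 2 from process context
	 * without touching current->cpus_allowed. */
	static long probe_fn(void *arg)
	{
		pr_info("running on CPU %d\n", smp_processor_id());
		return 0;
	}

	static void probe_cpu2(void)
	{
		work_on_cpu(2, probe_fn, NULL);
	}
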
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b51b2c7ee51f..1328fe49a3f1 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -995,8 +995,8 @@ END(interrupt)
  */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
-	ASM_CLAC
 	XCPT_FRAME
+	ASM_CLAC
 	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
 	/* 0(%rsp): old_rsp-ARGOFFSET */
@@ -1135,8 +1135,8 @@ END(common_interrupt)
  */
 .macro apicinterrupt num sym do_sym
 ENTRY(\sym)
-	ASM_CLAC
 	INTR_FRAME
+	ASM_CLAC
 	pushq_cfi $~(\num)
 .Lcommon_\sym:
 	interrupt \do_sym
@@ -1190,8 +1190,8 @@ apicinterrupt IRQ_WORK_VECTOR \
1190 */ 1190 */
1191.macro zeroentry sym do_sym 1191.macro zeroentry sym do_sym
1192ENTRY(\sym) 1192ENTRY(\sym)
1193 ASM_CLAC
1194 INTR_FRAME 1193 INTR_FRAME
1194 ASM_CLAC
1195 PARAVIRT_ADJUST_EXCEPTION_FRAME 1195 PARAVIRT_ADJUST_EXCEPTION_FRAME
1196 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ 1196 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
1197 subq $ORIG_RAX-R15, %rsp 1197 subq $ORIG_RAX-R15, %rsp
@@ -1208,8 +1208,8 @@ END(\sym)
 
 .macro paranoidzeroentry sym do_sym
 ENTRY(\sym)
-	ASM_CLAC
 	INTR_FRAME
+	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
 	subq $ORIG_RAX-R15, %rsp
@@ -1227,8 +1227,8 @@ END(\sym)
 #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
 .macro paranoidzeroentry_ist sym do_sym ist
 ENTRY(\sym)
-	ASM_CLAC
 	INTR_FRAME
+	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
 	subq $ORIG_RAX-R15, %rsp
@@ -1247,8 +1247,8 @@ END(\sym)
 
 .macro errorentry sym do_sym
 ENTRY(\sym)
-	ASM_CLAC
 	XCPT_FRAME
+	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
 	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
@@ -1266,8 +1266,8 @@ END(\sym)
 	/* error code is on the stack already */
 .macro paranoiderrorentry sym do_sym
 ENTRY(\sym)
-	ASM_CLAC
 	XCPT_FRAME
+	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
 	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 957a47aec64e..4dac2f68ed4a 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -292,8 +292,8 @@ default_entry:
  * be using the global pages.
  *
  * NOTE! If we are on a 486 we may have no cr4 at all!
- * Specifically, cr4 exists if and only if CPUID exists,
- * which in turn exists if and only if EFLAGS.ID exists.
+ * Specifically, cr4 exists if and only if CPUID exists
+ * and has flags other than the FPU flag set.
  */
 	movl $X86_EFLAGS_ID,%ecx
 	pushl %ecx
@@ -308,6 +308,11 @@ default_entry:
 	testl %ecx,%eax
 	jz 6f			# No ID flag = no CPUID = no CR4
 
+	movl $1,%eax
+	cpuid
+	andl $~1,%edx		# Ignore CPUID.FPU
+	jz 6f			# No flags or only CPUID.FPU = no CR4
+
 	movl pa(mmu_cr4_features),%eax
 	movl %eax,%cr4
 
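
Rendered as C, the extended CR4 probe reads roughly as follows (a sketch of the assembly logic above; have_cpuid() stands in for the EFLAGS.ID toggle test and is hypothetical):

	/* Sketch: CR4 is assumed present only if CPUID exists and
	 * CPUID(1).EDX reports some feature beyond FPU -- some 486-class
	 * parts report only the FPU bit yet still have no CR4. */
	static int cr4_exists(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!have_cpuid())	/* EFLAGS.ID not togglable => no CPUID */
			return 0;
		cpuid(1, &eax, &ebx, &ecx, &edx);
		return (edx & ~1U) != 0;	/* ignore CPUID.FPU (bit 0) */
	}
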
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 7720ff5a9ee2..efdec7cd8e01 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -8,8 +8,8 @@
  *  Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
  *
  *  Maintainers:
- *  Andreas Herrmann <andreas.herrmann3@amd.com>
- *  Borislav Petkov <borislav.petkov@amd.com>
+ *  Andreas Herrmann <herrmann.der.user@googlemail.com>
+ *  Borislav Petkov <bp@alien8.de>
  *
  *  This driver allows to upgrade microcode on F10h AMD
  *  CPUs and later.
@@ -190,6 +190,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
 #define F1XH_MPB_MAX_SIZE 2048
 #define F14H_MPB_MAX_SIZE 1824
 #define F15H_MPB_MAX_SIZE 4096
+#define F16H_MPB_MAX_SIZE 3458
 
 	switch (c->x86) {
 	case 0x14:
@@ -198,6 +199,9 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
 	case 0x15:
 		max_size = F15H_MPB_MAX_SIZE;
 		break;
+	case 0x16:
+		max_size = F16H_MPB_MAX_SIZE;
+		break;
 	default:
 		max_size = F1XH_MPB_MAX_SIZE;
 		break;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index b00b33a18390..974b67e46dd0 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -22,6 +22,7 @@
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/rcupdate.h>
+#include <linux/module.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -166,6 +167,35 @@ static inline bool invalid_selector(u16 value)
 
 #define FLAG_MASK		FLAG_MASK_32
 
+/*
+ * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
+ * when it traps. The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
+ *
+ * Now, if the stack is empty, '&regs->sp' is out of range. In this
+ * case we try to take the previous stack. To always return a non-null
+ * stack pointer we fall back to regs as stack if no previous stack
+ * exists.
+ *
+ * This is valid only for kernel mode traps.
+ */
+unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
+	unsigned long sp = (unsigned long)&regs->sp;
+	struct thread_info *tinfo;
+
+	if (context == (sp & ~(THREAD_SIZE - 1)))
+		return sp;
+
+	tinfo = (struct thread_info *)context;
+	if (tinfo->previous_esp)
+		return tinfo->previous_esp;
+
+	return (unsigned long)regs;
+}
+EXPORT_SYMBOL_GPL(kernel_stack_pointer);
+
 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
 {
 	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
@@ -1511,6 +1541,13 @@ void syscall_trace_leave(struct pt_regs *regs)
 {
 	bool step;
 
+	/*
+	 * We may come here right after calling schedule_user()
+	 * or do_notify_resume(), in which case we can be in RCU
+	 * user mode.
+	 */
+	rcu_user_exit();
+
 	audit_syscall_exit(regs);
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c80a33bc528b..f3e2ec878b8c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -68,6 +68,8 @@
 #include <asm/mwait.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
 #include <asm/setup.h>
 #include <asm/uv/uv.h>
 #include <linux/mc146818rtc.h>
@@ -818,6 +820,9 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
+	/* the FPU context is blank, nobody can own it */
+	__cpu_disable_lazy_restore(cpu);
+
 	err = do_boot_cpu(apicid, cpu, tidle);
 	if (err) {
 		pr_debug("do_boot_cpu failed %d\n", err);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index a10e46016851..58fc51488828 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -24,6 +24,9 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
 
+	if (!static_cpu_has(X86_FEATURE_XSAVE))
+		return 0;
+
 	best = kvm_find_cpuid_entry(vcpu, 1, 0);
 	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
 }
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 39171cb307ea..bba39bfa1c4b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -426,8 +426,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
 			_ASM_EXTABLE(1b, 3b)				\
 			: "=m" ((ctxt)->eflags), "=&r" (_tmp),		\
 			  "+a" (*rax), "+d" (*rdx), "+qm"(_ex)		\
-			: "i" (EFLAGS_MASK), "m" ((ctxt)->src.val),	\
-			  "a" (*rax), "d" (*rdx));			\
+			: "i" (EFLAGS_MASK), "m" ((ctxt)->src.val));	\
 	} while (0)
 
 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ad6b1dd06f8b..f85815945fc6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6549,19 +6549,22 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
 	/* Exposing INVPCID only when PCID is exposed */
 	best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
 	if (vmx_invpcid_supported() &&
 	    best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
 	    guest_cpuid_has_pcid(vcpu)) {
+		exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
 		exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
 		vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
 			     exec_control);
 	} else {
-		exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
-		vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
-			     exec_control);
+		if (cpu_has_secondary_exec_ctrls()) {
+			exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+			exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
+			vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+				     exec_control);
+		}
 		if (best)
 			best->ebx &= ~bit(X86_FEATURE_INVPCID);
 	}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 224a7e78cb6c..4f7641756be2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5781,6 +5781,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	int pending_vec, max_bits, idx;
 	struct desc_ptr dt;
 
+	if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
+		return -EINVAL;
+
 	dt.size = sregs->idt.limit;
 	dt.address = sregs->idt.base;
 	kvm_x86_ops->set_idt(vcpu, &dt);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 0777f042e400..60f926cd8b0e 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -197,7 +197,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	}
 
 	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
-					|| vmflag == VM_HUGETLB) {
+					|| vmflag & VM_HUGETLB) {
 		local_flush_tlb();
 		goto flush_all;
 	}
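
The one-character fix matters because vm_flags is a bitmask: a hugetlb VMA virtually always carries other VM_* bits, so the old strict equality never matched and hugetlb ranges fell through to the fine-grained flush path. An illustration with hypothetical values:

	unsigned long vmflag = VM_HUGETLB | VM_READ | VM_MAYREAD;

	/* vmflag == VM_HUGETLB  -> false: the extra bits defeat equality */
	/* vmflag &  VM_HUGETLB  -> nonzero: tests just the hugetlb bit   */
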
diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c
index 41bd2a2d2c50..b914e20b5a00 100644
--- a/arch/x86/pci/ce4100.c
+++ b/arch/x86/pci/ce4100.c
@@ -115,6 +115,16 @@ static void sata_revid_read(struct sim_dev_reg *reg, u32 *value)
 	reg_read(reg, value);
 }
 
+static void reg_noirq_read(struct sim_dev_reg *reg, u32 *value)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pci_config_lock, flags);
+	/* force interrupt pin value to 0 */
+	*value = reg->sim_reg.value & 0xfff00ff;
+	raw_spin_unlock_irqrestore(&pci_config_lock, flags);
+}
+
 static struct sim_dev_reg bus1_fixups[] = {
 	DEFINE_REG(2, 0, 0x10, (16*MB), reg_init, reg_read, reg_write)
 	DEFINE_REG(2, 0, 0x14, (256), reg_init, reg_read, reg_write)
@@ -144,6 +154,7 @@ static struct sim_dev_reg bus1_fixups[] = {
 	DEFINE_REG(11, 5, 0x10, (64*KB), reg_init, reg_read, reg_write)
 	DEFINE_REG(11, 6, 0x10, (256), reg_init, reg_read, reg_write)
 	DEFINE_REG(11, 7, 0x10, (64*KB), reg_init, reg_read, reg_write)
+	DEFINE_REG(11, 7, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
 	DEFINE_REG(12, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
 	DEFINE_REG(12, 0, 0x14, (256), reg_init, reg_read, reg_write)
 	DEFINE_REG(12, 1, 0x10, (1024), reg_init, reg_read, reg_write)
@@ -161,8 +172,10 @@ static struct sim_dev_reg bus1_fixups[] = {
 	DEFINE_REG(16, 0, 0x10, (64*KB), reg_init, reg_read, reg_write)
 	DEFINE_REG(16, 0, 0x14, (64*MB), reg_init, reg_read, reg_write)
 	DEFINE_REG(16, 0, 0x18, (64*MB), reg_init, reg_read, reg_write)
+	DEFINE_REG(16, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
 	DEFINE_REG(17, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
 	DEFINE_REG(18, 0, 0x10, (1*KB), reg_init, reg_read, reg_write)
+	DEFINE_REG(18, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
 };
 
 static void __init init_sim_regs(void)
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index 4c61b52191eb..92525cb8e54c 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -21,12 +21,25 @@
 #include <asm/i8259.h>
 #include <asm/io.h>
 #include <asm/io_apic.h>
+#include <asm/emergency-restart.h>
 
 static int ce4100_i8042_detect(void)
 {
 	return 0;
 }
 
+/*
+ * The CE4100 platform has an internal 8051 Microcontroller which is
+ * responsible for signaling to the external Power Management Unit the
+ * intention to reset, reboot or power off the system. This 8051 device has
+ * its command register mapped at I/O port 0xcf9 and the value 0x4 is used
+ * to power off the system.
+ */
+static void ce4100_power_off(void)
+{
+	outb(0x4, 0xcf9);
+}
+
 #ifdef CONFIG_SERIAL_8250
 
 static unsigned int mem_serial_in(struct uart_port *p, int offset)
@@ -139,8 +152,19 @@ void __init x86_ce4100_early_setup(void)
 	x86_init.mpparse.find_smp_config = x86_init_noop;
 	x86_init.pci.init = ce4100_pci_init;
 
+	/*
+	 * By default, the reboot method is ACPI which is supported by the
+	 * CE4100 bootloader CEFDK using FADT.ResetReg Address and ResetValue
+	 * the bootloader will however issue a system power off instead of
+	 * reboot. By using BOOT_KBD we ensure proper system reboot as
+	 * expected.
+	 */
+	reboot_type = BOOT_KBD;
+
 #ifdef CONFIG_X86_IO_APIC
 	x86_init.pci.init_irq = sdv_pci_init;
 	x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck;
 #endif
+
+	pm_power_off = ce4100_power_off;
 }
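
pm_power_off is the generic hook the architecture's power-off path invokes last, so after this hunk a CE4100 "poweroff" ends in outb(0x4, 0xcf9). Roughly the consumer side (an abridged, illustrative sketch -- not code from this patch):

	static void power_off_path(void)
	{
		/* ...shutdown work done by the arch reboot code... */
		if (pm_power_off)
			pm_power_off();	/* CE4100: signal the 8051 PMU */
	}
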
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 6226c99729b9..dcf5f2dd91ec 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1288,6 +1288,25 @@ unsigned long xen_read_cr2_direct(void)
 	return this_cpu_read(xen_vcpu_info.arch.cr2);
 }
 
+void xen_flush_tlb_all(void)
+{
+	struct mmuext_op *op;
+	struct multicall_space mcs;
+
+	trace_xen_mmu_flush_tlb_all(0);
+
+	preempt_disable();
+
+	mcs = xen_mc_entry(sizeof(*op));
+
+	op = mcs.args;
+	op->cmd = MMUEXT_TLB_FLUSH_ALL;
+	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+	preempt_enable();
+}
 static void xen_flush_tlb(void)
 {
 	struct mmuext_op *op;
@@ -2518,7 +2537,7 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 	err = 0;
 out:
 
-	flush_tlb_all();
+	xen_flush_tlb_all();
 
 	return err;
 }