author     H. Peter Anvin <hpa@linux.intel.com>  2013-06-28 18:26:17 -0400
committer  H. Peter Anvin <hpa@linux.intel.com>  2013-06-28 18:26:17 -0400
commit     9f84b6267ccde1bebe3f9cd40a91716b5ece5e20 (patch)
tree       b51dcf9fb1b7205ed8134ad1169e73719897163d /arch/x86
parent     719038de98bc8479b771c582a1e4a1e86079da22 (diff)
parent     5f8c4218148822fde6eebbeefc34bd0a6061e031 (diff)
Merge remote-tracking branch 'origin/x86/fpu' into queue/x86/cpu
Use the union of 3.10 x86/cpu and x86/fpu as baseline.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
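Context for the diff below: the x86/fpu side of this merge introduces static_cpu_has_safe(), a variant of static_cpu_has() that may be called before alternatives have been applied; until patching it falls back to a dynamic boot_cpu_has() test via __static_cpu_has_safe(). A minimal usage sketch follows (illustrative only; do_eager()/do_lazy() are hypothetical stand-ins — the real call site in this merge is __thread_fpu_begin() in fpu-internal.h):

	/* Sketch only, not part of the patch; helpers are hypothetical. */
	static void do_eager(void) { }
	static void do_lazy(void) { }

	static void example(void)
	{
		if (static_cpu_has_safe(X86_FEATURE_EAGER_FPU))
			do_eager();	/* patched to a static branch once alternatives run */
		else
			do_lazy();	/* before that, falls back to boot_cpu_has() */
	}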
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                         |   1
-rw-r--r--  arch/x86/Kconfig.debug                   |  10
-rw-r--r--  arch/x86/crypto/crc32-pclmul_asm.S       |   2
-rw-r--r--  arch/x86/crypto/sha256-avx-asm.S         |   2
-rw-r--r--  arch/x86/crypto/sha256-ssse3-asm.S       |   2
-rw-r--r--  arch/x86/include/asm/cpufeature.h        | 118
-rw-r--r--  arch/x86/include/asm/fpu-internal.h      |   4
-rw-r--r--  arch/x86/include/asm/inst.h              |  74
-rw-r--r--  arch/x86/include/asm/processor.h         |   3
-rw-r--r--  arch/x86/kernel/asm-offsets_32.c         |   1
-rw-r--r--  arch/x86/kernel/cpu/bugs.c               |  21
-rw-r--r--  arch/x86/kernel/cpu/common.c             |  19
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c              |   2
-rw-r--r--  arch/x86/kernel/cpu/proc.c               |   4
-rw-r--r--  arch/x86/kernel/head64.c                 |   2
-rw-r--r--  arch/x86/kernel/head_32.S                |  21
-rw-r--r--  arch/x86/kernel/head_64.S                |   6
-rw-r--r--  arch/x86/kernel/i387.c                   |  73
-rw-r--r--  arch/x86/kernel/microcode_intel_early.c  |   5
-rw-r--r--  arch/x86/kernel/process.c                |   5
-rw-r--r--  arch/x86/kernel/xsave.c                  |   5
-rw-r--r--  arch/x86/lguest/boot.c                   |   2
-rw-r--r--  arch/x86/mm/init.c                       |  19
-rw-r--r--  arch/x86/pci/mrst.c                      |  10
-rw-r--r--  arch/x86/platform/efi/efi.c              |   2
-rw-r--r--  arch/x86/xen/enlighten.c                 |   2
-rw-r--r--  arch/x86/xen/smp.c                       |  10
-rw-r--r--  arch/x86/xen/smp.h                       |   1
28 files changed, 318 insertions(+), 108 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6a154a91c7e7..685692c94f05 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -108,7 +108,6 @@ config X86
 	select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
 	select GENERIC_TIME_VSYSCALL if X86_64
 	select KTIME_SCALAR if X86_32
-	select ALWAYS_USE_PERSISTENT_CLOCK
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select HAVE_CONTEXT_TRACKING if X86_64
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index b6a770132b67..c963881de0d0 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -303,4 +303,14 @@ config DEBUG_NMI_SELFTEST
 
 	  If unsure, say N.
 
+config X86_DEBUG_STATIC_CPU_HAS
+	bool "Debug alternatives"
+	depends on DEBUG_KERNEL
+	---help---
+	  This option causes additional code to be generated which
+	  fails if static_cpu_has() is used before alternatives have
+	  run.
+
+	  If unsure, say N.
+
 endmenu
diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S
index 94c27df8a549..f247304299a2 100644
--- a/arch/x86/crypto/crc32-pclmul_asm.S
+++ b/arch/x86/crypto/crc32-pclmul_asm.S
@@ -240,7 +240,7 @@ fold_64:
 	pand	%xmm3, %xmm1
 	PCLMULQDQ 0x00, CONSTANT, %xmm1
 	pxor	%xmm2, %xmm1
-	pextrd	$0x01, %xmm1, %eax
+	PEXTRD	0x01, %xmm1, %eax
 
 	ret
 ENDPROC(crc32_pclmul_le_16)
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
index 56610c4bf31b..642f15687a0a 100644
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@ -118,7 +118,7 @@ y2 = %r15d
 
 _INP_END_SIZE = 8
 _INP_SIZE = 8
-_XFER_SIZE = 8
+_XFER_SIZE = 16
 _XMM_SAVE_SIZE = 0
 
 _INP_END = 0
diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
index 98d3c391da81..f833b74d902b 100644
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/crypto/sha256-ssse3-asm.S
@@ -111,7 +111,7 @@ y2 = %r15d
 
 _INP_END_SIZE = 8
 _INP_SIZE = 8
-_XFER_SIZE = 8
+_XFER_SIZE = 16
 _XMM_SAVE_SIZE = 0
 
 _INP_END = 0
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index e99ac27f95b2..47538a61c91b 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -92,7 +92,7 @@
 #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
 #define X86_FEATURE_11AP	(3*32+19) /* "" Bad local APIC aka 11AP */
 #define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
-					  /* 21 available, was AMD_C1E */
+#define X86_FEATURE_ALWAYS	(3*32+21) /* "" Always-present feature */
 #define X86_FEATURE_XTOPOLOGY	(3*32+22) /* cpu topology enum extensions */
 #define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
 #define X86_FEATURE_NONSTOP_TSC	(3*32+24) /* TSC does not stop in C states */
@@ -356,15 +356,36 @@ extern const char * const x86_power_flags[32];
 #endif /* CONFIG_X86_64 */
 
 #if __GNUC__ >= 4
+extern void warn_pre_alternatives(void);
+extern bool __static_cpu_has_safe(u16 bit);
+
 /*
  * Static testing of CPU features. Used the same as boot_cpu_has().
  * These are only valid after alternatives have run, but will statically
  * patch the target code for additional performance.
- *
  */
 static __always_inline __pure bool __static_cpu_has(u16 bit)
 {
 #if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+	/*
+	 * Catch too early usage of this before alternatives
+	 * have run.
+	 */
+	asm goto("1: jmp %l[t_warn]\n"
+		 "2:\n"
+		 ".section .altinstructions,\"a\"\n"
+		 " .long 1b - .\n"
+		 " .long 0\n"			/* no replacement */
+		 " .word %P0\n"			/* 1: do replace */
+		 " .byte 2b - 1b\n"		/* source len */
+		 " .byte 0\n"			/* replacement len */
+		 ".previous\n"
+		 /* skipping size check since replacement size = 0 */
+		 : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
+#endif
+
 	asm goto("1: jmp %l[t_no]\n"
 		 "2:\n"
 		 ".section .altinstructions,\"a\"\n"
@@ -379,7 +400,13 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 	return true;
 t_no:
 	return false;
-#else
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+ t_warn:
+	warn_pre_alternatives();
+	return false;
+#endif
+#else /* GCC_VERSION >= 40500 */
 	u8 flag;
 	/* Open-coded due to __stringify() in ALTERNATIVE() */
 	asm volatile("1: movb $0,%0\n"
@@ -411,11 +438,94 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 	__static_cpu_has(bit) :				\
 	boot_cpu_has(bit)				\
 )
+
+static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+{
+#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
+/*
+ * We need to spell the jumps to the compiler because, depending on the offset,
+ * the replacement jump can be bigger than the original jump, and this we cannot
+ * have. Thus, we force the jump to the widest, 4-byte, signed relative
+ * offset even though the last would often fit in less bytes.
+ */
+	asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
+		 "2:\n"
+		 ".section .altinstructions,\"a\"\n"
+		 " .long 1b - .\n"		/* src offset */
+		 " .long 3f - .\n"		/* repl offset */
+		 " .word %P1\n"			/* always replace */
+		 " .byte 2b - 1b\n"		/* src len */
+		 " .byte 4f - 3f\n"		/* repl len */
+		 ".previous\n"
+		 ".section .altinstr_replacement,\"ax\"\n"
+		 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
+		 "4:\n"
+		 ".previous\n"
+		 ".section .altinstructions,\"a\"\n"
+		 " .long 1b - .\n"		/* src offset */
+		 " .long 0\n"			/* no replacement */
+		 " .word %P0\n"			/* feature bit */
+		 " .byte 2b - 1b\n"		/* src len */
+		 " .byte 0\n"			/* repl len */
+		 ".previous\n"
+		 : : "i" (bit), "i" (X86_FEATURE_ALWAYS)
+		 : : t_dynamic, t_no);
+	return true;
+ t_no:
+	return false;
+ t_dynamic:
+	return __static_cpu_has_safe(bit);
+#else /* GCC_VERSION >= 40500 */
+	u8 flag;
+	/* Open-coded due to __stringify() in ALTERNATIVE() */
+	asm volatile("1: movb $2,%0\n"
+		     "2:\n"
+		     ".section .altinstructions,\"a\"\n"
+		     " .long 1b - .\n"		/* src offset */
+		     " .long 3f - .\n"		/* repl offset */
+		     " .word %P2\n"		/* always replace */
+		     " .byte 2b - 1b\n"		/* source len */
+		     " .byte 4f - 3f\n"		/* replacement len */
+		     ".previous\n"
+		     ".section .discard,\"aw\",@progbits\n"
+		     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
+		     ".previous\n"
+		     ".section .altinstr_replacement,\"ax\"\n"
+		     "3: movb $0,%0\n"
+		     "4:\n"
+		     ".previous\n"
+		     ".section .altinstructions,\"a\"\n"
+		     " .long 1b - .\n"		/* src offset */
+		     " .long 5f - .\n"		/* repl offset */
+		     " .word %P1\n"		/* feature bit */
+		     " .byte 4b - 3b\n"		/* src len */
+		     " .byte 6f - 5f\n"		/* repl len */
+		     ".previous\n"
+		     ".section .discard,\"aw\",@progbits\n"
+		     " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
+		     ".previous\n"
+		     ".section .altinstr_replacement,\"ax\"\n"
+		     "5: movb $1,%0\n"
+		     "6:\n"
+		     ".previous\n"
+		     : "=qm" (flag)
+		     : "i" (bit), "i" (X86_FEATURE_ALWAYS));
+	return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
+#endif
+}
+
+#define static_cpu_has_safe(bit)			\
+(							\
+	__builtin_constant_p(boot_cpu_has(bit)) ?	\
+	boot_cpu_has(bit) :				\
+	_static_cpu_has_safe(bit)			\
+)
 #else
 /*
  * gcc 3.x is too stupid to do the static test; fall back to dynamic.
  */
 #define static_cpu_has(bit) boot_cpu_has(bit)
+#define static_cpu_has_safe(bit) boot_cpu_has(bit)
 #endif
 
 #define cpu_has_bug(c, bit)	cpu_has(c, (bit))
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index e25cc33ec54d..4d0bda7b11e3 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -62,10 +62,8 @@ extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
 #define xstateregs_active	fpregs_active
 
 #ifdef CONFIG_MATH_EMULATION
-# define HAVE_HWFP		(boot_cpu_data.hard_math)
 extern void finit_soft_fpu(struct i387_soft_struct *soft);
 #else
-# define HAVE_HWFP		1
 static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
 #endif
 
@@ -345,7 +343,7 @@ static inline void __thread_fpu_end(struct task_struct *tsk)
 
 static inline void __thread_fpu_begin(struct task_struct *tsk)
 {
-	if (!use_eager_fpu())
+	if (!static_cpu_has_safe(X86_FEATURE_EAGER_FPU))
 		clts();
 	__thread_set_has_fpu(tsk);
 }
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
index 280bf7fb6aba..3e115273ed88 100644
--- a/arch/x86/include/asm/inst.h
+++ b/arch/x86/include/asm/inst.h
@@ -9,12 +9,68 @@
 
 #define REG_NUM_INVALID		100
 
-#define REG_TYPE_R64		0
-#define REG_TYPE_XMM		1
+#define REG_TYPE_R32		0
+#define REG_TYPE_R64		1
+#define REG_TYPE_XMM		2
 #define REG_TYPE_INVALID	100
 
+	.macro R32_NUM opd r32
+	\opd = REG_NUM_INVALID
+	.ifc \r32,%eax
+	\opd = 0
+	.endif
+	.ifc \r32,%ecx
+	\opd = 1
+	.endif
+	.ifc \r32,%edx
+	\opd = 2
+	.endif
+	.ifc \r32,%ebx
+	\opd = 3
+	.endif
+	.ifc \r32,%esp
+	\opd = 4
+	.endif
+	.ifc \r32,%ebp
+	\opd = 5
+	.endif
+	.ifc \r32,%esi
+	\opd = 6
+	.endif
+	.ifc \r32,%edi
+	\opd = 7
+	.endif
+#ifdef CONFIG_X86_64
+	.ifc \r32,%r8d
+	\opd = 8
+	.endif
+	.ifc \r32,%r9d
+	\opd = 9
+	.endif
+	.ifc \r32,%r10d
+	\opd = 10
+	.endif
+	.ifc \r32,%r11d
+	\opd = 11
+	.endif
+	.ifc \r32,%r12d
+	\opd = 12
+	.endif
+	.ifc \r32,%r13d
+	\opd = 13
+	.endif
+	.ifc \r32,%r14d
+	\opd = 14
+	.endif
+	.ifc \r32,%r15d
+	\opd = 15
+	.endif
+#endif
+	.endm
+
 	.macro R64_NUM opd r64
 	\opd = REG_NUM_INVALID
+#ifdef CONFIG_X86_64
 	.ifc \r64,%rax
 	\opd = 0
 	.endif
@@ -63,6 +119,7 @@
 	.ifc \r64,%r15
 	\opd = 15
 	.endif
+#endif
 	.endm
 
 	.macro XMM_NUM opd xmm
@@ -118,10 +175,13 @@
 	.endm
 
 	.macro REG_TYPE type reg
+	R32_NUM reg_type_r32 \reg
 	R64_NUM reg_type_r64 \reg
 	XMM_NUM reg_type_xmm \reg
 	.if reg_type_r64 <> REG_NUM_INVALID
 	\type = REG_TYPE_R64
+	.elseif reg_type_r32 <> REG_NUM_INVALID
+	\type = REG_TYPE_R32
 	.elseif reg_type_xmm <> REG_NUM_INVALID
 	\type = REG_TYPE_XMM
 	.else
@@ -162,6 +222,16 @@
 	.byte \imm8
 	.endm
 
+	.macro PEXTRD imm8 xmm gpr
+	R32_NUM extrd_opd1 \gpr
+	XMM_NUM extrd_opd2 \xmm
+	PFX_OPD_SIZE
+	PFX_REX extrd_opd1 extrd_opd2
+	.byte 0x0f, 0x3a, 0x16
+	MODRM 0xc0 extrd_opd1 extrd_opd2
+	.byte \imm8
+	.endm
+
 	.macro AESKEYGENASSIST rcon xmm1 xmm2
 	XMM_NUM aeskeygen_opd1 \xmm1
 	XMM_NUM aeskeygen_opd2 \xmm2
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 5b87d52eed0b..29937c4f6ff8 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -89,9 +89,9 @@ struct cpuinfo_x86 {
 	char	wp_works_ok;	/* It doesn't on 386's */
 
 	/* Problems on some 486Dx4's and old 386's: */
-	char	hard_math;
 	char	rfu;
 	char	pad0;
+	char	pad1;
 #else
 	/* Number of 4K pages in DTLB/ITLB combined(in pages): */
 	int	x86_tlbsize;
@@ -164,6 +164,7 @@ extern const struct seq_operations cpuinfo_op;
 #define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
+extern void __cpuinit fpu_detect(struct cpuinfo_x86 *c);
 
 extern void early_cpu_init(void);
 extern void identify_boot_cpu(void);
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 0ef4bba2acb7..d67c4be3e8b1 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -28,7 +28,6 @@ void foo(void)
 	OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
 	OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
 	OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
-	OFFSET(CPUINFO_hard_math, cpuinfo_x86, hard_math);
 	OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
 	OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
 	OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4112be9a4659..03445346ee0a 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -17,15 +17,6 @@
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
 
-static int __init no_387(char *s)
-{
-	boot_cpu_data.hard_math = 0;
-	write_cr0(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP | read_cr0());
-	return 1;
-}
-
-__setup("no387", no_387);
-
 static double __initdata x = 4195835.0;
 static double __initdata y = 3145727.0;
 
@@ -44,15 +35,6 @@ static void __init check_fpu(void)
 {
 	s32 fdiv_bug;
 
-	if (!boot_cpu_data.hard_math) {
-#ifndef CONFIG_MATH_EMULATION
-		pr_emerg("No coprocessor found and no math emulation present\n");
-		pr_emerg("Giving up\n");
-		for (;;) ;
-#endif
-		return;
-	}
-
 	kernel_fpu_begin();
 
 	/*
@@ -107,5 +89,6 @@ void __init check_bugs(void)
 	 * kernel_fpu_begin/end() in check_fpu() relies on the patched
 	 * alternative instructions.
 	 */
-	check_fpu();
+	if (cpu_has_fpu)
+		check_fpu();
 }
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 22018f70a671..a4a07c0acb1f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -711,10 +711,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 		return;
 
 	cpu_detect(c);
-
 	get_cpu_vendor(c);
-
 	get_cpu_cap(c);
+	fpu_detect(c);
 
 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);
@@ -724,6 +723,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
 	if (this_cpu->c_bsp_init)
 		this_cpu->c_bsp_init(c);
+
+	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 }
 
 void __init early_cpu_init(void)
@@ -1363,3 +1364,17 @@ void __cpuinit cpu_init(void)
 	fpu_init();
 }
 #endif
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+void warn_pre_alternatives(void)
+{
+	WARN(1, "You're using static_cpu_has before alternatives have run!\n");
+}
+EXPORT_SYMBOL_GPL(warn_pre_alternatives);
+#endif
+
+inline bool __static_cpu_has_safe(u16 bit)
+{
+	return boot_cpu_has(bit);
+}
+EXPORT_SYMBOL_GPL(__static_cpu_has_safe);
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index d048d5ca43c1..7582f475b163 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -333,7 +333,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 	switch (dir0_lsn) {
 	case 0xd:  /* either a 486SLC or DLC w/o DEVID */
 		dir0_msn = 0;
-		p = Cx486_name[(c->hard_math) ? 1 : 0];
+		p = Cx486_name[(cpu_has_fpu ? 1 : 0)];
 		break;
 
 	case 0xe:  /* a 486S A step */
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 37a198bd48c8..aee6317b902f 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -37,8 +37,8 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 		   static_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no",
 		   static_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no",
 		   static_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no",
-		   c->hard_math ? "yes" : "no",
-		   c->hard_math ? "yes" : "no",
+		   static_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
+		   static_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
 		   c->cpuid_level,
 		   c->wp_works_ok ? "yes" : "no");
 }
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index dab95a85f7f8..55b67614ed94 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -34,7 +34,7 @@
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
 static unsigned int __initdata next_early_pgt = 2;
-pmdval_t __initdata early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
+pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
 
 /* Wipe all early page tables except for the kernel symbol map */
 static void __init reset_early_page_tables(void)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 73afd11799ca..e65ddc62e113 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -444,7 +444,6 @@ is486:
 	orl %ecx,%eax
 	movl %eax,%cr0
 
-	call check_x87
 	lgdt early_gdt_descr
 	lidt idt_descr
 	ljmp $(__KERNEL_CS),$1f
@@ -467,26 +466,6 @@ is486:
 	pushl $0		# fake return address for unwinder
 	jmp *(initial_code)
 
-/*
- * We depend on ET to be correct. This checks for 287/387.
- */
-check_x87:
-	movb $0,X86_HARD_MATH
-	clts
-	fninit
-	fstsw %ax
-	cmpb $0,%al
-	je 1f
-	movl %cr0,%eax		/* no coprocessor: have to set bits */
-	xorl $4,%eax		/* set EM */
-	movl %eax,%cr0
-	ret
-	ALIGN
-1:	movb $1,X86_HARD_MATH
-	.byte 0xDB,0xE4		/* fsetpm for 287, ignored by 387 */
-	ret
-
-
 #include "verify_cpu.S"
 
 /*
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 08f7e8039099..321d65ebaffe 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -115,8 +115,10 @@ startup_64:
 	movq %rdi, %rax
 	shrq $PUD_SHIFT, %rax
 	andl $(PTRS_PER_PUD-1), %eax
-	movq %rdx, (4096+0)(%rbx,%rax,8)
-	movq %rdx, (4096+8)(%rbx,%rax,8)
+	movq %rdx, 4096(%rbx,%rax,8)
+	incl %eax
+	andl $(PTRS_PER_PUD-1), %eax
+	movq %rdx, 4096(%rbx,%rax,8)
 
 	addq $8192, %rbx
 	movq %rdi, %rax
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 245a71db401a..b627746f6b1a 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -22,23 +22,19 @@
 /*
  * Were we in an interrupt that interrupted kernel mode?
  *
- * For now, with eagerfpu we will return interrupted kernel FPU
- * state as not-idle. TBD: Ideally we can change the return value
- * to something like __thread_has_fpu(current). But we need to
- * be careful of doing __thread_clear_has_fpu() before saving
- * the FPU etc for supporting nested uses etc. For now, take
- * the simple route!
- *
  * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
  * pair does nothing at all: the thread must not have fpu (so
  * that we don't try to save the FPU state), and TS must
  * be set (so that the clts/stts pair does nothing that is
  * visible in the interrupted kernel thread).
+ *
+ * Except for the eagerfpu case when we return 1 unless we've already
+ * been eager and saved the state in kernel_fpu_begin().
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
 	if (use_eager_fpu())
-		return 0;
+		return __thread_has_fpu(current);
 
 	return !__thread_has_fpu(current) &&
 		(read_cr0() & X86_CR0_TS);
@@ -78,8 +74,8 @@ void __kernel_fpu_begin(void)
 	struct task_struct *me = current;
 
 	if (__thread_has_fpu(me)) {
-		__save_init_fpu(me);
 		__thread_clear_has_fpu(me);
+		__save_init_fpu(me);
 		/* We do 'stts()' in __kernel_fpu_end() */
 	} else if (!use_eager_fpu()) {
 		this_cpu_write(fpu_owner_task, NULL);
@@ -135,7 +131,7 @@ static void __cpuinit init_thread_xstate(void)
 	 * xsave_init().
 	 */
 
-	if (!HAVE_HWFP) {
+	if (!cpu_has_fpu) {
 		/*
 		 * Disable xsave as we do not support it if i387
 		 * emulation is enabled.
@@ -162,6 +158,14 @@ void __cpuinit fpu_init(void)
 	unsigned long cr0;
 	unsigned long cr4_mask = 0;
 
+#ifndef CONFIG_MATH_EMULATION
+	if (!cpu_has_fpu) {
+		pr_emerg("No FPU found and no math emulation present\n");
+		pr_emerg("Giving up\n");
+		for (;;)
+			asm volatile("hlt");
+	}
+#endif
 	if (cpu_has_fxsr)
 		cr4_mask |= X86_CR4_OSFXSR;
 	if (cpu_has_xmm)
@@ -171,7 +175,7 @@ void __cpuinit fpu_init(void)
 
 	cr0 = read_cr0();
 	cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
-	if (!HAVE_HWFP)
+	if (!cpu_has_fpu)
 		cr0 |= X86_CR0_EM;
 	write_cr0(cr0);
 
@@ -189,7 +193,7 @@ void __cpuinit fpu_init(void)
 
 void fpu_finit(struct fpu *fpu)
 {
-	if (!HAVE_HWFP) {
+	if (!cpu_has_fpu) {
 		finit_soft_fpu(&fpu->state->soft);
 		return;
 	}
@@ -218,7 +222,7 @@ int init_fpu(struct task_struct *tsk)
 	int ret;
 
 	if (tsk_used_math(tsk)) {
-		if (HAVE_HWFP && tsk == current)
+		if (cpu_has_fpu && tsk == current)
 			unlazy_fpu(tsk);
 		tsk->thread.fpu.last_cpu = ~0;
 		return 0;
@@ -515,14 +519,13 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 	if (ret)
 		return ret;
 
-	if (!HAVE_HWFP)
+	if (!static_cpu_has(X86_FEATURE_FPU))
 		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
 
-	if (!cpu_has_fxsr) {
+	if (!cpu_has_fxsr)
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 					   &target->thread.fpu.state->fsave, 0,
 					   -1);
-	}
 
 	sanitize_i387_state(target);
 
@@ -549,13 +552,13 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 
 	sanitize_i387_state(target);
 
-	if (!HAVE_HWFP)
+	if (!static_cpu_has(X86_FEATURE_FPU))
 		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
 
-	if (!cpu_has_fxsr) {
+	if (!cpu_has_fxsr)
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &target->thread.fpu.state->fsave, 0, -1);
-	}
+					  &target->thread.fpu.state->fsave, 0,
+					  -1);
 
 	if (pos > 0 || count < sizeof(env))
 		convert_from_fxsr(&env, target);
@@ -596,3 +599,33 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
 EXPORT_SYMBOL(dump_fpu);
 
 #endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
+
+static int __init no_387(char *s)
+{
+	setup_clear_cpu_cap(X86_FEATURE_FPU);
+	return 1;
+}
+
+__setup("no387", no_387);
+
+void __cpuinit fpu_detect(struct cpuinfo_x86 *c)
+{
+	unsigned long cr0;
+	u16 fsw, fcw;
+
+	fsw = fcw = 0xffff;
+
+	cr0 = read_cr0();
+	cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
+	write_cr0(cr0);
+
+	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+		     : "+m" (fsw), "+m" (fcw));
+
+	if (fsw == 0 && (fcw & 0x103f) == 0x003f)
+		set_cpu_cap(c, X86_FEATURE_FPU);
+	else
+		clear_cpu_cap(c, X86_FEATURE_FPU);
+
+	/* The final cr0 value is set in fpu_init() */
+}
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index d893e8ed8ac9..2e9e12871c2b 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -487,6 +487,7 @@ static inline void show_saved_mc(void)
 #endif
 
 #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
+static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 /*
  * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
  * hot added or resumes.
@@ -507,7 +508,7 @@ int save_mc_for_early(u8 *mc)
 	 * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
 	 * hotplug.
 	 */
-	cpu_hotplug_driver_lock();
+	mutex_lock(&x86_cpu_microcode_mutex);
 
 	mc_saved_count_init = mc_saved_data.mc_saved_count;
 	mc_saved_count = mc_saved_data.mc_saved_count;
@@ -544,7 +545,7 @@ int save_mc_for_early(u8 *mc)
 	}
 
 out:
-	cpu_hotplug_driver_unlock();
+	mutex_unlock(&x86_cpu_microcode_mutex);
 
 	return ret;
 }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 607af0d4d5ef..4e7a37ff03ab 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -312,6 +312,8 @@ void arch_cpu_idle(void)
 {
 	if (cpuidle_idle_call())
 		x86_idle();
+	else
+		local_irq_enable();
 }
 
 /*
@@ -368,9 +370,6 @@ void amd_e400_remove_cpu(int cpu)
  */
 static void amd_e400_idle(void)
 {
-	if (need_resched())
-		return;
-
 	if (!amd_e400_c1e_detected) {
 		u32 lo, hi;
 
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index ada87a329edc..d6c28acdf99c 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -243,7 +243,7 @@ int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 	if (!access_ok(VERIFY_WRITE, buf, size))
 		return -EACCES;
 
-	if (!HAVE_HWFP)
+	if (!static_cpu_has(X86_FEATURE_FPU))
 		return fpregs_soft_get(current, NULL, 0,
 			sizeof(struct user_i387_ia32_struct), NULL,
 			(struct _fpstate_ia32 __user *) buf) ? -1 : 1;
@@ -350,11 +350,10 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 	if (!used_math() && init_fpu(tsk))
 		return -1;
 
-	if (!HAVE_HWFP) {
+	if (!static_cpu_has(X86_FEATURE_FPU))
 		return fpregs_soft_set(current, NULL,
 				       0, sizeof(struct user_i387_ia32_struct),
 				       NULL, buf) != 0;
-	}
 
 	if (use_xsave()) {
 		struct _fpx_sw_bytes fx_sw_user;
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 7114c63f047d..d482bcaf61c1 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1410,7 +1410,7 @@ __init void lguest_init(void)
 	new_cpu_data.x86_capability[0] = cpuid_edx(1);
 
 	/* Math is always hard! */
-	new_cpu_data.hard_math = 1;
+	set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
 
 	/* We don't have features. We have puppies! Puppies! */
 #ifdef CONFIG_X86_MCE
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index fdc5dca14fb3..eaac1743def7 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -359,7 +359,17 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 }
 
 /*
- * would have hole in the middle or ends, and only ram parts will be mapped.
+ * We need to iterate through the E820 memory map and create direct mappings
+ * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
+ * create direct mappings for all pfns from [0 to max_low_pfn) and
+ * [4GB to max_pfn) because of possible memory holes in high addresses
+ * that cannot be marked as UC by fixed/variable range MTRRs.
+ * Depending on the alignment of E820 ranges, this may possibly result
+ * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
+ *
+ * init_mem_mapping() calls init_range_memory_mapping() with big range.
+ * That range would have hole in the middle or ends, and only ram parts
+ * will be mapped in init_range_memory_mapping().
  */
 static unsigned long __init init_range_memory_mapping(
 					   unsigned long r_start,
@@ -419,6 +429,13 @@ void __init init_mem_mapping(void)
 	max_pfn_mapped = 0; /* will get exact value next */
 	min_pfn_mapped = real_end >> PAGE_SHIFT;
 	last_start = start = real_end;
+
+	/*
+	 * We start from the top (end of memory) and go to the bottom.
+	 * The memblock_find_in_range() gets us a block of RAM from the
+	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
+	 * for page table.
+	 */
 	while (last_start > ISA_END_ADDRESS) {
 		if (last_start > step_size) {
 			start = round_down(last_start - 1, step_size);
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
index 0e0fabf17342..6eb18c42a28a 100644
--- a/arch/x86/pci/mrst.c
+++ b/arch/x86/pci/mrst.c
@@ -141,11 +141,6 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
  */
 static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
 {
-	if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
-				|| devfn == PCI_DEVFN(0, 0)
-				|| devfn == PCI_DEVFN(3, 0)))
-		return 1;
-
 	/* This is a workaround for A0 LNC bug where PCI status register does
 	 * not have new CAP bit set. can not be written by SW either.
 	 *
@@ -155,7 +150,10 @@ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
 	 */
 	if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
 		return 0;
-
+	if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
+				|| devfn == PCI_DEVFN(0, 0)
+				|| devfn == PCI_DEVFN(3, 0)))
+		return 1;
 	return 0; /* langwell on others */
 }
 
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 55856b2310d3..82089d8b1954 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -206,7 +206,7 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
 	}
 
 	if (boot_used_size && !finished) {
-		unsigned long size;
+		unsigned long size = 0;
 		u32 attr;
 		efi_status_t s;
 		void *tmp;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a492be2635ac..2fa02bc50034 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1557,7 +1557,7 @@ asmlinkage void __init xen_start_kernel(void)
 #ifdef CONFIG_X86_32
 	/* set up basic CPUID stuff */
 	cpu_detect(&new_cpu_data);
-	new_cpu_data.hard_math = 1;
+	set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
 	new_cpu_data.wp_works_ok = 1;
 	new_cpu_data.x86_capability[0] = cpuid_edx(1);
 #endif
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 8ff37995d54e..fb44426fe931 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -576,24 +576,22 @@ void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
 {
 	unsigned cpu;
 	unsigned int this_cpu = smp_processor_id();
+	int xen_vector = xen_map_vector(vector);
 
-	if (!(num_online_cpus() > 1))
+	if (!(num_online_cpus() > 1) || (xen_vector < 0))
 		return;
 
 	for_each_cpu_and(cpu, mask, cpu_online_mask) {
 		if (this_cpu == cpu)
 			continue;
 
-		xen_smp_send_call_function_single_ipi(cpu);
+		xen_send_IPI_one(cpu, xen_vector);
 	}
 }
 
 void xen_send_IPI_allbutself(int vector)
 {
-	int xen_vector = xen_map_vector(vector);
-
-	if (xen_vector >= 0)
-		xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
+	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
index 8981a76d081a..c7c2d89efd76 100644
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -5,7 +5,6 @@ extern void xen_send_IPI_mask(const struct cpumask *mask,
 extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
 					 int vector);
 extern void xen_send_IPI_allbutself(int vector);
-extern void physflat_send_IPI_allbutself(int vector);
 extern void xen_send_IPI_all(int vector);
 extern void xen_send_IPI_self(int vector);
 