Diffstat (limited to 'arch')
94 files changed, 2981 insertions(+), 1065 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ed92864d1325..6b8fc9003ecc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,6 +29,7 @@ config X86
 	select HAVE_FTRACE
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
+	select HAVE_ARCH_TRACEHOOK
 	select HAVE_GENERIC_DMA_COHERENT if X86_32
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 
@@ -1643,6 +1644,14 @@ config DMAR_FLOPPY_WA
 	  workaround will setup a 1:1 mapping for the first
 	  16M to make floppy (an ISA device) work.
 
+config INTR_REMAP
+	bool "Support for Interrupt Remapping (EXPERIMENTAL)"
+	depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
+	help
+	  Supports Interrupt remapping for IO-APIC and MSI devices.
+	  To use x2apic mode in the CPU's which support x2APIC enhancements or
+	  to support platforms with CPU's having > 8 bit APIC ID, say Y.
+
 source "drivers/pci/pcie/Kconfig"
 
 source "drivers/pci/Kconfig"
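Note: INTR_REMAP is a pure build-time gate; on a tree with the dependencies above satisfied it can be switched on from a config fragment. A minimal sketch, with symbol names taken from the hunk above:

    # .config fragment -- needs X86_64, X86_IO_APIC, PCI_MSI, ACPI, EXPERIMENTAL
    CONFIG_INTR_REMAP=y

The interrupt-remapping entry points this compiles in (dmar_table_init(), enable_intr_remapping()) appear in the apic_64.c hunks further down.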
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index b225219c448c..1b03fe6919ae 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -418,3 +418,73 @@ config X86_MINIMUM_CPU_FAMILY
 config X86_DEBUGCTLMSR
 	def_bool y
 	depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386)
+
+menuconfig PROCESSOR_SELECT
+	default y
+	bool "Supported processor vendors" if EMBEDDED
+	help
+	  This lets you choose what x86 vendor support code your kernel
+	  will include.
+
+config CPU_SUP_INTEL_32
+	default y
+	bool "Support Intel processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for Intel processors
+
+config CPU_SUP_INTEL_64
+	default y
+	bool "Support Intel processors" if PROCESSOR_SELECT
+	depends on 64BIT
+	help
+	  This enables extended support for Intel processors
+
+config CPU_SUP_CYRIX_32
+	default y
+	bool "Support Cyrix processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for Cyrix processors
+
+config CPU_SUP_AMD_32
+	default y
+	bool "Support AMD processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for AMD processors
+
+config CPU_SUP_AMD_64
+	default y
+	bool "Support AMD processors" if PROCESSOR_SELECT
+	depends on 64BIT
+	help
+	  This enables extended support for AMD processors
+
+config CPU_SUP_CENTAUR_32
+	default y
+	bool "Support Centaur processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for Centaur processors
+
+config CPU_SUP_CENTAUR_64
+	default y
+	bool "Support Centaur processors" if PROCESSOR_SELECT
+	depends on 64BIT
+	help
+	  This enables extended support for Centaur processors
+
+config CPU_SUP_TRANSMETA_32
+	default y
+	bool "Support Transmeta processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for Transmeta processors
+
+config CPU_SUP_UMC_32
+	default y
+	bool "Support UMC processors" if PROCESSOR_SELECT
+	depends on !64BIT
+	help
+	  This enables extended support for UMC processors
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index ba7736cf2ec7..29c5fbf08392 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -137,14 +137,15 @@ relocated:
 	 */
 	movl output_len(%ebx), %eax
 	pushl %eax
+			# push arguments for decompress_kernel:
 	pushl %ebp	# output address
 	movl input_len(%ebx), %eax
 	pushl %eax	# input_len
 	leal input_data(%ebx), %eax
 	pushl %eax	# input_data
 	leal boot_heap(%ebx), %eax
-	pushl %eax	# heap area as third argument
-	pushl %esi	# real mode pointer as second arg
+	pushl %eax	# heap area
+	pushl %esi	# real mode pointer
 	call decompress_kernel
 	addl $20, %esp
 	popl %ecx
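The pushes above build a cdecl argument frame right to left, so the real-mode pointer in %esi becomes the first C parameter, and the later addl $20, %esp drops exactly the five 4-byte arguments (output_len stays on the stack and is popped into %ecx afterwards). A sketch of the prototype this frame matches -- types approximated here; the authoritative declaration in misc.c uses a memptr typedef for the heap argument:

    asmlinkage void decompress_kernel(void *rmode, unsigned long heap,
                                      unsigned char *input_data,
                                      unsigned long input_len,
                                      unsigned char *output);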
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 9fea73706479..5780d361105b 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -16,7 +16,7 @@
  */
 #undef CONFIG_PARAVIRT
 #ifdef CONFIG_X86_32
-#define _ASM_DESC_H_ 1
+#define ASM_X86__DESC_H 1
 #endif
 
 #ifdef CONFIG_X86_64
@@ -27,7 +27,7 @@
 #include <linux/linkage.h>
 #include <linux/screen_info.h>
 #include <linux/elf.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/page.h>
 #include <asm/boot.h>
 #include <asm/bootparam.h>
@@ -251,7 +251,7 @@ static void __putstr(int error, const char *s)
 			y--;
 		}
 	} else {
-		vidmem [(x + cols * y) * 2] = c;
+		vidmem[(x + cols * y) * 2] = c;
 		if (++x >= cols) {
 			x = 0;
 			if (++y >= lines) {
@@ -277,7 +277,8 @@ static void *memset(void *s, int c, unsigned n)
 	int i;
 	char *ss = s;
 
-	for (i = 0; i < n; i++) ss[i] = c;
+	for (i = 0; i < n; i++)
+		ss[i] = c;
 	return s;
 }
 
@@ -287,7 +288,8 @@ static void *memcpy(void *dest, const void *src, unsigned n)
 	const char *s = src;
 	char *d = dest;
 
-	for (i = 0; i < n; i++) d[i] = s[i];
+	for (i = 0; i < n; i++)
+		d[i] = s[i];
 	return dest;
 }
 
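The guard rename in the first hunk is load-bearing: pre-defining a header's include guard is how the decompressor keeps the real <asm/desc.h> out of its build, and the trick only works while the macro matches the header's own guard spelling, so it has to follow the asm-x86 guard renaming done elsewhere in this series:

    /* must match the include guard inside <asm/desc.h> exactly */
    #define ASM_X86__DESC_H 1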
diff --git a/arch/x86/boot/mkcpustr.c b/arch/x86/boot/mkcpustr.c
index bbe76953bae9..4589caa3e9d1 100644
--- a/arch/x86/boot/mkcpustr.c
+++ b/arch/x86/boot/mkcpustr.c
@@ -15,7 +15,7 @@
 
 #include <stdio.h>
 
-#include "../kernel/cpu/feature_names.c"
+#include "../kernel/cpu/capflags.c"
 
 #if NCAPFLAGS > 8
 # error "Need to adjust the boot code handling of CPUID strings"
diff --git a/arch/x86/mach-es7000/Makefile b/arch/x86/es7000/Makefile
index 3ef8b43b62fc..3ef8b43b62fc 100644
--- a/arch/x86/mach-es7000/Makefile
+++ b/arch/x86/es7000/Makefile
diff --git a/arch/x86/mach-es7000/es7000.h b/arch/x86/es7000/es7000.h
index c8d5aa132fa0..4e62f6fa95b8 100644
--- a/arch/x86/mach-es7000/es7000.h
+++ b/arch/x86/es7000/es7000.h
@@ -1,7 +1,7 @@
 /*
  * Written by: Garry Forsgren, Unisys Corporation
  *             Natalie Protasevich, Unisys Corporation
  * This file contains the code to configure and interface
  * with Unisys ES7000 series hardware system manager.
  *
  * Copyright (c) 2003 Unisys Corporation.  All Rights Reserved.
@@ -18,7 +18,7 @@
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Unisys Corporation, Township Line & Union Meeting
 * Roads-A, Unisys Way, Blue Bell, Pennsylvania, 19424, or:
 *
 * http://www.unisys.com
@@ -41,7 +41,7 @@
 #define	MIP_VALID		0x0100000000000000ULL
 #define	MIP_PORT(VALUE)	((VALUE >> 32) & 0xffff)
 
 #define	MIP_RD_LO(VALUE)	(VALUE & 0xffffffff)
 
 struct mip_reg_info {
 	unsigned long long mip_info;
@@ -51,11 +51,11 @@ struct mip_reg_info {
 };
 
 struct part_info {
 	unsigned char type;
 	unsigned char length;
 	unsigned char part_id;
 	unsigned char apic_mode;
 	unsigned long snum;
 	char ptype[16];
 	char sname[64];
 	char pname[64];
@@ -68,11 +68,11 @@ struct psai {
 };
 
 struct es7000_mem_info {
 	unsigned char type;
 	unsigned char length;
 	unsigned char resv[6];
 	unsigned long long start;
 	unsigned long long size;
 };
 
 struct es7000_oem_table {
@@ -106,7 +106,7 @@ struct mip_reg {
 };
 
 #define	MIP_SW_APIC		0x1020b
 #define	MIP_FUNC(VALUE)		(VALUE & 0xff)
 
 extern int parse_unisys_oem (char *oemptr);
 extern void setup_unisys(void);
diff --git a/arch/x86/mach-es7000/es7000plat.c b/arch/x86/es7000/es7000plat.c
index 50189af14b85..7789fde13c3f 100644
--- a/arch/x86/mach-es7000/es7000plat.c
+++ b/arch/x86/es7000/es7000plat.c
@@ -72,7 +72,7 @@ es7000_rename_gsi(int ioapic, int gsi)
 		base += nr_ioapic_registers[i];
 	}
 
 	if (!ioapic && (gsi < 16))
 		gsi += base;
 	return gsi;
 }
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index a0e1dbe67dc1..127ec3f07214 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -85,8 +85,10 @@ static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
 	dump->regs.ax = regs->ax;
 	dump->regs.ds = current->thread.ds;
 	dump->regs.es = current->thread.es;
-	asm("movl %%fs,%0" : "=r" (fs)); dump->regs.fs = fs;
-	asm("movl %%gs,%0" : "=r" (gs)); dump->regs.gs = gs;
+	savesegment(fs, fs);
+	dump->regs.fs = fs;
+	savesegment(gs, gs);
+	dump->regs.gs = gs;
 	dump->regs.orig_ax = regs->orig_ax;
 	dump->regs.ip = regs->ip;
 	dump->regs.cs = regs->cs;
@@ -430,8 +432,9 @@ beyond_if:
 	current->mm->start_stack =
 		(unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
 	/* start thread */
-	asm volatile("movl %0,%%fs" :: "r" (0)); \
-	asm volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS));
+	loadsegment(fs, 0);
+	loadsegment(ds, __USER32_DS);
+	loadsegment(es, __USER32_DS);
 	load_gs_index(0);
 	(regs)->ip = ex.a_entry;
 	(regs)->sp = current->mm->start_stack;
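savesegment() and loadsegment() replace the ad-hoc inline asm with the single audited implementation from <asm/system.h>. A simplified sketch of the read side as defined in that era (the real loadsegment() counterpart additionally carries a .fixup table entry so a faulting selector load falls back to a null selector):

    #define savesegment(seg, value) \
            asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")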
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 20af4c79579a..e47bed2440ee 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -179,9 +179,10 @@ struct sigframe
 	u32 pretcode;
 	int sig;
 	struct sigcontext_ia32 sc;
-	struct _fpstate_ia32 fpstate;
+	struct _fpstate_ia32 fpstate_unused;	/* look at kernel/sigframe.h */
 	unsigned int extramask[_COMPAT_NSIG_WORDS-1];
 	char retcode[8];
+	/* fp state follows here */
 };
 
 struct rt_sigframe
@@ -192,8 +193,8 @@ struct rt_sigframe
 	u32 puc;
 	compat_siginfo_t info;
 	struct ucontext_ia32 uc;
-	struct _fpstate_ia32 fpstate;
 	char retcode[8];
+	/* fp state follows here */
 };
 
 #define COPY(x)		{		\
@@ -206,7 +207,7 @@ struct rt_sigframe
 	{ unsigned int cur;						\
 	  unsigned short pre;						\
 	  err |= __get_user(pre, &sc->seg);				\
-	  asm volatile("movl %%" #seg ",%0" : "=r" (cur));		\
+	  savesegment(seg, cur);					\
 	  pre |= mask;							\
 	  if (pre != cur) loadsegment(seg, pre); }
 
@@ -215,7 +216,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 				   unsigned int *peax)
 {
 	unsigned int tmpflags, gs, oldgs, err = 0;
-	struct _fpstate_ia32 __user *buf;
+	void __user *buf;
 	u32 tmp;
 
 	/* Always make any pending restarted system calls return -EINTR */
@@ -235,7 +236,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 	 */
 	err |= __get_user(gs, &sc->gs);
 	gs |= 3;
-	asm("movl %%gs,%0" : "=r" (oldgs));
+	savesegment(gs, oldgs);
 	if (gs != oldgs)
 		load_gs_index(gs);
 
@@ -259,26 +260,12 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 
 	err |= __get_user(tmp, &sc->fpstate);
 	buf = compat_ptr(tmp);
-	if (buf) {
-		if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
-			goto badframe;
-		err |= restore_i387_ia32(buf);
-	} else {
-		struct task_struct *me = current;
-
-		if (used_math()) {
-			clear_fpu(me);
-			clear_used_math();
-		}
-	}
+	err |= restore_i387_xstate_ia32(buf);
 
 	err |= __get_user(tmp, &sc->ax);
 	*peax = tmp;
 
 	return err;
-
-badframe:
-	return 1;
 }
 
 asmlinkage long sys32_sigreturn(struct pt_regs *regs)
@@ -350,19 +337,18 @@ badframe:
  */
 
 static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
-				 struct _fpstate_ia32 __user *fpstate,
+				 void __user *fpstate,
 				 struct pt_regs *regs, unsigned int mask)
 {
 	int tmp, err = 0;
 
-	tmp = 0;
-	__asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
+	savesegment(gs, tmp);
 	err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
-	__asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
+	savesegment(fs, tmp);
 	err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
-	__asm__("movl %%ds,%0" : "=r"(tmp): "0"(tmp));
+	savesegment(ds, tmp);
 	err |= __put_user(tmp, (unsigned int __user *)&sc->ds);
-	__asm__("movl %%es,%0" : "=r"(tmp): "0"(tmp));
+	savesegment(es, tmp);
 	err |= __put_user(tmp, (unsigned int __user *)&sc->es);
 
 	err |= __put_user((u32)regs->di, &sc->di);
@@ -381,7 +367,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
 	err |= __put_user((u32)regs->flags, &sc->flags);
 	err |= __put_user((u32)regs->sp, &sc->sp_at_signal);
 
-	tmp = save_i387_ia32(fpstate);
+	tmp = save_i387_xstate_ia32(fpstate);
 	if (tmp < 0)
 		err = -EFAULT;
 	else {
@@ -402,7 +388,8 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
 * Determine which stack to use..
 */
 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
-				 size_t frame_size)
+				 size_t frame_size,
+				 void **fpstate)
 {
 	unsigned long sp;
 
@@ -421,6 +408,11 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
 	    ka->sa.sa_restorer)
 		sp = (unsigned long) ka->sa.sa_restorer;
 
+	if (used_math()) {
+		sp = sp - sig_xstate_ia32_size;
+		*fpstate = (struct _fpstate_ia32 *) sp;
+	}
+
 	sp -= frame_size;
 	/* Align the stack pointer according to the i386 ABI,
 	 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
@@ -434,6 +426,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
 	struct sigframe __user *frame;
 	void __user *restorer;
 	int err = 0;
+	void __user *fpstate = NULL;
 
 	/* copy_to_user optimizes that into a single 8 byte store */
 	static const struct {
@@ -448,25 +441,21 @@
 		0,
 	};
 
-	frame = get_sigframe(ka, regs, sizeof(*frame));
+	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
 
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-		goto give_sigsegv;
+		return -EFAULT;
 
-	err |= __put_user(sig, &frame->sig);
-	if (err)
-		goto give_sigsegv;
+	if (__put_user(sig, &frame->sig))
+		return -EFAULT;
 
-	err |= ia32_setup_sigcontext(&frame->sc, &frame->fpstate, regs,
-					set->sig[0]);
-	if (err)
-		goto give_sigsegv;
+	if (ia32_setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]))
+		return -EFAULT;
 
 	if (_COMPAT_NSIG_WORDS > 1) {
-		err |= __copy_to_user(frame->extramask, &set->sig[1],
-				      sizeof(frame->extramask));
-		if (err)
-			goto give_sigsegv;
+		if (__copy_to_user(frame->extramask, &set->sig[1],
+				   sizeof(frame->extramask)))
+			return -EFAULT;
 	}
 
 	if (ka->sa.sa_flags & SA_RESTORER) {
@@ -487,7 +476,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
 	 */
 	err |= __copy_to_user(frame->retcode, &code, 8);
 	if (err)
-		goto give_sigsegv;
+		return -EFAULT;
 
 	/* Set up registers for signal handler */
 	regs->sp = (unsigned long) frame;
@@ -498,8 +487,8 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
 	regs->dx = 0;
 	regs->cx = 0;
 
-	asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
-	asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
+	loadsegment(ds, __USER32_DS);
+	loadsegment(es, __USER32_DS);
 
 	regs->cs = __USER32_CS;
 	regs->ss = __USER32_DS;
@@ -510,10 +499,6 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
 #endif
 
 	return 0;
-
-give_sigsegv:
-	force_sigsegv(sig, current);
-	return -EFAULT;
 }
 
 int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
@@ -522,6 +507,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	struct rt_sigframe __user *frame;
 	void __user *restorer;
 	int err = 0;
+	void __user *fpstate = NULL;
 
 	/* __copy_to_user optimizes that into a single 8 byte store */
 	static const struct {
@@ -537,30 +523,33 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		0,
 	};
 
-	frame = get_sigframe(ka, regs, sizeof(*frame));
+	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
 
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-		goto give_sigsegv;
+		return -EFAULT;
 
 	err |= __put_user(sig, &frame->sig);
 	err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
 	err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
 	err |= copy_siginfo_to_user32(&frame->info, info);
 	if (err)
-		goto give_sigsegv;
+		return -EFAULT;
 
 	/* Create the ucontext.  */
-	err |= __put_user(0, &frame->uc.uc_flags);
+	if (cpu_has_xsave)
+		err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
+	else
+		err |= __put_user(0, &frame->uc.uc_flags);
 	err |= __put_user(0, &frame->uc.uc_link);
 	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
 	err |= __put_user(sas_ss_flags(regs->sp),
 			  &frame->uc.uc_stack.ss_flags);
 	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-	err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
+	err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
 				     regs, set->sig[0]);
 	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 	if (err)
-		goto give_sigsegv;
+		return -EFAULT;
 
 	if (ka->sa.sa_flags & SA_RESTORER)
 		restorer = ka->sa.sa_restorer;
@@ -575,7 +564,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	 */
 	err |= __copy_to_user(frame->retcode, &code, 8);
 	if (err)
-		goto give_sigsegv;
+		return -EFAULT;
 
 	/* Set up registers for signal handler */
 	regs->sp = (unsigned long) frame;
@@ -591,8 +580,8 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	regs->dx = (unsigned long) &frame->info;
 	regs->cx = (unsigned long) &frame->uc;
 
-	asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
-	asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
+	loadsegment(ds, __USER32_DS);
+	loadsegment(es, __USER32_DS);
 
 	regs->cs = __USER32_CS;
 	regs->ss = __USER32_DS;
@@ -603,8 +592,4 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 #endif
 
 	return 0;
-
-give_sigsegv:
-	force_sigsegv(sig, current);
-	return -EFAULT;
 }
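With the fpstate member gone from both frame structs, get_sigframe() now carves the FP/extended state area out of the user stack above the frame. The resulting layout computation, condensed from the hunks above (sig_xstate_ia32_size covers the larger xsave-format area when the CPU has XSAVE):

    if (used_math()) {
            sp -= sig_xstate_ia32_size;     /* fp state lands above the frame */
            *fpstate = (struct _fpstate_ia32 *) sp;
    }
    sp -= frame_size;
    sp = ((sp + 4) & -16ul) - 4;            /* i386 ABI: ((sp + 4) & 15) == 0 */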
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 3db651fc8ec5..d6ea91abaebc 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -38,7 +38,7 @@ obj-y += tsc.o io_delay.o rtc.o
 
 obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
 obj-y				+= process.o
-obj-y				+= i387.o
+obj-y				+= i387.o xsave.o
 obj-y				+= ptrace.o
 obj-y				+= ds.o
 obj-$(CONFIG_X86_32)		+= tls.o
@@ -104,6 +104,8 @@ obj-$(CONFIG_OLPC) += olpc.o
 ifeq ($(CONFIG_X86_64),y)
         obj-y				+= genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
         obj-y				+= bios_uv.o
+        obj-y				+= genx2apic_cluster.o
+        obj-y				+= genx2apic_phys.o
         obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer_64.o
         obj-$(CONFIG_AUDIT)		+= audit_64.o
 
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index bfd10fd211cd..267e684f33a7 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -58,7 +58,6 @@ EXPORT_SYMBOL(acpi_disabled);
 #ifdef CONFIG_X86_64
 
 #include <asm/proto.h>
-#include <asm/genapic.h>
 
 #else				/* X86 */
 
@@ -97,8 +96,6 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 #warning ACPI uses CMPXCHG, i486 and later hardware
 #endif
 
-static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
-
 /* --------------------------------------------------------------------------
                               Boot-time Configuration
    -------------------------------------------------------------------------- */
@@ -160,6 +157,8 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size)
 struct acpi_mcfg_allocation *pci_mmcfg_config;
 int pci_mmcfg_config_num;
 
+static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
+
 static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
 {
 	if (!strcmp(mcfg->header.oem_id, "SGI"))
@@ -775,7 +774,7 @@ static void __init acpi_register_lapic_address(unsigned long address)
 
 	set_fixmap_nocache(FIX_APIC_BASE, address);
 	if (boot_cpu_physical_apicid == -1U) {
-		boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+		boot_cpu_physical_apicid = read_apic_id();
 #ifdef CONFIG_X86_32
 		apic_version[boot_cpu_physical_apicid] =
 			GET_APIC_VERSION(apic_read(APIC_LVR));
@@ -1351,7 +1350,9 @@ static void __init acpi_process_madt(void)
 			acpi_ioapic = 1;
 
 			smp_found_config = 1;
+#ifdef CONFIG_X86_32
 			setup_apic_routing();
+#endif
 		}
 	}
 	if (error == -EINVAL) {
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index f88bd0d982b0..44cae65e32ef 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -145,13 +145,18 @@ static int modern_apic(void)
 	return lapic_get_version() >= 0x14;
 }
 
-void apic_wait_icr_idle(void)
+/*
+ * Paravirt kernels also might be using these below ops. So we still
+ * use generic apic_read()/apic_write(), which might be pointing to different
+ * ops in PARAVIRT case.
+ */
+void xapic_wait_icr_idle(void)
 {
 	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
 		cpu_relax();
 }
 
-u32 safe_apic_wait_icr_idle(void)
+u32 safe_xapic_wait_icr_idle(void)
 {
 	u32 send_status;
 	int timeout;
@@ -167,6 +172,34 @@ u32 safe_xapic_wait_icr_idle(void)
 	return send_status;
 }
 
+void xapic_icr_write(u32 low, u32 id)
+{
+	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
+	apic_write(APIC_ICR, low);
+}
+
+u64 xapic_icr_read(void)
+{
+	u32 icr1, icr2;
+
+	icr2 = apic_read(APIC_ICR2);
+	icr1 = apic_read(APIC_ICR);
+
+	return icr1 | ((u64)icr2 << 32);
+}
+
+static struct apic_ops xapic_ops = {
+	.read = native_apic_mem_read,
+	.write = native_apic_mem_write,
+	.icr_read = xapic_icr_read,
+	.icr_write = xapic_icr_write,
+	.wait_icr_idle = xapic_wait_icr_idle,
+	.safe_wait_icr_idle = safe_xapic_wait_icr_idle,
+};
+
+struct apic_ops __read_mostly *apic_ops = &xapic_ops;
+EXPORT_SYMBOL_GPL(apic_ops);
+
 /**
 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
 */
@@ -1205,7 +1238,7 @@ void __init init_apic_mappings(void)
 	 * default configuration (or the MP table is broken).
 	 */
 	if (boot_cpu_physical_apicid == -1U)
-		boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+		boot_cpu_physical_apicid = read_apic_id();
 
 }
 
@@ -1242,7 +1275,7 @@ int __init APIC_init_uniprocessor(void)
 	 * might be zero if read from MP tables. Get it from LAPIC.
 	 */
 #ifdef CONFIG_CRASH_DUMP
-	boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+	boot_cpu_physical_apicid = read_apic_id();
 #endif
 	physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
 
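apic_ops turns every local-APIC access into an indirect call so the xAPIC MMIO path and, on 64-bit, the x2APIC MSR path share one call site; the comment in the hunk notes that paravirt kernels repoint the same ops. The consumer side is a thin wrapper in the apic header, roughly:

    static inline u32 apic_read(u32 reg)
    {
            return apic_ops->read(reg);
    }

    static inline void apic_write(u32 reg, u32 val)
    {
            apic_ops->write(reg, val);
    }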
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 446c062e831c..b6256587f99e 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -27,6 +27,7 @@
 #include <linux/clockchips.h>
 #include <linux/acpi_pmtmr.h>
 #include <linux/module.h>
+#include <linux/dmar.h>
 
 #include <asm/atomic.h>
 #include <asm/smp.h>
@@ -39,6 +40,7 @@
 #include <asm/proto.h>
 #include <asm/timex.h>
 #include <asm/apic.h>
+#include <asm/i8259.h>
 
 #include <mach_ipi.h>
 #include <mach_apic.h>
@@ -46,6 +48,11 @@
 static int disable_apic_timer __cpuinitdata;
 static int apic_calibrate_pmtmr __initdata;
 int disable_apic;
+int disable_x2apic;
+int x2apic;
+
+/* x2apic enabled before OS handover */
+int x2apic_preenabled;
 
 /* Local APIC timer works in C2 */
 int local_apic_timer_c2_ok;
@@ -118,13 +125,13 @@ static int modern_apic(void)
 	return lapic_get_version() >= 0x14;
 }
 
-void apic_wait_icr_idle(void)
+void xapic_wait_icr_idle(void)
 {
 	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
 		cpu_relax();
 }
 
-u32 safe_apic_wait_icr_idle(void)
+u32 safe_xapic_wait_icr_idle(void)
 {
 	u32 send_status;
 	int timeout;
@@ -140,6 +147,69 @@ u32 safe_xapic_wait_icr_idle(void)
 	return send_status;
 }
 
+void xapic_icr_write(u32 low, u32 id)
+{
+	apic_write(APIC_ICR2, id << 24);
+	apic_write(APIC_ICR, low);
+}
+
+u64 xapic_icr_read(void)
+{
+	u32 icr1, icr2;
+
+	icr2 = apic_read(APIC_ICR2);
+	icr1 = apic_read(APIC_ICR);
+
+	return (icr1 | ((u64)icr2 << 32));
+}
+
+static struct apic_ops xapic_ops = {
+	.read = native_apic_mem_read,
+	.write = native_apic_mem_write,
+	.icr_read = xapic_icr_read,
+	.icr_write = xapic_icr_write,
+	.wait_icr_idle = xapic_wait_icr_idle,
+	.safe_wait_icr_idle = safe_xapic_wait_icr_idle,
+};
+
+struct apic_ops __read_mostly *apic_ops = &xapic_ops;
+
+EXPORT_SYMBOL_GPL(apic_ops);
+
+static void x2apic_wait_icr_idle(void)
+{
+	/* no need to wait for icr idle in x2apic */
+	return;
+}
+
+static u32 safe_x2apic_wait_icr_idle(void)
+{
+	/* no need to wait for icr idle in x2apic */
+	return 0;
+}
+
+void x2apic_icr_write(u32 low, u32 id)
+{
+	wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
+}
+
+u64 x2apic_icr_read(void)
+{
+	unsigned long val;
+
+	rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
+	return val;
+}
+
+static struct apic_ops x2apic_ops = {
+	.read = native_apic_msr_read,
+	.write = native_apic_msr_write,
+	.icr_read = x2apic_icr_read,
+	.icr_write = x2apic_icr_write,
+	.wait_icr_idle = x2apic_wait_icr_idle,
+	.safe_wait_icr_idle = safe_x2apic_wait_icr_idle,
+};
+
 /**
 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
 */
@@ -629,10 +699,10 @@ int __init verify_local_APIC(void)
 	/*
 	 * The ID register is read/write in a real APIC.
 	 */
-	reg0 = read_apic_id();
+	reg0 = apic_read(APIC_ID);
 	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
 	apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
-	reg1 = read_apic_id();
+	reg1 = apic_read(APIC_ID);
 	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
 	apic_write(APIC_ID, reg0);
 	if (reg1 != (reg0 ^ APIC_ID_MASK))
@@ -833,6 +903,125 @@ void __cpuinit end_local_APIC_setup(void)
 	apic_pm_activate();
 }
 
+void check_x2apic(void)
+{
+	int msr, msr2;
+
+	rdmsr(MSR_IA32_APICBASE, msr, msr2);
+
+	if (msr & X2APIC_ENABLE) {
+		printk("x2apic enabled by BIOS, switching to x2apic ops\n");
+		x2apic_preenabled = x2apic = 1;
+		apic_ops = &x2apic_ops;
+	}
+}
+
+void enable_x2apic(void)
+{
+	int msr, msr2;
+
+	rdmsr(MSR_IA32_APICBASE, msr, msr2);
+	if (!(msr & X2APIC_ENABLE)) {
+		printk("Enabling x2apic\n");
+		wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
+	}
+}
+
+void enable_IR_x2apic(void)
+{
+#ifdef CONFIG_INTR_REMAP
+	int ret;
+	unsigned long flags;
+
+	if (!cpu_has_x2apic)
+		return;
+
+	if (!x2apic_preenabled && disable_x2apic) {
+		printk(KERN_INFO
+		       "Skipped enabling x2apic and Interrupt-remapping "
+		       "because of nox2apic\n");
+		return;
+	}
+
+	if (x2apic_preenabled && disable_x2apic)
+		panic("Bios already enabled x2apic, can't enforce nox2apic");
+
+	if (!x2apic_preenabled && skip_ioapic_setup) {
+		printk(KERN_INFO
+		       "Skipped enabling x2apic and Interrupt-remapping "
+		       "because of skipping io-apic setup\n");
+		return;
+	}
+
+	ret = dmar_table_init();
+	if (ret) {
+		printk(KERN_INFO
+		       "dmar_table_init() failed with %d:\n", ret);
+
+		if (x2apic_preenabled)
+			panic("x2apic enabled by bios. But IR enabling failed");
+		else
+			printk(KERN_INFO
+			       "Not enabling x2apic,Intr-remapping\n");
+		return;
+	}
+
+	local_irq_save(flags);
+	mask_8259A();
+	save_mask_IO_APIC_setup();
+
+	ret = enable_intr_remapping(1);
+
+	if (ret && x2apic_preenabled) {
+		local_irq_restore(flags);
+		panic("x2apic enabled by bios. But IR enabling failed");
+	}
+
+	if (ret)
+		goto end;
+
+	if (!x2apic) {
+		x2apic = 1;
+		apic_ops = &x2apic_ops;
+		enable_x2apic();
+	}
+end:
+	if (ret)
+		/*
+		 * IR enabling failed
+		 */
+		restore_IO_APIC_setup();
+	else
+		reinit_intr_remapped_IO_APIC(x2apic_preenabled);
+
+	unmask_8259A();
+	local_irq_restore(flags);
+
+	if (!ret) {
+		if (!x2apic_preenabled)
+			printk(KERN_INFO
+			       "Enabled x2apic and interrupt-remapping\n");
+		else
+			printk(KERN_INFO
+			       "Enabled Interrupt-remapping\n");
+	} else
+		printk(KERN_ERR
+		       "Failed to enable Interrupt-remapping and x2apic\n");
+#else
+	if (!cpu_has_x2apic)
+		return;
+
+	if (x2apic_preenabled)
+		panic("x2apic enabled prior OS handover,"
+		      " enable CONFIG_INTR_REMAP");
+
+	printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping "
+	       " and x2apic\n");
+#endif
+
+	return;
+}
+
 /*
 * Detect and enable local APICs on non-SMP boards.
 * Original code written by Keir Fraser.
@@ -872,7 +1061,7 @@ void __init early_init_lapic_mapping(void)
 	 * Fetch the APIC ID of the BSP in case we have a
 	 * default configuration (or the MP table is broken).
 	 */
-	boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+	boot_cpu_physical_apicid = read_apic_id();
 }
 
 /**
@@ -880,6 +1069,11 @@ void __init early_init_lapic_mapping(void)
 */
 void __init init_apic_mappings(void)
 {
+	if (x2apic) {
+		boot_cpu_physical_apicid = read_apic_id();
+		return;
+	}
+
 	/*
 	 * If no local APIC can be found then set up a fake all
 	 * zeroes page to simulate the local APIC and another
@@ -899,7 +1093,7 @@ void __init init_apic_mappings(void)
 	 * Fetch the APIC ID of the BSP in case we have a
 	 * default configuration (or the MP table is broken).
 	 */
-	boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+	boot_cpu_physical_apicid = read_apic_id();
 }
 
 /*
@@ -918,6 +1112,9 @@ int __init APIC_init_uniprocessor(void)
 		return -1;
 	}
 
+	enable_IR_x2apic();
+	setup_apic_routing();
+
 	verify_local_APIC();
 
 	connect_bsp_APIC();
@@ -1093,6 +1290,11 @@ void __cpuinit generic_processor_info(int apicid, int version)
 	cpu_set(cpu, cpu_present_map);
 }
 
+int hard_smp_processor_id(void)
+{
+	return read_apic_id();
+}
+
 /*
 * Power management
 */
@@ -1129,7 +1331,7 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
 
 	maxlvt = lapic_get_maxlvt();
 
-	apic_pm_state.apic_id = read_apic_id();
+	apic_pm_state.apic_id = apic_read(APIC_ID);
 	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
 	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
 	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
@@ -1164,10 +1366,14 @@ static int lapic_resume(struct sys_device *dev)
 	maxlvt = lapic_get_maxlvt();
 
 	local_irq_save(flags);
-	rdmsr(MSR_IA32_APICBASE, l, h);
-	l &= ~MSR_IA32_APICBASE_BASE;
-	l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
-	wrmsr(MSR_IA32_APICBASE, l, h);
+	if (!x2apic) {
+		rdmsr(MSR_IA32_APICBASE, l, h);
+		l &= ~MSR_IA32_APICBASE_BASE;
+		l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
+		wrmsr(MSR_IA32_APICBASE, l, h);
+	} else
+		enable_x2apic();
+
 	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
 	apic_write(APIC_ID, apic_pm_state.apic_id);
 	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
@@ -1307,6 +1513,15 @@ __cpuinit int apic_is_clustered_box(void)
 	return (clusters > 2);
 }
 
+static __init int setup_nox2apic(char *str)
+{
+	disable_x2apic = 1;
+	clear_cpu_cap(&boot_cpu_data, X86_FEATURE_X2APIC);
+	return 0;
+}
+early_param("nox2apic", setup_nox2apic);
+
+
 /*
 * APIC command line parameters
 */
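The x2apic_ops read/write hooks rely on the x2APIC register mapping: each 0x10-spaced xAPIC MMIO offset becomes one MSR starting at APIC_BASE_MSR (0x800), which is why the ICR accessors above shift the register offset right by 4. A simplified sketch of the MSR read hook (the real native_apic_msr_read/write also special-case a few registers that do not exist in x2apic mode):

    static inline u32 native_apic_msr_read(u32 reg)
    {
            u32 low, high;

            rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
            return low;
    }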
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 9ee24e6bc4b0..b93d069aea72 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -228,7 +228,6 @@
 #include <linux/suspend.h>
 #include <linux/kthread.h>
 #include <linux/jiffies.h>
-#include <linux/smp_lock.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index aa89387006fe..505543a75a56 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -22,7 +22,7 @@
 
 #define __NO_STUBS 1
 #undef __SYSCALL
-#undef _ASM_X86_64_UNISTD_H_
+#undef ASM_X86__UNISTD_64_H
 #define __SYSCALL(nr, sym) [nr] = 1,
 static char syscalls[] = {
 #include <asm/unistd.h>
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index c639bd55391c..fdd585f9c53d 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -25,11 +25,11 @@ x86_bios_strerror(long status)
 {
 	const char *str;
 	switch (status) {
 	case  0: str = "Call completed without error"; break;
 	case -1: str = "Not implemented"; break;
 	case -2: str = "Invalid argument"; break;
 	case -3: str = "Call completed with error"; break;
 	default: str = "Unknown BIOS status code"; break;
 	}
 	return str;
 }
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index ee76eaad3001..3ede19a4e0b2 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -3,22 +3,32 @@
 #
 
 obj-y			:= intel_cacheinfo.o addon_cpuid_features.o
-obj-y			+= proc.o feature_names.o
+obj-y			+= proc.o capflags.o powerflags.o
 
-obj-$(CONFIG_X86_32)	+= common.o bugs.o
+obj-$(CONFIG_X86_32)	+= common.o bugs.o cmpxchg.o
 obj-$(CONFIG_X86_64)	+= common_64.o bugs_64.o
-obj-$(CONFIG_X86_32)	+= amd.o
-obj-$(CONFIG_X86_64)	+= amd_64.o
-obj-$(CONFIG_X86_32)	+= cyrix.o
-obj-$(CONFIG_X86_32)	+= centaur.o
-obj-$(CONFIG_X86_64)	+= centaur_64.o
-obj-$(CONFIG_X86_32)	+= transmeta.o
-obj-$(CONFIG_X86_32)	+= intel.o
-obj-$(CONFIG_X86_64)	+= intel_64.o
-obj-$(CONFIG_X86_32)	+= umc.o
+
+obj-$(CONFIG_CPU_SUP_AMD_32)		+= amd.o
+obj-$(CONFIG_CPU_SUP_AMD_64)		+= amd_64.o
+obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
+obj-$(CONFIG_CPU_SUP_CENTAUR_32)	+= centaur.o
+obj-$(CONFIG_CPU_SUP_CENTAUR_64)	+= centaur_64.o
+obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
+obj-$(CONFIG_CPU_SUP_INTEL_32)		+= intel.o
+obj-$(CONFIG_CPU_SUP_INTEL_64)		+= intel_64.o
+obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 
 obj-$(CONFIG_X86_MCE)	+= mcheck/
 obj-$(CONFIG_MTRR)	+= mtrr/
 obj-$(CONFIG_CPU_FREQ)	+= cpufreq/
 
 obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
+
+quiet_cmd_mkcapflags = MKCAP   $@
+      cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@
+
+cpufeature = $(src)/../../../../include/asm-x86/cpufeature.h
+
+targets += capflags.c
+$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.pl FORCE
+	$(call if_changed,mkcapflags)
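With this rule, capflags.c is generated at build time from the X86_FEATURE_* list instead of being maintained by hand in feature_names.c. Illustratively, the generated file should take roughly this shape -- assumed from the array it replaces, not the script's verbatim output:

    /* produced by mkcapflags.pl from <asm-x86/cpufeature.h> */
    const char *const x86_cap_flags[NCAPINTS*32] = {
            [X86_FEATURE_FPU] = "fpu",
            [X86_FEATURE_VME] = "vme",
            /* ... one entry per quoted flag name in cpufeature.h ... */
    };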
diff --git a/arch/x86/kernel/cpu/cmpxchg.c b/arch/x86/kernel/cpu/cmpxchg.c new file mode 100644 index 000000000000..2056ccf572cc --- /dev/null +++ b/arch/x86/kernel/cpu/cmpxchg.c | |||
@@ -0,0 +1,72 @@ | |||
1 | /* | ||
2 | * cmpxchg*() fallbacks for CPU not supporting these instructions | ||
3 | */ | ||
4 | |||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/smp.h> | ||
7 | #include <linux/module.h> | ||
8 | |||
9 | #ifndef CONFIG_X86_CMPXCHG | ||
10 | unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new) | ||
11 | { | ||
12 | u8 prev; | ||
13 | unsigned long flags; | ||
14 | |||
15 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
16 | local_irq_save(flags); | ||
17 | prev = *(u8 *)ptr; | ||
18 | if (prev == old) | ||
19 | *(u8 *)ptr = new; | ||
20 | local_irq_restore(flags); | ||
21 | return prev; | ||
22 | } | ||
23 | EXPORT_SYMBOL(cmpxchg_386_u8); | ||
24 | |||
25 | unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new) | ||
26 | { | ||
27 | u16 prev; | ||
28 | unsigned long flags; | ||
29 | |||
30 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
31 | local_irq_save(flags); | ||
32 | prev = *(u16 *)ptr; | ||
33 | if (prev == old) | ||
34 | *(u16 *)ptr = new; | ||
35 | local_irq_restore(flags); | ||
36 | return prev; | ||
37 | } | ||
38 | EXPORT_SYMBOL(cmpxchg_386_u16); | ||
39 | |||
40 | unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new) | ||
41 | { | ||
42 | u32 prev; | ||
43 | unsigned long flags; | ||
44 | |||
45 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
46 | local_irq_save(flags); | ||
47 | prev = *(u32 *)ptr; | ||
48 | if (prev == old) | ||
49 | *(u32 *)ptr = new; | ||
50 | local_irq_restore(flags); | ||
51 | return prev; | ||
52 | } | ||
53 | EXPORT_SYMBOL(cmpxchg_386_u32); | ||
54 | #endif | ||
55 | |||
56 | #ifndef CONFIG_X86_CMPXCHG64 | ||
57 | unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new) | ||
58 | { | ||
59 | u64 prev; | ||
60 | unsigned long flags; | ||
61 | |||
62 | /* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */ | ||
63 | local_irq_save(flags); | ||
64 | prev = *(u64 *)ptr; | ||
65 | if (prev == old) | ||
66 | *(u64 *)ptr = new; | ||
67 | local_irq_restore(flags); | ||
68 | return prev; | ||
69 | } | ||
70 | EXPORT_SYMBOL(cmpxchg_486_u64); | ||
71 | #endif | ||
72 | |||
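The fallbacks above emulate compare-and-swap by disabling interrupts around a plain read-modify-write, which is only safe on the uniprocessor 386-class parts they target. As a reference for the semantics only (a user-space sketch, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Same contract as cmpxchg_386_u32, minus the IRQ fencing: store
     * new_val only if the current value equals old_val, and always
     * return the value observed before any store. */
    static uint32_t cas_u32(volatile uint32_t *ptr, uint32_t old_val,
                            uint32_t new_val)
    {
            uint32_t prev = *ptr;

            if (prev == old_val)
                    *ptr = new_val;
            return prev;
    }

    int main(void)
    {
            volatile uint32_t v = 5;

            printf("%u\n", cas_u32(&v, 5, 7));  /* prints 5; v becomes 7 */
            printf("%u\n", cas_u32(&v, 5, 9));  /* prints 7; v stays 7 */
            return 0;
    }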
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4e456bd955bb..8260d930eabc 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -355,6 +355,35 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | |||
355 | clear_cpu_cap(c, X86_FEATURE_NOPL); | 355 | clear_cpu_cap(c, X86_FEATURE_NOPL); |
356 | } | 356 | } |
357 | 357 | ||
358 | /* | ||
359 | * The NOPL instruction is supposed to exist on all CPUs with | ||
360 | * family >= 6; unfortunately, that's not true in practice because | ||
361 | * of early VIA chips and (more importantly) broken virtualizers that | ||
362 | * are not easy to detect. Hence, probe for it based on first | ||
363 | * principles. | ||
364 | */ | ||
365 | static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | ||
366 | { | ||
367 | const u32 nopl_signature = 0x888c53b1; /* Random number */ | ||
368 | u32 has_nopl = nopl_signature; | ||
369 | |||
370 | clear_cpu_cap(c, X86_FEATURE_NOPL); | ||
371 | if (c->x86 >= 6) { | ||
372 | asm volatile("\n" | ||
373 | "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */ | ||
374 | "2:\n" | ||
375 | " .section .fixup,\"ax\"\n" | ||
376 | "3: xor %0,%0\n" | ||
377 | " jmp 2b\n" | ||
378 | " .previous\n" | ||
379 | _ASM_EXTABLE(1b,3b) | ||
380 | : "+a" (has_nopl)); | ||
381 | |||
382 | if (has_nopl == nopl_signature) | ||
383 | set_cpu_cap(c, X86_FEATURE_NOPL); | ||
384 | } | ||
385 | } | ||
386 | |||
358 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | 387 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) |
359 | { | 388 | { |
360 | u32 tfms, xlvl; | 389 | u32 tfms, xlvl; |
@@ -723,9 +752,20 @@ void __cpuinit cpu_init(void) | |||
723 | /* | 752 | /* |
724 | * Force FPU initialization: | 753 | * Force FPU initialization: |
725 | */ | 754 | */ |
726 | current_thread_info()->status = 0; | 755 | if (cpu_has_xsave) |
756 | current_thread_info()->status = TS_XSAVE; | ||
757 | else | ||
758 | current_thread_info()->status = 0; | ||
727 | clear_used_math(); | 759 | clear_used_math(); |
728 | mxcsr_feature_mask_init(); | 760 | mxcsr_feature_mask_init(); |
761 | |||
762 | /* | ||
763 | * Boot processor to set up the FP and extended state context info. | ||
764 | */ | ||
765 | if (!smp_processor_id()) | ||
766 | init_thread_xstate(); | ||
767 | |||
768 | xsave_init(); | ||
729 | } | 769 | } |
730 | 770 | ||
731 | #ifdef CONFIG_HOTPLUG_CPU | 771 | #ifdef CONFIG_HOTPLUG_CPU |
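The detect_nopl() probe added above executes the NOPL opcode directly and leans on the kernel's exception fixup table: if the instruction faults, the fixup at label 3 zeroes the register and destroys the signature. A rough user-space analogue of the same first-principles probe, substituting a SIGILL handler for the kernel's fixup machinery (a sketch, not the kernel mechanism):

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>

    static sigjmp_buf probe_env;

    static void sigill_handler(int sig)
    {
            siglongjmp(probe_env, 1);           /* the opcode faulted */
    }

    int main(void)
    {
            int has_nopl = 0;

            signal(SIGILL, sigill_handler);
            if (sigsetjmp(probe_env, 1) == 0) {
                    /* 0x0f 0x1f 0xc0 = nopl %eax; raises #UD if unsupported */
                    asm volatile(".byte 0x0f, 0x1f, 0xc0");
                    has_nopl = 1;
            }
            printf("nopl %ssupported\n", has_nopl ? "" : "not ");
            return 0;
    }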
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c index a11f5d4477cd..35d11efdf1fe 100644 --- a/arch/x86/kernel/cpu/common_64.c +++ b/arch/x86/kernel/cpu/common_64.c | |||
@@ -636,6 +636,8 @@ void __cpuinit cpu_init(void) | |||
636 | barrier(); | 636 | barrier(); |
637 | 637 | ||
638 | check_efer(); | 638 | check_efer(); |
639 | if (cpu != 0 && x2apic) | ||
640 | enable_x2apic(); | ||
639 | 641 | ||
640 | /* | 642 | /* |
641 | * set up and load the per-CPU TSS | 643 | * set up and load the per-CPU TSS |
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 898a5a2002ed..13f8fa16b815 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
@@ -121,7 +121,7 @@ static void __cpuinit set_cx86_reorder(void) | |||
121 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 121 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
122 | 122 | ||
123 | /* Load/Store Serialize to mem access disable (=reorder it) */ | 123 | /* Load/Store Serialize to mem access disable (=reorder it) */ |
124 | setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80); | 124 | setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80); |
125 | /* set load/store serialize from 1GB to 4GB */ | 125 | /* set load/store serialize from 1GB to 4GB */ |
126 | ccr3 |= 0xe0; | 126 | ccr3 |= 0xe0; |
127 | setCx86(CX86_CCR3, ccr3); | 127 | setCx86(CX86_CCR3, ccr3); |
@@ -132,11 +132,11 @@ static void __cpuinit set_cx86_memwb(void) | |||
132 | printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); | 132 | printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); |
133 | 133 | ||
134 | /* CCR2 bit 2: unlock NW bit */ | 134 | /* CCR2 bit 2: unlock NW bit */ |
135 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04); | 135 | setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04); |
136 | /* set 'Not Write-through' */ | 136 | /* set 'Not Write-through' */ |
137 | write_cr0(read_cr0() | X86_CR0_NW); | 137 | write_cr0(read_cr0() | X86_CR0_NW); |
138 | /* CCR2 bit 2: lock NW bit and set WT1 */ | 138 | /* CCR2 bit 2: lock NW bit and set WT1 */ |
139 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); | 139 | setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14); |
140 | } | 140 | } |
141 | 141 | ||
142 | /* | 142 | /* |
@@ -150,14 +150,14 @@ static void __cpuinit geode_configure(void) | |||
150 | local_irq_save(flags); | 150 | local_irq_save(flags); |
151 | 151 | ||
152 | /* Suspend on halt power saving and enable #SUSP pin */ | 152 | /* Suspend on halt power saving and enable #SUSP pin */ |
153 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88); | 153 | setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88); |
154 | 154 | ||
155 | ccr3 = getCx86(CX86_CCR3); | 155 | ccr3 = getCx86(CX86_CCR3); |
156 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 156 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
157 | 157 | ||
158 | 158 | ||
159 | /* FPU fast, DTE cache, Mem bypass */ | 159 | /* FPU fast, DTE cache, Mem bypass */ |
160 | setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38); | 160 | setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38); |
161 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 161 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
162 | 162 | ||
163 | set_cx86_memwb(); | 163 | set_cx86_memwb(); |
@@ -291,7 +291,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
291 | /* GXm supports extended cpuid levels 'ala' AMD */ | 291 | /* GXm supports extended cpuid levels 'ala' AMD */ |
292 | if (c->cpuid_level == 2) { | 292 | if (c->cpuid_level == 2) { |
293 | /* Enable cxMMX extensions (GX1 Datasheet 54) */ | 293 | /* Enable cxMMX extensions (GX1 Datasheet 54) */ |
294 | setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1); | 294 | setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1); |
295 | 295 | ||
296 | /* | 296 | /* |
297 | * GXm : 0x30 ... 0x5f GXm datasheet 51 | 297 | * GXm : 0x30 ... 0x5f GXm datasheet 51 |
@@ -314,7 +314,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
314 | if (dir1 > 7) { | 314 | if (dir1 > 7) { |
315 | dir0_msn++; /* M II */ | 315 | dir0_msn++; /* M II */ |
316 | /* Enable MMX extensions (App note 108) */ | 316 | /* Enable MMX extensions (App note 108) */ |
317 | setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1); | 317 | setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1); |
318 | } else { | 318 | } else { |
319 | c->coma_bug = 1; /* 6x86MX, it has the bug. */ | 319 | c->coma_bug = 1; /* 6x86MX, it has the bug. */ |
320 | } | 320 | } |
@@ -429,7 +429,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) | |||
429 | local_irq_save(flags); | 429 | local_irq_save(flags); |
430 | ccr3 = getCx86(CX86_CCR3); | 430 | ccr3 = getCx86(CX86_CCR3); |
431 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 431 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
432 | setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80); /* enable cpuid */ | 432 | setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* enable cpuid */ |
433 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 433 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
434 | local_irq_restore(flags); | 434 | local_irq_restore(flags); |
435 | } | 435 | } |
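For readers unfamiliar with the getCx86()/setCx86() helpers this hunk converts to their _old variants: Cyrix configuration registers are reached through the classic index/data port pair, roughly as in this sketch (assumed from the Cyrix datasheets, not taken from the patch):

    #include <asm/io.h>     /* inb()/outb() */

    /* Indexed access to the Cyrix configuration registers: write the
     * register number to port 0x22, then transfer data via port 0x23. */
    static unsigned char getCx86_sketch(unsigned char reg)
    {
            outb(reg, 0x22);
            return inb(0x23);
    }

    static void setCx86_sketch(unsigned char reg, unsigned char data)
    {
            outb(reg, 0x22);
            outb(data, 0x23);
    }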
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c deleted file mode 100644 index c9017799497c..000000000000 --- a/arch/x86/kernel/cpu/feature_names.c +++ /dev/null | |||
@@ -1,84 +0,0 @@ | |||
1 | /* | ||
2 | * Strings for the various x86 capability flags. | ||
3 | * | ||
4 | * This file must not contain any executable code. | ||
5 | */ | ||
6 | |||
7 | #include <asm/cpufeature.h> | ||
8 | |||
9 | /* | ||
10 | * These flag bits must match the definitions in <asm/cpufeature.h>. | ||
11 | * NULL means this bit is undefined or reserved; either way it doesn't | ||
12 | * have meaning as far as Linux is concerned. Note that it's important | ||
13 | * to realize there is a difference between this table and CPUID -- if | ||
14 | * applications want to get the raw CPUID data, they should access | ||
15 | * /dev/cpu/<cpu_nr>/cpuid instead. | ||
16 | */ | ||
17 | const char * const x86_cap_flags[NCAPINTS*32] = { | ||
18 | /* Intel-defined */ | ||
19 | "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", | ||
20 | "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", | ||
21 | "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx", | ||
22 | "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe", | ||
23 | |||
24 | /* AMD-defined */ | ||
25 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
26 | NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, | ||
27 | NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL, | ||
28 | NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm", | ||
29 | "3dnowext", "3dnow", | ||
30 | |||
31 | /* Transmeta-defined */ | ||
32 | "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL, | ||
33 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
34 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
35 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
36 | |||
37 | /* Other (Linux-defined) */ | ||
38 | "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", | ||
39 | NULL, NULL, NULL, NULL, | ||
40 | "constant_tsc", "up", NULL, "arch_perfmon", | ||
41 | "pebs", "bts", NULL, NULL, | ||
42 | "rep_good", NULL, NULL, NULL, | ||
43 | "nopl", NULL, NULL, NULL, | ||
44 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
45 | |||
46 | /* Intel-defined (#2) */ | ||
47 | "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est", | ||
48 | "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL, | ||
49 | NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt", | ||
50 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
51 | |||
52 | /* VIA/Cyrix/Centaur-defined */ | ||
53 | NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en", | ||
54 | "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL, | ||
55 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
56 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
57 | |||
58 | /* AMD-defined (#2) */ | ||
59 | "lahf_lm", "cmp_legacy", "svm", "extapic", | ||
60 | "cr8_legacy", "abm", "sse4a", "misalignsse", | ||
61 | "3dnowprefetch", "osvw", "ibs", "sse5", | ||
62 | "skinit", "wdt", NULL, NULL, | ||
63 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
64 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
65 | |||
66 | /* Auxiliary (Linux-defined) */ | ||
67 | "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
68 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
69 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
70 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
71 | }; | ||
72 | |||
73 | const char *const x86_power_flags[32] = { | ||
74 | "ts", /* temperature sensor */ | ||
75 | "fid", /* frequency id control */ | ||
76 | "vid", /* voltage id control */ | ||
77 | "ttp", /* thermal trip */ | ||
78 | "tm", | ||
79 | "stc", | ||
80 | "100mhzsteps", | ||
81 | "hwpstate", | ||
82 | "", /* tsc invariant mapped to constant_tsc */ | ||
83 | /* nothing */ | ||
84 | }; | ||
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index b75f2569b8f8..77618c717d76 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -23,13 +23,6 @@ | |||
23 | #include <mach_apic.h> | 23 | #include <mach_apic.h> |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | #ifdef CONFIG_X86_INTEL_USERCOPY | ||
27 | /* | ||
28 | * Alignment at which movsl is preferred for bulk memory copies. | ||
29 | */ | ||
30 | struct movsl_mask movsl_mask __read_mostly; | ||
31 | #endif | ||
32 | |||
33 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | 26 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) |
34 | { | 27 | { |
35 | /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ | 28 | /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ |
@@ -314,69 +307,5 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = { | |||
314 | 307 | ||
315 | cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev); | 308 | cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev); |
316 | 309 | ||
317 | #ifndef CONFIG_X86_CMPXCHG | ||
318 | unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new) | ||
319 | { | ||
320 | u8 prev; | ||
321 | unsigned long flags; | ||
322 | |||
323 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
324 | local_irq_save(flags); | ||
325 | prev = *(u8 *)ptr; | ||
326 | if (prev == old) | ||
327 | *(u8 *)ptr = new; | ||
328 | local_irq_restore(flags); | ||
329 | return prev; | ||
330 | } | ||
331 | EXPORT_SYMBOL(cmpxchg_386_u8); | ||
332 | |||
333 | unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new) | ||
334 | { | ||
335 | u16 prev; | ||
336 | unsigned long flags; | ||
337 | |||
338 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
339 | local_irq_save(flags); | ||
340 | prev = *(u16 *)ptr; | ||
341 | if (prev == old) | ||
342 | *(u16 *)ptr = new; | ||
343 | local_irq_restore(flags); | ||
344 | return prev; | ||
345 | } | ||
346 | EXPORT_SYMBOL(cmpxchg_386_u16); | ||
347 | |||
348 | unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new) | ||
349 | { | ||
350 | u32 prev; | ||
351 | unsigned long flags; | ||
352 | |||
353 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | ||
354 | local_irq_save(flags); | ||
355 | prev = *(u32 *)ptr; | ||
356 | if (prev == old) | ||
357 | *(u32 *)ptr = new; | ||
358 | local_irq_restore(flags); | ||
359 | return prev; | ||
360 | } | ||
361 | EXPORT_SYMBOL(cmpxchg_386_u32); | ||
362 | #endif | ||
363 | |||
364 | #ifndef CONFIG_X86_CMPXCHG64 | ||
365 | unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new) | ||
366 | { | ||
367 | u64 prev; | ||
368 | unsigned long flags; | ||
369 | |||
370 | /* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */ | ||
371 | local_irq_save(flags); | ||
372 | prev = *(u64 *)ptr; | ||
373 | if (prev == old) | ||
374 | *(u64 *)ptr = new; | ||
375 | local_irq_restore(flags); | ||
376 | return prev; | ||
377 | } | ||
378 | EXPORT_SYMBOL(cmpxchg_486_u64); | ||
379 | #endif | ||
380 | |||
381 | /* arch_initcall(intel_cpu_init); */ | 310 | /* arch_initcall(intel_cpu_init); */ |
382 | 311 | ||
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 6b0a10b002f1..3f46afbb1cf1 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Routines to identify caches on Intel CPUs. | 2 | * Routines to identify caches on Intel CPUs. |
3 | * | 3 | * |
4 | * Changes: | 4 | * Changes: |
5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) | 5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) |
6 | * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. | 6 | * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. |
7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. | 7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. |
8 | */ | 8 | */ |
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/compiler.h> | 13 | #include <linux/compiler.h> |
14 | #include <linux/cpu.h> | 14 | #include <linux/cpu.h> |
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/pci.h> | ||
16 | 17 | ||
17 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
18 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
@@ -130,9 +131,18 @@ struct _cpuid4_info { | |||
130 | union _cpuid4_leaf_ebx ebx; | 131 | union _cpuid4_leaf_ebx ebx; |
131 | union _cpuid4_leaf_ecx ecx; | 132 | union _cpuid4_leaf_ecx ecx; |
132 | unsigned long size; | 133 | unsigned long size; |
134 | unsigned long can_disable; | ||
133 | cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */ | 135 | cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */ |
134 | }; | 136 | }; |
135 | 137 | ||
138 | #ifdef CONFIG_PCI | ||
139 | static struct pci_device_id k8_nb_id[] = { | ||
140 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) }, | ||
141 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) }, | ||
142 | {} | ||
143 | }; | ||
144 | #endif | ||
145 | |||
136 | unsigned short num_cache_leaves; | 146 | unsigned short num_cache_leaves; |
137 | 147 | ||
138 | /* AMD doesn't have CPUID4. Emulate it here to report the same | 148 | /* AMD doesn't have CPUID4. Emulate it here to report the same |
@@ -182,9 +192,10 @@ static unsigned short assocs[] __cpuinitdata = { | |||
182 | static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; | 192 | static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; |
183 | static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; | 193 | static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; |
184 | 194 | ||
185 | static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | 195 | static void __cpuinit |
186 | union _cpuid4_leaf_ebx *ebx, | 196 | amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, |
187 | union _cpuid4_leaf_ecx *ecx) | 197 | union _cpuid4_leaf_ebx *ebx, |
198 | union _cpuid4_leaf_ecx *ecx) | ||
188 | { | 199 | { |
189 | unsigned dummy; | 200 | unsigned dummy; |
190 | unsigned line_size, lines_per_tag, assoc, size_in_kb; | 201 | unsigned line_size, lines_per_tag, assoc, size_in_kb; |
@@ -251,27 +262,40 @@ static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
251 | (ebx->split.ways_of_associativity + 1) - 1; | 262 | (ebx->split.ways_of_associativity + 1) - 1; |
252 | } | 263 | } |
253 | 264 | ||
254 | static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) | 265 | static void __cpuinit |
266 | amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf) | ||
267 | { | ||
268 | if (index < 3) | ||
269 | return; | ||
270 | this_leaf->can_disable = 1; | ||
271 | } | ||
272 | |||
273 | static int | ||
274 | __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) | ||
255 | { | 275 | { |
256 | union _cpuid4_leaf_eax eax; | 276 | union _cpuid4_leaf_eax eax; |
257 | union _cpuid4_leaf_ebx ebx; | 277 | union _cpuid4_leaf_ebx ebx; |
258 | union _cpuid4_leaf_ecx ecx; | 278 | union _cpuid4_leaf_ecx ecx; |
259 | unsigned edx; | 279 | unsigned edx; |
260 | 280 | ||
261 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) | 281 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { |
262 | amd_cpuid4(index, &eax, &ebx, &ecx); | 282 | amd_cpuid4(index, &eax, &ebx, &ecx); |
263 | else | 283 | if (boot_cpu_data.x86 >= 0x10) |
264 | cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); | 284 | amd_check_l3_disable(index, this_leaf); |
285 | } else { | ||
286 | cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); | ||
287 | } | ||
288 | |||
265 | if (eax.split.type == CACHE_TYPE_NULL) | 289 | if (eax.split.type == CACHE_TYPE_NULL) |
266 | return -EIO; /* better error ? */ | 290 | return -EIO; /* better error ? */ |
267 | 291 | ||
268 | this_leaf->eax = eax; | 292 | this_leaf->eax = eax; |
269 | this_leaf->ebx = ebx; | 293 | this_leaf->ebx = ebx; |
270 | this_leaf->ecx = ecx; | 294 | this_leaf->ecx = ecx; |
271 | this_leaf->size = (ecx.split.number_of_sets + 1) * | 295 | this_leaf->size = (ecx.split.number_of_sets + 1) * |
272 | (ebx.split.coherency_line_size + 1) * | 296 | (ebx.split.coherency_line_size + 1) * |
273 | (ebx.split.physical_line_partition + 1) * | 297 | (ebx.split.physical_line_partition + 1) * |
274 | (ebx.split.ways_of_associativity + 1); | 298 | (ebx.split.ways_of_associativity + 1); |
275 | return 0; | 299 | return 0; |
276 | } | 300 | } |
277 | 301 | ||
@@ -453,7 +477,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
453 | 477 | ||
454 | /* pointer to _cpuid4_info array (for each cache leaf) */ | 478 | /* pointer to _cpuid4_info array (for each cache leaf) */ |
455 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); | 479 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); |
456 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) | 480 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) |
457 | 481 | ||
458 | #ifdef CONFIG_SMP | 482 | #ifdef CONFIG_SMP |
459 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | 483 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) |
@@ -490,7 +514,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | |||
490 | 514 | ||
491 | this_leaf = CPUID4_INFO_IDX(cpu, index); | 515 | this_leaf = CPUID4_INFO_IDX(cpu, index); |
492 | for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { | 516 | for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { |
493 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); | 517 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); |
494 | cpu_clear(cpu, sibling_leaf->shared_cpu_map); | 518 | cpu_clear(cpu, sibling_leaf->shared_cpu_map); |
495 | } | 519 | } |
496 | } | 520 | } |
@@ -572,7 +596,7 @@ struct _index_kobject { | |||
572 | 596 | ||
573 | /* pointer to array of kobjects for cpuX/cache/indexY */ | 597 | /* pointer to array of kobjects for cpuX/cache/indexY */ |
574 | static DEFINE_PER_CPU(struct _index_kobject *, index_kobject); | 598 | static DEFINE_PER_CPU(struct _index_kobject *, index_kobject); |
575 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y])) | 599 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y])) |
576 | 600 | ||
577 | #define show_one_plus(file_name, object, val) \ | 601 | #define show_one_plus(file_name, object, val) \ |
578 | static ssize_t show_##file_name \ | 602 | static ssize_t show_##file_name \ |
@@ -637,6 +661,99 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) { | |||
637 | } | 661 | } |
638 | } | 662 | } |
639 | 663 | ||
664 | #define to_object(k) container_of(k, struct _index_kobject, kobj) | ||
665 | #define to_attr(a) container_of(a, struct _cache_attr, attr) | ||
666 | |||
667 | #ifdef CONFIG_PCI | ||
668 | static struct pci_dev *get_k8_northbridge(int node) | ||
669 | { | ||
670 | struct pci_dev *dev = NULL; | ||
671 | int i; | ||
672 | |||
673 | for (i = 0; i <= node; i++) { | ||
674 | do { | ||
675 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); | ||
676 | if (!dev) | ||
677 | break; | ||
678 | } while (!pci_match_id(&k8_nb_id[0], dev)); | ||
679 | if (!dev) | ||
680 | break; | ||
681 | } | ||
682 | return dev; | ||
683 | } | ||
684 | #else | ||
685 | static struct pci_dev *get_k8_northbridge(int node) | ||
686 | { | ||
687 | return NULL; | ||
688 | } | ||
689 | #endif | ||
690 | |||
691 | static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf) | ||
692 | { | ||
693 | int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); | ||
694 | struct pci_dev *dev = NULL; | ||
695 | ssize_t ret = 0; | ||
696 | int i; | ||
697 | |||
698 | if (!this_leaf->can_disable) | ||
699 | return sprintf(buf, "Feature not enabled\n"); | ||
700 | |||
701 | dev = get_k8_northbridge(node); | ||
702 | if (!dev) { | ||
703 | printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n"); | ||
704 | return -EINVAL; | ||
705 | } | ||
706 | |||
707 | for (i = 0; i < 2; i++) { | ||
708 | unsigned int reg; | ||
709 | |||
710 | pci_read_config_dword(dev, 0x1BC + i * 4, ®); | ||
711 | |||
712 | ret += sprintf(buf + ret, "Entry: %d\n", i); | ||
713 | ret += sprintf(buf + ret, | ||
714 | "Reads: %s\tNew Entries: %s\n", | ||
715 | reg & 0x80000000 ? "Disabled" : "Allowed", | ||
716 | reg & 0x40000000 ? "Disabled" : "Allowed"); | ||
717 | ret += sprintf(buf + ret, "SubCache: %x\tIndex: %x\n", | ||
718 | (reg & 0x30000) >> 16, reg & 0xfff); | ||
719 | } | ||
720 | return ret; | ||
721 | } | ||
722 | |||
723 | static ssize_t | ||
724 | store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, | ||
725 | size_t count) | ||
726 | { | ||
727 | int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); | ||
728 | struct pci_dev *dev = NULL; | ||
729 | unsigned int ret, index, val; | ||
730 | |||
731 | if (!this_leaf->can_disable) | ||
732 | return 0; | ||
733 | |||
734 | if (strlen(buf) > 15) | ||
735 | return -EINVAL; | ||
736 | |||
737 | ret = sscanf(buf, "%x %x", &index, &val); | ||
738 | if (ret != 2) | ||
739 | return -EINVAL; | ||
740 | if (index > 1) | ||
741 | return -EINVAL; | ||
742 | |||
743 | val |= 0xc0000000; | ||
744 | dev = get_k8_northbridge(node); | ||
745 | if (!dev) { | ||
746 | printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n"); | ||
747 | return -EINVAL; | ||
748 | } | ||
749 | |||
750 | pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000); | ||
751 | wbinvd(); | ||
752 | pci_write_config_dword(dev, 0x1BC + index * 4, val); | ||
753 | |||
754 | return 1; | ||
755 | } | ||
756 | |||
640 | struct _cache_attr { | 757 | struct _cache_attr { |
641 | struct attribute attr; | 758 | struct attribute attr; |
642 | ssize_t (*show)(struct _cpuid4_info *, char *); | 759 | ssize_t (*show)(struct _cpuid4_info *, char *); |
@@ -657,6 +774,8 @@ define_one_ro(size); | |||
657 | define_one_ro(shared_cpu_map); | 774 | define_one_ro(shared_cpu_map); |
658 | define_one_ro(shared_cpu_list); | 775 | define_one_ro(shared_cpu_list); |
659 | 776 | ||
777 | static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable); | ||
778 | |||
660 | static struct attribute * default_attrs[] = { | 779 | static struct attribute * default_attrs[] = { |
661 | &type.attr, | 780 | &type.attr, |
662 | &level.attr, | 781 | &level.attr, |
@@ -667,12 +786,10 @@ static struct attribute * default_attrs[] = { | |||
667 | &size.attr, | 786 | &size.attr, |
668 | &shared_cpu_map.attr, | 787 | &shared_cpu_map.attr, |
669 | &shared_cpu_list.attr, | 788 | &shared_cpu_list.attr, |
789 | &cache_disable.attr, | ||
670 | NULL | 790 | NULL |
671 | }; | 791 | }; |
672 | 792 | ||
673 | #define to_object(k) container_of(k, struct _index_kobject, kobj) | ||
674 | #define to_attr(a) container_of(a, struct _cache_attr, attr) | ||
675 | |||
676 | static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) | 793 | static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) |
677 | { | 794 | { |
678 | struct _cache_attr *fattr = to_attr(attr); | 795 | struct _cache_attr *fattr = to_attr(attr); |
@@ -682,14 +799,22 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) | |||
682 | ret = fattr->show ? | 799 | ret = fattr->show ? |
683 | fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), | 800 | fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), |
684 | buf) : | 801 | buf) : |
685 | 0; | 802 | 0; |
686 | return ret; | 803 | return ret; |
687 | } | 804 | } |
688 | 805 | ||
689 | static ssize_t store(struct kobject * kobj, struct attribute * attr, | 806 | static ssize_t store(struct kobject * kobj, struct attribute * attr, |
690 | const char * buf, size_t count) | 807 | const char * buf, size_t count) |
691 | { | 808 | { |
692 | return 0; | 809 | struct _cache_attr *fattr = to_attr(attr); |
810 | struct _index_kobject *this_leaf = to_object(kobj); | ||
811 | ssize_t ret; | ||
812 | |||
813 | ret = fattr->store ? | ||
814 | fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), | ||
815 | buf, count) : | ||
816 | 0; | ||
817 | return ret; | ||
693 | } | 818 | } |
694 | 819 | ||
695 | static struct sysfs_ops sysfs_ops = { | 820 | static struct sysfs_ops sysfs_ops = { |
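The new show/store pair wires the can_disable leaves into sysfs as a read-write cache_disable attribute; store_cache_disable() parses "<entry> <value>" as two hex fields and writes the result to the northbridge's 0x1BC/0x1C0 config registers with the disable bits forced on. A sketch of driving it from user space, assuming the standard cpu cache sysfs layout (cpu0, index3 for the L3, the only leaf where can_disable is set):

    #include <stdio.h>

    int main(void)
    {
            const char *path =
                    "/sys/devices/system/cpu/cpu0/cache/index3/cache_disable";
            FILE *f = fopen(path, "w");

            if (!f)
                    return 1;
            /* "<entry> <value>" in hex: entry 0, subcache/index 0x200 */
            fprintf(f, "0 200\n");
            fclose(f);
            return 0;
    }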
diff --git a/arch/x86/kernel/cpu/mkcapflags.pl b/arch/x86/kernel/cpu/mkcapflags.pl new file mode 100644 index 000000000000..dfea390e1608 --- /dev/null +++ b/arch/x86/kernel/cpu/mkcapflags.pl | |||
@@ -0,0 +1,32 @@ | |||
1 | #!/usr/bin/perl | ||
2 | # | ||
3 | # Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h | ||
4 | # | ||
5 | |||
6 | ($in, $out) = @ARGV; | ||
7 | |||
8 | open(IN, "< $in\0") or die "$0: cannot open: $in: $!\n"; | ||
9 | open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n"; | ||
10 | |||
11 | print OUT "#include <asm/cpufeature.h>\n\n"; | ||
12 | print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n"; | ||
13 | |||
14 | while (defined($line = <IN>)) { | ||
15 | if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) { | ||
16 | $macro = $1; | ||
17 | $feature = $2; | ||
18 | $tail = $3; | ||
19 | if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) { | ||
20 | $feature = $1; | ||
21 | } | ||
22 | |||
23 | if ($feature ne '') { | ||
24 | printf OUT "\t%-32s = \"%s\",\n", | ||
25 | "[$macro]", "\L$feature"; | ||
26 | } | ||
27 | } | ||
28 | } | ||
29 | print OUT "};\n"; | ||
30 | |||
31 | close(IN); | ||
32 | close(OUT); | ||
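For illustration, the capflags.c file this script emits would look roughly like the following (a hypothetical excerpt; the actual entries are whatever cpufeature.h defines):

    #include <asm/cpufeature.h>

    const char * const x86_cap_flags[NCAPINTS*32] = {
            [X86_FEATURE_FPU]               = "fpu",
            [X86_FEATURE_VME]               = "vme",
            [X86_FEATURE_DE]                = "de",
            /* ...one designated initializer per #define in cpufeature.h,
             * with the quoted comment override used when present... */
    };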
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index b117d7f8a564..58ac5d3d4361 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -729,7 +729,7 @@ struct var_mtrr_range_state { | |||
729 | mtrr_type type; | 729 | mtrr_type type; |
730 | }; | 730 | }; |
731 | 731 | ||
732 | struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; | 732 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; |
733 | static int __initdata debug_print; | 733 | static int __initdata debug_print; |
734 | 734 | ||
735 | static int __init | 735 | static int __init |
diff --git a/arch/x86/kernel/cpu/powerflags.c b/arch/x86/kernel/cpu/powerflags.c new file mode 100644 index 000000000000..5abbea297e0c --- /dev/null +++ b/arch/x86/kernel/cpu/powerflags.c | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Strings for the various x86 power flags | ||
3 | * | ||
4 | * This file must not contain any executable code. | ||
5 | */ | ||
6 | |||
7 | #include <asm/cpufeature.h> | ||
8 | |||
9 | const char *const x86_power_flags[32] = { | ||
10 | "ts", /* temperature sensor */ | ||
11 | "fid", /* frequency id control */ | ||
12 | "vid", /* voltage id control */ | ||
13 | "ttp", /* thermal trip */ | ||
14 | "tm", | ||
15 | "stc", | ||
16 | "100mhzsteps", | ||
17 | "hwpstate", | ||
18 | "", /* tsc invariant mapped to constant_tsc */ | ||
19 | /* nothing */ | ||
20 | }; | ||
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 8e9cd6a8ec12..6a44d6465991 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <linux/smp_lock.h> | 36 | #include <linux/smp_lock.h> |
37 | #include <linux/major.h> | 37 | #include <linux/major.h> |
38 | #include <linux/fs.h> | 38 | #include <linux/fs.h> |
39 | #include <linux/smp_lock.h> | ||
40 | #include <linux/device.h> | 39 | #include <linux/device.h> |
41 | #include <linux/cpu.h> | 40 | #include <linux/cpu.h> |
42 | #include <linux/notifier.h> | 41 | #include <linux/notifier.h> |
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c index 15e6c6bc4a46..d3e524c84527 100644 --- a/arch/x86/kernel/crash_dump_64.c +++ b/arch/x86/kernel/crash_dump_64.c | |||
@@ -7,9 +7,8 @@ | |||
7 | 7 | ||
8 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
9 | #include <linux/crash_dump.h> | 9 | #include <linux/crash_dump.h> |
10 | 10 | #include <linux/uaccess.h> | |
11 | #include <asm/uaccess.h> | 11 | #include <linux/io.h> |
12 | #include <asm/io.h> | ||
13 | 12 | ||
14 | /** | 13 | /** |
15 | * copy_oldmem_page - copy one page from "oldmem" | 14 | * copy_oldmem_page - copy one page from "oldmem" |
@@ -25,7 +24,7 @@ | |||
25 | * in the current kernel. We stitch up a pte, similar to kmap_atomic. | 24 | * in the current kernel. We stitch up a pte, similar to kmap_atomic. |
26 | */ | 25 | */ |
27 | ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | 26 | ssize_t copy_oldmem_page(unsigned long pfn, char *buf, |
28 | size_t csize, unsigned long offset, int userbuf) | 27 | size_t csize, unsigned long offset, int userbuf) |
29 | { | 28 | { |
30 | void *vaddr; | 29 | void *vaddr; |
31 | 30 | ||
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c index eaff0bbb1444..6c9bfc9e1e95 100644 --- a/arch/x86/kernel/genapic_64.c +++ b/arch/x86/kernel/genapic_64.c | |||
@@ -16,87 +16,63 @@ | |||
16 | #include <linux/ctype.h> | 16 | #include <linux/ctype.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/hardirq.h> | 18 | #include <linux/hardirq.h> |
19 | #include <linux/dmar.h> | ||
19 | 20 | ||
20 | #include <asm/smp.h> | 21 | #include <asm/smp.h> |
21 | #include <asm/ipi.h> | 22 | #include <asm/ipi.h> |
22 | #include <asm/genapic.h> | 23 | #include <asm/genapic.h> |
23 | 24 | ||
24 | #ifdef CONFIG_ACPI | 25 | extern struct genapic apic_flat; |
25 | #include <acpi/acpi_bus.h> | 26 | extern struct genapic apic_physflat; |
26 | #endif | 27 | extern struct genapic apic_x2apic_uv_x; |
27 | 28 | extern struct genapic apic_x2apic_phys; | |
28 | DEFINE_PER_CPU(int, x2apic_extra_bits); | 29 | extern struct genapic apic_x2apic_cluster; |
29 | 30 | ||
30 | struct genapic __read_mostly *genapic = &apic_flat; | 31 | struct genapic __read_mostly *genapic = &apic_flat; |
31 | 32 | ||
32 | static enum uv_system_type uv_system_type; | 33 | static struct genapic *apic_probe[] __initdata = { |
34 | &apic_x2apic_uv_x, | ||
35 | &apic_x2apic_phys, | ||
36 | &apic_x2apic_cluster, | ||
37 | &apic_physflat, | ||
38 | NULL, | ||
39 | }; | ||
33 | 40 | ||
34 | /* | 41 | /* |
35 | * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. | 42 | * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. |
36 | */ | 43 | */ |
37 | void __init setup_apic_routing(void) | 44 | void __init setup_apic_routing(void) |
38 | { | 45 | { |
39 | if (uv_system_type == UV_NON_UNIQUE_APIC) | 46 | if (genapic == &apic_x2apic_phys || genapic == &apic_x2apic_cluster) { |
40 | genapic = &apic_x2apic_uv_x; | 47 | if (!intr_remapping_enabled) |
41 | else | 48 | genapic = &apic_flat; |
42 | #ifdef CONFIG_ACPI | 49 | } |
43 | /* | ||
44 | * Quirk: some x86_64 machines can only use physical APIC mode | ||
45 | * regardless of how many processors are present (x86_64 ES7000 | ||
46 | * is an example). | ||
47 | */ | ||
48 | if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID && | ||
49 | (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) | ||
50 | genapic = &apic_physflat; | ||
51 | else | ||
52 | #endif | ||
53 | |||
54 | if (max_physical_apicid < 8) | ||
55 | genapic = &apic_flat; | ||
56 | else | ||
57 | genapic = &apic_physflat; | ||
58 | 50 | ||
59 | printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name); | 51 | if (genapic == &apic_flat) { |
52 | if (max_physical_apicid >= 8) | ||
53 | genapic = &apic_physflat; | ||
54 | printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name); | ||
55 | } | ||
60 | } | 56 | } |
61 | 57 | ||
62 | /* Same for both flat and physical. */ | 58 | /* Same for both flat and physical. */ |
63 | 59 | ||
64 | void send_IPI_self(int vector) | 60 | void apic_send_IPI_self(int vector) |
65 | { | 61 | { |
66 | __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); | 62 | __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); |
67 | } | 63 | } |
68 | 64 | ||
69 | int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 65 | int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
70 | { | 66 | { |
71 | if (!strcmp(oem_id, "SGI")) { | 67 | int i; |
72 | if (!strcmp(oem_table_id, "UVL")) | 68 | |
73 | uv_system_type = UV_LEGACY_APIC; | 69 | for (i = 0; apic_probe[i]; ++i) { |
74 | else if (!strcmp(oem_table_id, "UVX")) | 70 | if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { |
75 | uv_system_type = UV_X2APIC; | 71 | genapic = apic_probe[i]; |
76 | else if (!strcmp(oem_table_id, "UVH")) | 72 | printk(KERN_INFO "Setting APIC routing to %s.\n", |
77 | uv_system_type = UV_NON_UNIQUE_APIC; | 73 | genapic->name); |
74 | return 1; | ||
75 | } | ||
78 | } | 76 | } |
79 | return 0; | 77 | return 0; |
80 | } | 78 | } |
81 | |||
82 | unsigned int read_apic_id(void) | ||
83 | { | ||
84 | unsigned int id; | ||
85 | |||
86 | WARN_ON(preemptible() && num_online_cpus() > 1); | ||
87 | id = apic_read(APIC_ID); | ||
88 | if (uv_system_type >= UV_X2APIC) | ||
89 | id |= __get_cpu_var(x2apic_extra_bits); | ||
90 | return id; | ||
91 | } | ||
92 | |||
93 | enum uv_system_type get_uv_system_type(void) | ||
94 | { | ||
95 | return uv_system_type; | ||
96 | } | ||
97 | |||
98 | int is_uv_system(void) | ||
99 | { | ||
100 | return uv_system_type != UV_NONE; | ||
101 | } | ||
102 | EXPORT_SYMBOL_GPL(is_uv_system); | ||
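After this refactoring, adding a 64-bit APIC driver reduces to supplying a struct genapic with an acpi_madt_oem_check() hook and listing it in apic_probe[]; the first probe to return nonzero wins, with apic_flat as the implicit default. A hypothetical skeleton (all names invented for illustration):

    static int __init myapic_acpi_madt_oem_check(char *oem_id,
                                                 char *oem_table_id)
    {
            return !strcmp(oem_id, "MYVENDOR"); /* claim only our platform */
    }

    struct genapic apic_myapic = {
            .name                   = "my apic",
            .acpi_madt_oem_check    = myapic_acpi_madt_oem_check,
            /* ...delivery mode, IPI and APIC-ID callbacks as in apic_flat... */
    };

    /* ...plus an &apic_myapic entry in apic_probe[], ahead of the
     * catch-all &apic_physflat. */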
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 786548a62d38..9eca5ba7a6b1 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c | |||
@@ -15,9 +15,20 @@ | |||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/ctype.h> | 16 | #include <linux/ctype.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/hardirq.h> | ||
18 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
19 | #include <asm/ipi.h> | 20 | #include <asm/ipi.h> |
20 | #include <asm/genapic.h> | 21 | #include <asm/genapic.h> |
22 | #include <mach_apicdef.h> | ||
23 | |||
24 | #ifdef CONFIG_ACPI | ||
25 | #include <acpi/acpi_bus.h> | ||
26 | #endif | ||
27 | |||
28 | static int __init flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
29 | { | ||
30 | return 1; | ||
31 | } | ||
21 | 32 | ||
22 | static cpumask_t flat_target_cpus(void) | 33 | static cpumask_t flat_target_cpus(void) |
23 | { | 34 | { |
@@ -95,9 +106,33 @@ static void flat_send_IPI_all(int vector) | |||
95 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); | 106 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); |
96 | } | 107 | } |
97 | 108 | ||
109 | static unsigned int get_apic_id(unsigned long x) | ||
110 | { | ||
111 | unsigned int id; | ||
112 | |||
113 | id = (((x)>>24) & 0xFFu); | ||
114 | return id; | ||
115 | } | ||
116 | |||
117 | static unsigned long set_apic_id(unsigned int id) | ||
118 | { | ||
119 | unsigned long x; | ||
120 | |||
121 | x = ((id & 0xFFu)<<24); | ||
122 | return x; | ||
123 | } | ||
124 | |||
125 | static unsigned int read_xapic_id(void) | ||
126 | { | ||
127 | unsigned int id; | ||
128 | |||
129 | id = get_apic_id(apic_read(APIC_ID)); | ||
130 | return id; | ||
131 | } | ||
132 | |||
98 | static int flat_apic_id_registered(void) | 133 | static int flat_apic_id_registered(void) |
99 | { | 134 | { |
100 | return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); | 135 | return physid_isset(read_xapic_id(), phys_cpu_present_map); |
101 | } | 136 | } |
102 | 137 | ||
103 | static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) | 138 | static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) |
@@ -112,6 +147,7 @@ static unsigned int phys_pkg_id(int index_msb) | |||
112 | 147 | ||
113 | struct genapic apic_flat = { | 148 | struct genapic apic_flat = { |
114 | .name = "flat", | 149 | .name = "flat", |
150 | .acpi_madt_oem_check = flat_acpi_madt_oem_check, | ||
115 | .int_delivery_mode = dest_LowestPrio, | 151 | .int_delivery_mode = dest_LowestPrio, |
116 | .int_dest_mode = (APIC_DEST_LOGICAL != 0), | 152 | .int_dest_mode = (APIC_DEST_LOGICAL != 0), |
117 | .target_cpus = flat_target_cpus, | 153 | .target_cpus = flat_target_cpus, |
@@ -121,8 +157,12 @@ struct genapic apic_flat = { | |||
121 | .send_IPI_all = flat_send_IPI_all, | 157 | .send_IPI_all = flat_send_IPI_all, |
122 | .send_IPI_allbutself = flat_send_IPI_allbutself, | 158 | .send_IPI_allbutself = flat_send_IPI_allbutself, |
123 | .send_IPI_mask = flat_send_IPI_mask, | 159 | .send_IPI_mask = flat_send_IPI_mask, |
160 | .send_IPI_self = apic_send_IPI_self, | ||
124 | .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, | 161 | .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, |
125 | .phys_pkg_id = phys_pkg_id, | 162 | .phys_pkg_id = phys_pkg_id, |
163 | .get_apic_id = get_apic_id, | ||
164 | .set_apic_id = set_apic_id, | ||
165 | .apic_id_mask = (0xFFu<<24), | ||
126 | }; | 166 | }; |
127 | 167 | ||
128 | /* | 168 | /* |
@@ -130,6 +170,21 @@ struct genapic apic_flat = { | |||
130 | * We cannot use logical delivery in this case because the mask | 170 | * We cannot use logical delivery in this case because the mask |
131 | * overflows, so use physical mode. | 171 | * overflows, so use physical mode. |
132 | */ | 172 | */ |
173 | static int __init physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
174 | { | ||
175 | #ifdef CONFIG_ACPI | ||
176 | /* | ||
177 | * Quirk: some x86_64 machines can only use physical APIC mode | ||
178 | * regardless of how many processors are present (x86_64 ES7000 | ||
179 | * is an example). | ||
180 | */ | ||
181 | if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID && | ||
182 | (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) | ||
183 | return 1; | ||
184 | #endif | ||
185 | |||
186 | return 0; | ||
187 | } | ||
133 | 188 | ||
134 | static cpumask_t physflat_target_cpus(void) | 189 | static cpumask_t physflat_target_cpus(void) |
135 | { | 190 | { |
@@ -176,6 +231,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) | |||
176 | 231 | ||
177 | struct genapic apic_physflat = { | 232 | struct genapic apic_physflat = { |
178 | .name = "physical flat", | 233 | .name = "physical flat", |
234 | .acpi_madt_oem_check = physflat_acpi_madt_oem_check, | ||
179 | .int_delivery_mode = dest_Fixed, | 235 | .int_delivery_mode = dest_Fixed, |
180 | .int_dest_mode = (APIC_DEST_PHYSICAL != 0), | 236 | .int_dest_mode = (APIC_DEST_PHYSICAL != 0), |
181 | .target_cpus = physflat_target_cpus, | 237 | .target_cpus = physflat_target_cpus, |
@@ -185,6 +241,10 @@ struct genapic apic_physflat = { | |||
185 | .send_IPI_all = physflat_send_IPI_all, | 241 | .send_IPI_all = physflat_send_IPI_all, |
186 | .send_IPI_allbutself = physflat_send_IPI_allbutself, | 242 | .send_IPI_allbutself = physflat_send_IPI_allbutself, |
187 | .send_IPI_mask = physflat_send_IPI_mask, | 243 | .send_IPI_mask = physflat_send_IPI_mask, |
244 | .send_IPI_self = apic_send_IPI_self, | ||
188 | .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, | 245 | .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, |
189 | .phys_pkg_id = phys_pkg_id, | 246 | .phys_pkg_id = phys_pkg_id, |
247 | .get_apic_id = get_apic_id, | ||
248 | .set_apic_id = set_apic_id, | ||
249 | .apic_id_mask = (0xFFu<<24), | ||
190 | }; | 250 | }; |
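The new get_apic_id()/set_apic_id() accessors encode the xAPIC register layout, where the 8-bit APIC ID occupies bits 31:24 of APIC_ID; the x2apic drivers below use identity accessors because x2APIC exposes the full 32-bit ID directly. A quick round-trip for concreteness:

    /* Round-trip through the accessors above (xAPIC layout): */
    unsigned long reg = set_apic_id(0x17);  /* reg == 0x17000000 */
    unsigned int id  = get_apic_id(reg);    /* id  == 0x17 */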
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c new file mode 100644 index 000000000000..fed9f68efd66 --- /dev/null +++ b/arch/x86/kernel/genx2apic_cluster.c | |||
@@ -0,0 +1,164 @@ | |||
1 | #include <linux/threads.h> | ||
2 | #include <linux/cpumask.h> | ||
3 | #include <linux/string.h> | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/ctype.h> | ||
6 | #include <linux/init.h> | ||
7 | #include <linux/dmar.h> | ||
8 | |||
9 | #include <asm/smp.h> | ||
10 | #include <asm/ipi.h> | ||
11 | #include <asm/genapic.h> | ||
12 | |||
13 | DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid); | ||
14 | |||
15 | static int __init x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
16 | { | ||
17 | if (cpu_has_x2apic) | ||
18 | return 1; | ||
19 | |||
20 | return 0; | ||
21 | } | ||
22 | |||
23 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | ||
24 | |||
25 | static cpumask_t x2apic_target_cpus(void) | ||
26 | { | ||
27 | return cpumask_of_cpu(0); | ||
28 | } | ||
29 | |||
30 | /* | ||
31 | * For now, each logical CPU is in its own vector allocation domain. | ||
32 | */ | ||
33 | static cpumask_t x2apic_vector_allocation_domain(int cpu) | ||
34 | { | ||
35 | cpumask_t domain = CPU_MASK_NONE; | ||
36 | cpu_set(cpu, domain); | ||
37 | return domain; | ||
38 | } | ||
39 | |||
40 | static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, | ||
41 | unsigned int dest) | ||
42 | { | ||
43 | unsigned long cfg; | ||
44 | |||
45 | cfg = __prepare_ICR(0, vector, dest); | ||
46 | |||
47 | /* | ||
48 | * send the IPI. | ||
49 | */ | ||
50 | x2apic_icr_write(cfg, apicid); | ||
51 | } | ||
52 | |||
53 | /* | ||
54 | * For now, we send the IPIs one by one in the cpumask. | ||
55 | * TBD: Based on the cpu mask, we can send the IPIs to the cluster group | ||
56 | * at once. We have 16 CPUs in a cluster. This will minimize IPI register | ||
57 | * writes. | ||
58 | */ | ||
59 | static void x2apic_send_IPI_mask(cpumask_t mask, int vector) | ||
60 | { | ||
61 | unsigned long flags; | ||
62 | unsigned long query_cpu; | ||
63 | |||
64 | local_irq_save(flags); | ||
65 | for_each_cpu_mask(query_cpu, mask) { | ||
66 | __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu), | ||
67 | vector, APIC_DEST_LOGICAL); | ||
68 | } | ||
69 | local_irq_restore(flags); | ||
70 | } | ||
71 | |||
72 | static void x2apic_send_IPI_allbutself(int vector) | ||
73 | { | ||
74 | cpumask_t mask = cpu_online_map; | ||
75 | |||
76 | cpu_clear(smp_processor_id(), mask); | ||
77 | |||
78 | if (!cpus_empty(mask)) | ||
79 | x2apic_send_IPI_mask(mask, vector); | ||
80 | } | ||
81 | |||
82 | static void x2apic_send_IPI_all(int vector) | ||
83 | { | ||
84 | x2apic_send_IPI_mask(cpu_online_map, vector); | ||
85 | } | ||
86 | |||
87 | static int x2apic_apic_id_registered(void) | ||
88 | { | ||
89 | return 1; | ||
90 | } | ||
91 | |||
92 | static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) | ||
93 | { | ||
94 | int cpu; | ||
95 | |||
96 | /* | ||
97 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | ||
98 | * May as well be the first. | ||
99 | */ | ||
100 | cpu = first_cpu(cpumask); | ||
101 | if ((unsigned)cpu < NR_CPUS) | ||
102 | return per_cpu(x86_cpu_to_logical_apicid, cpu); | ||
103 | else | ||
104 | return BAD_APICID; | ||
105 | } | ||
106 | |||
107 | static unsigned int get_apic_id(unsigned long x) | ||
108 | { | ||
109 | unsigned int id; | ||
110 | |||
111 | id = x; | ||
112 | return id; | ||
113 | } | ||
114 | |||
115 | static unsigned long set_apic_id(unsigned int id) | ||
116 | { | ||
117 | unsigned long x; | ||
118 | |||
119 | x = id; | ||
120 | return x; | ||
121 | } | ||
122 | |||
123 | static unsigned int x2apic_read_id(void) | ||
124 | { | ||
125 | return apic_read(APIC_ID); | ||
126 | } | ||
127 | |||
128 | static unsigned int phys_pkg_id(int index_msb) | ||
129 | { | ||
130 | return x2apic_read_id() >> index_msb; | ||
131 | } | ||
132 | |||
133 | static void x2apic_send_IPI_self(int vector) | ||
134 | { | ||
135 | apic_write(APIC_SELF_IPI, vector); | ||
136 | } | ||
137 | |||
138 | static void init_x2apic_ldr(void) | ||
139 | { | ||
140 | int cpu = smp_processor_id(); | ||
141 | |||
142 | per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR); | ||
143 | return; | ||
144 | } | ||
145 | |||
146 | struct genapic apic_x2apic_cluster = { | ||
147 | .name = "cluster x2apic", | ||
148 | .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, | ||
149 | .int_delivery_mode = dest_LowestPrio, | ||
150 | .int_dest_mode = (APIC_DEST_LOGICAL != 0), | ||
151 | .target_cpus = x2apic_target_cpus, | ||
152 | .vector_allocation_domain = x2apic_vector_allocation_domain, | ||
153 | .apic_id_registered = x2apic_apic_id_registered, | ||
154 | .init_apic_ldr = init_x2apic_ldr, | ||
155 | .send_IPI_all = x2apic_send_IPI_all, | ||
156 | .send_IPI_allbutself = x2apic_send_IPI_allbutself, | ||
157 | .send_IPI_mask = x2apic_send_IPI_mask, | ||
158 | .send_IPI_self = x2apic_send_IPI_self, | ||
159 | .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, | ||
160 | .phys_pkg_id = phys_pkg_id, | ||
161 | .get_apic_id = get_apic_id, | ||
162 | .set_apic_id = set_apic_id, | ||
163 | .apic_id_mask = (0xFFFFFFFFu), | ||
164 | }; | ||
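The cluster driver records each CPU's logical ID from APIC_LDR in init_x2apic_ldr() and targets IPIs at those logical IDs. In x2APIC mode the 32-bit logical ID is structured, per the Intel SDM, as a cluster number in the upper half and a one-hot position bitmask in the lower half, which is what the "16 cpus in a cluster" comment refers to. A hypothetical decoder:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode an x2APIC logical APIC ID as read from APIC_LDR.
     * Layout per the Intel SDM: bits 31:16 hold the cluster number,
     * bits 15:0 a one-hot position bitmask (hence 16 CPUs/cluster). */
    static void decode_x2apic_ldr(uint32_t ldr)
    {
            printf("cluster %u, intra-cluster bit %#06x\n",
                   ldr >> 16, ldr & 0xFFFF);
    }

    int main(void)
    {
            decode_x2apic_ldr(0x00030004);  /* cluster 3, third CPU */
            return 0;
    }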
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c new file mode 100644 index 000000000000..958d537b4cc9 --- /dev/null +++ b/arch/x86/kernel/genx2apic_phys.c | |||
@@ -0,0 +1,159 @@ | |||
1 | #include <linux/threads.h> | ||
2 | #include <linux/cpumask.h> | ||
3 | #include <linux/string.h> | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/ctype.h> | ||
6 | #include <linux/init.h> | ||
7 | #include <linux/dmar.h> | ||
8 | |||
9 | #include <asm/smp.h> | ||
10 | #include <asm/ipi.h> | ||
11 | #include <asm/genapic.h> | ||
12 | |||
13 | static int x2apic_phys; | ||
14 | |||
15 | static int set_x2apic_phys_mode(char *arg) | ||
16 | { | ||
17 | x2apic_phys = 1; | ||
18 | return 0; | ||
19 | } | ||
20 | early_param("x2apic_phys", set_x2apic_phys_mode); | ||
21 | |||
22 | static int __init x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
23 | { | ||
24 | if (cpu_has_x2apic && x2apic_phys) | ||
25 | return 1; | ||
26 | |||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | ||
31 | |||
32 | static cpumask_t x2apic_target_cpus(void) | ||
33 | { | ||
34 | return cpumask_of_cpu(0); | ||
35 | } | ||
36 | |||
37 | static cpumask_t x2apic_vector_allocation_domain(int cpu) | ||
38 | { | ||
39 | cpumask_t domain = CPU_MASK_NONE; | ||
40 | cpu_set(cpu, domain); | ||
41 | return domain; | ||
42 | } | ||
43 | |||
44 | static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, | ||
45 | unsigned int dest) | ||
46 | { | ||
47 | unsigned long cfg; | ||
48 | |||
49 | cfg = __prepare_ICR(0, vector, dest); | ||
50 | |||
51 | /* | ||
52 | * send the IPI. | ||
53 | */ | ||
54 | x2apic_icr_write(cfg, apicid); | ||
55 | } | ||
56 | |||
57 | static void x2apic_send_IPI_mask(cpumask_t mask, int vector) | ||
58 | { | ||
59 | unsigned long flags; | ||
60 | unsigned long query_cpu; | ||
61 | |||
62 | local_irq_save(flags); | ||
63 | for_each_cpu_mask(query_cpu, mask) { | ||
64 | __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), | ||
65 | vector, APIC_DEST_PHYSICAL); | ||
66 | } | ||
67 | local_irq_restore(flags); | ||
68 | } | ||
69 | |||
70 | static void x2apic_send_IPI_allbutself(int vector) | ||
71 | { | ||
72 | cpumask_t mask = cpu_online_map; | ||
73 | |||
74 | cpu_clear(smp_processor_id(), mask); | ||
75 | |||
76 | if (!cpus_empty(mask)) | ||
77 | x2apic_send_IPI_mask(mask, vector); | ||
78 | } | ||
79 | |||
80 | static void x2apic_send_IPI_all(int vector) | ||
81 | { | ||
82 | x2apic_send_IPI_mask(cpu_online_map, vector); | ||
83 | } | ||
84 | |||
85 | static int x2apic_apic_id_registered(void) | ||
86 | { | ||
87 | return 1; | ||
88 | } | ||
89 | |||
90 | static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) | ||
91 | { | ||
92 | int cpu; | ||
93 | |||
94 | /* | ||
95 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | ||
96 | * May as well be the first. | ||
97 | */ | ||
98 | cpu = first_cpu(cpumask); | ||
99 | if ((unsigned)cpu < NR_CPUS) | ||
100 | return per_cpu(x86_cpu_to_apicid, cpu); | ||
101 | else | ||
102 | return BAD_APICID; | ||
103 | } | ||
104 | |||
105 | static unsigned int get_apic_id(unsigned long x) | ||
106 | { | ||
107 | unsigned int id; | ||
108 | |||
109 | id = x; | ||
110 | return id; | ||
111 | } | ||
112 | |||
113 | static unsigned long set_apic_id(unsigned int id) | ||
114 | { | ||
115 | unsigned long x; | ||
116 | |||
117 | x = id; | ||
118 | return x; | ||
119 | } | ||
120 | |||
121 | static unsigned int x2apic_read_id(void) | ||
122 | { | ||
123 | return apic_read(APIC_ID); | ||
124 | } | ||
125 | |||
126 | static unsigned int phys_pkg_id(int index_msb) | ||
127 | { | ||
128 | return x2apic_read_id() >> index_msb; | ||
129 | } | ||
130 | |||
131 | void x2apic_send_IPI_self(int vector) | ||
132 | { | ||
133 | apic_write(APIC_SELF_IPI, vector); | ||
134 | } | ||
135 | |||
136 | void init_x2apic_ldr(void) | ||
137 | { | ||
138 | return; | ||
139 | } | ||
140 | |||
141 | struct genapic apic_x2apic_phys = { | ||
142 | .name = "physical x2apic", | ||
143 | .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, | ||
144 | .int_delivery_mode = dest_Fixed, | ||
145 | .int_dest_mode = (APIC_DEST_PHYSICAL != 0), | ||
146 | .target_cpus = x2apic_target_cpus, | ||
147 | .vector_allocation_domain = x2apic_vector_allocation_domain, | ||
148 | .apic_id_registered = x2apic_apic_id_registered, | ||
149 | .init_apic_ldr = init_x2apic_ldr, | ||
150 | .send_IPI_all = x2apic_send_IPI_all, | ||
151 | .send_IPI_allbutself = x2apic_send_IPI_allbutself, | ||
152 | .send_IPI_mask = x2apic_send_IPI_mask, | ||
153 | .send_IPI_self = x2apic_send_IPI_self, | ||
154 | .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, | ||
155 | .phys_pkg_id = phys_pkg_id, | ||
156 | .get_apic_id = get_apic_id, | ||
157 | .set_apic_id = set_apic_id, | ||
158 | .apic_id_mask = (0xFFFFFFFFu), | ||
159 | }; | ||
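Both x2apic drivers funnel IPIs through x2apic_icr_write(). In x2APIC mode the ICR collapses into a single 64-bit MSR with the destination APIC ID in the upper half, so the write boils down to something like this sketch (layout and MSR number assumed from the Intel SDM, not taken from this patch):

    #include <linux/types.h>
    #include <asm/msr.h>    /* wrmsrl() */

    /* x2APIC ICR: one 64-bit MSR (0x830); destination APIC ID in
     * bits 63:32, the classic ICR low word in bits 31:0. */
    static inline void x2apic_icr_write_sketch(u32 low, u32 apicid)
    {
            wrmsrl(0x830, ((u64)apicid << 32) | low);
    }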
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index bfa837cb16be..99269beabc7c 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c | |||
@@ -12,12 +12,12 @@ | |||
12 | #include <linux/threads.h> | 12 | #include <linux/threads.h> |
13 | #include <linux/cpumask.h> | 13 | #include <linux/cpumask.h> |
14 | #include <linux/string.h> | 14 | #include <linux/string.h> |
15 | #include <linux/kernel.h> | ||
16 | #include <linux/ctype.h> | 15 | #include <linux/ctype.h> |
17 | #include <linux/init.h> | 16 | #include <linux/init.h> |
18 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
19 | #include <linux/bootmem.h> | 18 | #include <linux/bootmem.h> |
20 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/hardirq.h> | ||
21 | #include <asm/smp.h> | 21 | #include <asm/smp.h> |
22 | #include <asm/ipi.h> | 22 | #include <asm/ipi.h> |
23 | #include <asm/genapic.h> | 23 | #include <asm/genapic.h> |
@@ -26,6 +26,35 @@ | |||
26 | #include <asm/uv/uv_hub.h> | 26 | #include <asm/uv/uv_hub.h> |
27 | #include <asm/uv/bios.h> | 27 | #include <asm/uv/bios.h> |
28 | 28 | ||
29 | DEFINE_PER_CPU(int, x2apic_extra_bits); | ||
30 | |||
31 | static enum uv_system_type uv_system_type; | ||
32 | |||
33 | static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
34 | { | ||
35 | if (!strcmp(oem_id, "SGI")) { | ||
36 | if (!strcmp(oem_table_id, "UVL")) | ||
37 | uv_system_type = UV_LEGACY_APIC; | ||
38 | else if (!strcmp(oem_table_id, "UVX")) | ||
39 | uv_system_type = UV_X2APIC; | ||
40 | else if (!strcmp(oem_table_id, "UVH")) { | ||
41 | uv_system_type = UV_NON_UNIQUE_APIC; | ||
42 | return 1; | ||
43 | } | ||
44 | } | ||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | enum uv_system_type get_uv_system_type(void) | ||
49 | { | ||
50 | return uv_system_type; | ||
51 | } | ||
52 | |||
53 | int is_uv_system(void) | ||
54 | { | ||
55 | return uv_system_type != UV_NONE; | ||
56 | } | ||
57 | |||
29 | DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | 58 | DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); |
30 | EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info); | 59 | EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info); |
31 | 60 | ||
@@ -123,6 +152,10 @@ static int uv_apic_id_registered(void) | |||
123 | return 1; | 152 | return 1; |
124 | } | 153 | } |
125 | 154 | ||
155 | static void uv_init_apic_ldr(void) | ||
156 | { | ||
157 | } | ||
158 | |||
126 | static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) | 159 | static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) |
127 | { | 160 | { |
128 | int cpu; | 161 | int cpu; |
@@ -138,9 +171,34 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) | |||
138 | return BAD_APICID; | 171 | return BAD_APICID; |
139 | } | 172 | } |
140 | 173 | ||
174 | static unsigned int get_apic_id(unsigned long x) | ||
175 | { | ||
176 | unsigned int id; | ||
177 | |||
178 | WARN_ON(preemptible() && num_online_cpus() > 1); | ||
179 | id = x | __get_cpu_var(x2apic_extra_bits); | ||
180 | |||
181 | return id; | ||
182 | } | ||
183 | |||
184 | static unsigned long set_apic_id(unsigned int id) | ||
185 | { | ||
186 | unsigned long x; | ||
187 | |||
188 | /* mask out x2apic_extra_bits ? */ | ||
189 | x = id; | ||
190 | return x; | ||
191 | } | ||
192 | |||
193 | static unsigned int uv_read_apic_id(void) | ||
194 | { | ||
195 | |||
196 | return get_apic_id(apic_read(APIC_ID)); | ||
197 | } | ||
198 | |||
141 | static unsigned int phys_pkg_id(int index_msb) | 199 | static unsigned int phys_pkg_id(int index_msb) |
142 | { | 200 | { |
143 | return GET_APIC_ID(read_apic_id()) >> index_msb; | 201 | return uv_read_apic_id() >> index_msb; |
144 | } | 202 | } |
145 | 203 | ||
146 | #ifdef ZZZ /* Needs x2apic patch */ | 204 | #ifdef ZZZ /* Needs x2apic patch */ |
@@ -152,17 +210,22 @@ static void uv_send_IPI_self(int vector) | |||
152 | 210 | ||
153 | struct genapic apic_x2apic_uv_x = { | 211 | struct genapic apic_x2apic_uv_x = { |
154 | .name = "UV large system", | 212 | .name = "UV large system", |
213 | .acpi_madt_oem_check = uv_acpi_madt_oem_check, | ||
155 | .int_delivery_mode = dest_Fixed, | 214 | .int_delivery_mode = dest_Fixed, |
156 | .int_dest_mode = (APIC_DEST_PHYSICAL != 0), | 215 | .int_dest_mode = (APIC_DEST_PHYSICAL != 0), |
157 | .target_cpus = uv_target_cpus, | 216 | .target_cpus = uv_target_cpus, |
158 | .vector_allocation_domain = uv_vector_allocation_domain,/* Fixme ZZZ */ | 217 | .vector_allocation_domain = uv_vector_allocation_domain,/* Fixme ZZZ */ |
159 | .apic_id_registered = uv_apic_id_registered, | 218 | .apic_id_registered = uv_apic_id_registered, |
219 | .init_apic_ldr = uv_init_apic_ldr, | ||
160 | .send_IPI_all = uv_send_IPI_all, | 220 | .send_IPI_all = uv_send_IPI_all, |
161 | .send_IPI_allbutself = uv_send_IPI_allbutself, | 221 | .send_IPI_allbutself = uv_send_IPI_allbutself, |
162 | .send_IPI_mask = uv_send_IPI_mask, | 222 | .send_IPI_mask = uv_send_IPI_mask, |
163 | /* ZZZ.send_IPI_self = uv_send_IPI_self, */ | 223 | /* ZZZ.send_IPI_self = uv_send_IPI_self, */ |
164 | .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, | 224 | .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, |
165 | .phys_pkg_id = phys_pkg_id, /* Fixme ZZZ */ | 225 | .phys_pkg_id = phys_pkg_id, /* Fixme ZZZ */ |
226 | .get_apic_id = get_apic_id, | ||
227 | .set_apic_id = set_apic_id, | ||
228 | .apic_id_mask = (0xFFFFFFFFu), | ||
166 | }; | 229 | }; |
167 | 230 | ||
168 | static __cpuinit void set_x2apic_extra_bits(int pnode) | 231 | static __cpuinit void set_x2apic_extra_bits(int pnode) |
@@ -401,3 +464,5 @@ void __cpuinit uv_cpu_init(void) | |||
401 | if (get_uv_system_type() == UV_NON_UNIQUE_APIC) | 464 | if (get_uv_system_type() == UV_NON_UNIQUE_APIC) |
402 | set_x2apic_extra_bits(uv_hub_info->pnode); | 465 | set_x2apic_extra_bits(uv_hub_info->pnode); |
403 | } | 466 | } |
467 | |||
468 | |||
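The UV_NON_UNIQUE_APIC path above keeps a per-cpu x2apic_extra_bits value that get_apic_id() ORs into the ID read from the hardware, making APIC IDs that are only node-unique unique system-wide. A minimal userspace sketch of that composition (the 6-bit pnode shift is an assumption for illustration; the real shift comes from the UV hub geometry):

/* Sketch, not kernel code: compose a system-wide APIC ID from the
 * hardware-reported ID plus per-node "extra bits". */
#include <stdio.h>

static unsigned int x2apic_extra_bits;		/* per-cpu in the kernel */

static void set_x2apic_extra_bits(int pnode)
{
	x2apic_extra_bits = pnode << 6;		/* assumed shift */
}

static unsigned int get_apic_id(unsigned long x)
{
	return x | x2apic_extra_bits;		/* hw id | node bits */
}

int main(void)
{
	set_x2apic_extra_bits(3);		/* hypothetical pnode */
	printf("composed APIC ID: %#x\n", get_apic_id(0x15));
	return 0;
}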
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index eb9ddd8efb82..45723f1fe198 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -21,9 +21,12 @@ | |||
21 | # include <asm/sigcontext32.h> | 21 | # include <asm/sigcontext32.h> |
22 | # include <asm/user32.h> | 22 | # include <asm/user32.h> |
23 | #else | 23 | #else |
24 | # define save_i387_ia32 save_i387 | 24 | # define save_i387_xstate_ia32 save_i387_xstate |
25 | # define restore_i387_ia32 restore_i387 | 25 | # define restore_i387_xstate_ia32 restore_i387_xstate |
26 | # define _fpstate_ia32 _fpstate | 26 | # define _fpstate_ia32 _fpstate |
27 | # define _xstate_ia32 _xstate | ||
28 | # define sig_xstate_ia32_size sig_xstate_size | ||
29 | # define fx_sw_reserved_ia32 fx_sw_reserved | ||
27 | # define user_i387_ia32_struct user_i387_struct | 30 | # define user_i387_ia32_struct user_i387_struct |
28 | # define user32_fxsr_struct user_fxsr_struct | 31 | # define user32_fxsr_struct user_fxsr_struct |
29 | #endif | 32 | #endif |
@@ -36,6 +39,7 @@ | |||
36 | 39 | ||
37 | static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; | 40 | static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; |
38 | unsigned int xstate_size; | 41 | unsigned int xstate_size; |
42 | unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32); | ||
39 | static struct i387_fxsave_struct fx_scratch __cpuinitdata; | 43 | static struct i387_fxsave_struct fx_scratch __cpuinitdata; |
40 | 44 | ||
41 | void __cpuinit mxcsr_feature_mask_init(void) | 45 | void __cpuinit mxcsr_feature_mask_init(void) |
@@ -61,6 +65,11 @@ void __init init_thread_xstate(void) | |||
61 | return; | 65 | return; |
62 | } | 66 | } |
63 | 67 | ||
68 | if (cpu_has_xsave) { | ||
69 | xsave_cntxt_init(); | ||
70 | return; | ||
71 | } | ||
72 | |||
64 | if (cpu_has_fxsr) | 73 | if (cpu_has_fxsr) |
65 | xstate_size = sizeof(struct i387_fxsave_struct); | 74 | xstate_size = sizeof(struct i387_fxsave_struct); |
66 | #ifdef CONFIG_X86_32 | 75 | #ifdef CONFIG_X86_32 |
@@ -83,9 +92,19 @@ void __cpuinit fpu_init(void) | |||
83 | 92 | ||
84 | write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */ | 93 | write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */ |
85 | 94 | ||
95 | /* | ||
96 | * On the boot processor, set up the FP and extended state context info. | ||
97 | */ | ||
98 | if (!smp_processor_id()) | ||
99 | init_thread_xstate(); | ||
100 | xsave_init(); | ||
101 | |||
86 | mxcsr_feature_mask_init(); | 102 | mxcsr_feature_mask_init(); |
87 | /* clean state in init */ | 103 | /* clean state in init */ |
88 | current_thread_info()->status = 0; | 104 | if (cpu_has_xsave) |
105 | current_thread_info()->status = TS_XSAVE; | ||
106 | else | ||
107 | current_thread_info()->status = 0; | ||
89 | clear_used_math(); | 108 | clear_used_math(); |
90 | } | 109 | } |
91 | #endif /* CONFIG_X86_64 */ | 110 | #endif /* CONFIG_X86_64 */ |
@@ -195,6 +214,13 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
195 | */ | 214 | */ |
196 | target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; | 215 | target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; |
197 | 216 | ||
217 | /* | ||
218 | * update the header bits in the xsave header, indicating the | ||
219 | * presence of FP and SSE state. | ||
220 | */ | ||
221 | if (cpu_has_xsave) | ||
222 | target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE; | ||
223 | |||
198 | return ret; | 224 | return ret; |
199 | } | 225 | } |
200 | 226 | ||
@@ -395,6 +421,12 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
395 | if (!ret) | 421 | if (!ret) |
396 | convert_to_fxsr(target, &env); | 422 | convert_to_fxsr(target, &env); |
397 | 423 | ||
424 | /* | ||
425 | * update the header bit in the xsave header, indicating the | ||
426 | * presence of FP. | ||
427 | */ | ||
428 | if (cpu_has_xsave) | ||
429 | target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FP; | ||
398 | return ret; | 430 | return ret; |
399 | } | 431 | } |
400 | 432 | ||
@@ -407,7 +439,6 @@ static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf) | |||
407 | struct task_struct *tsk = current; | 439 | struct task_struct *tsk = current; |
408 | struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave; | 440 | struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave; |
409 | 441 | ||
410 | unlazy_fpu(tsk); | ||
411 | fp->status = fp->swd; | 442 | fp->status = fp->swd; |
412 | if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct))) | 443 | if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct))) |
413 | return -1; | 444 | return -1; |
@@ -421,8 +452,6 @@ static int save_i387_fxsave(struct _fpstate_ia32 __user *buf) | |||
421 | struct user_i387_ia32_struct env; | 452 | struct user_i387_ia32_struct env; |
422 | int err = 0; | 453 | int err = 0; |
423 | 454 | ||
424 | unlazy_fpu(tsk); | ||
425 | |||
426 | convert_from_fxsr(&env, tsk); | 455 | convert_from_fxsr(&env, tsk); |
427 | if (__copy_to_user(buf, &env, sizeof(env))) | 456 | if (__copy_to_user(buf, &env, sizeof(env))) |
428 | return -1; | 457 | return -1; |
@@ -432,16 +461,40 @@ static int save_i387_fxsave(struct _fpstate_ia32 __user *buf) | |||
432 | if (err) | 461 | if (err) |
433 | return -1; | 462 | return -1; |
434 | 463 | ||
435 | if (__copy_to_user(&buf->_fxsr_env[0], fx, | 464 | if (__copy_to_user(&buf->_fxsr_env[0], fx, xstate_size)) |
436 | sizeof(struct i387_fxsave_struct))) | ||
437 | return -1; | 465 | return -1; |
438 | return 1; | 466 | return 1; |
439 | } | 467 | } |
440 | 468 | ||
441 | int save_i387_ia32(struct _fpstate_ia32 __user *buf) | 469 | static int save_i387_xsave(void __user *buf) |
470 | { | ||
471 | struct _fpstate_ia32 __user *fx = buf; | ||
472 | int err = 0; | ||
473 | |||
474 | if (save_i387_fxsave(fx) < 0) | ||
475 | return -1; | ||
476 | |||
477 | err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved_ia32, | ||
478 | sizeof(struct _fpx_sw_bytes)); | ||
479 | err |= __put_user(FP_XSTATE_MAGIC2, | ||
480 | (__u32 __user *) (buf + sig_xstate_ia32_size | ||
481 | - FP_XSTATE_MAGIC2_SIZE)); | ||
482 | if (err) | ||
483 | return -1; | ||
484 | |||
485 | return 1; | ||
486 | } | ||
487 | |||
488 | int save_i387_xstate_ia32(void __user *buf) | ||
442 | { | 489 | { |
490 | struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf; | ||
491 | struct task_struct *tsk = current; | ||
492 | |||
443 | if (!used_math()) | 493 | if (!used_math()) |
444 | return 0; | 494 | return 0; |
495 | |||
496 | if (!access_ok(VERIFY_WRITE, buf, sig_xstate_ia32_size)) | ||
497 | return -EACCES; | ||
445 | /* | 498 | /* |
446 | * This will cause a "finit" to be triggered by the next | 499 | * This will cause a "finit" to be triggered by the next |
447 | * attempted FPU operation by the 'current' process. | 500 | * attempted FPU operation by the 'current' process. |
@@ -451,13 +504,17 @@ int save_i387_ia32(struct _fpstate_ia32 __user *buf) | |||
451 | if (!HAVE_HWFP) { | 504 | if (!HAVE_HWFP) { |
452 | return fpregs_soft_get(current, NULL, | 505 | return fpregs_soft_get(current, NULL, |
453 | 0, sizeof(struct user_i387_ia32_struct), | 506 | 0, sizeof(struct user_i387_ia32_struct), |
454 | NULL, buf) ? -1 : 1; | 507 | NULL, fp) ? -1 : 1; |
455 | } | 508 | } |
456 | 509 | ||
510 | unlazy_fpu(tsk); | ||
511 | |||
512 | if (cpu_has_xsave) | ||
513 | return save_i387_xsave(fp); | ||
457 | if (cpu_has_fxsr) | 514 | if (cpu_has_fxsr) |
458 | return save_i387_fxsave(buf); | 515 | return save_i387_fxsave(fp); |
459 | else | 516 | else |
460 | return save_i387_fsave(buf); | 517 | return save_i387_fsave(fp); |
461 | } | 518 | } |
462 | 519 | ||
463 | static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) | 520 | static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) |
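save_i387_xsave() above writes the extended frame and then stamps FP_XSTATE_MAGIC2 into the last word of the (larger) signal frame, which is what later lets the restore path tell an xsave-format frame from a legacy fxsave one. A userspace-style sketch of that end-of-frame probe (the magic constant is quoted from memory and should be treated as an assumption):

/* Sketch: does a signal FP frame of 'size' bytes carry xsave state?
 * save_i387_xsave() puts FP_XSTATE_MAGIC2 in the frame's last word. */
#include <stdint.h>
#include <string.h>

#define FP_XSTATE_MAGIC2	0x46505845u	/* "FPXE", assumed value */
#define FP_XSTATE_MAGIC2_SIZE	sizeof(uint32_t)

static int frame_has_xstate(const void *buf, size_t size)
{
	uint32_t magic;

	memcpy(&magic, (const char *)buf + size - FP_XSTATE_MAGIC2_SIZE,
	       sizeof(magic));
	return magic == FP_XSTATE_MAGIC2;
}

int main(void)
{
	unsigned char frame[1024] = { 0 };	/* hypothetical frame size */
	uint32_t magic = FP_XSTATE_MAGIC2;

	memcpy(frame + sizeof(frame) - FP_XSTATE_MAGIC2_SIZE, &magic,
	       sizeof(magic));
	return !frame_has_xstate(frame, sizeof(frame));
}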
@@ -468,14 +525,15 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) | |||
468 | sizeof(struct i387_fsave_struct)); | 525 | sizeof(struct i387_fsave_struct)); |
469 | } | 526 | } |
470 | 527 | ||
471 | static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf) | 528 | static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf, |
529 | unsigned int size) | ||
472 | { | 530 | { |
473 | struct task_struct *tsk = current; | 531 | struct task_struct *tsk = current; |
474 | struct user_i387_ia32_struct env; | 532 | struct user_i387_ia32_struct env; |
475 | int err; | 533 | int err; |
476 | 534 | ||
477 | err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0], | 535 | err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0], |
478 | sizeof(struct i387_fxsave_struct)); | 536 | size); |
479 | /* mxcsr reserved bits must be masked to zero for security reasons */ | 537 | /* mxcsr reserved bits must be masked to zero for security reasons */ |
480 | tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; | 538 | tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask; |
481 | if (err || __copy_from_user(&env, buf, sizeof(env))) | 539 | if (err || __copy_from_user(&env, buf, sizeof(env))) |
@@ -485,14 +543,69 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf) | |||
485 | return 0; | 543 | return 0; |
486 | } | 544 | } |
487 | 545 | ||
488 | int restore_i387_ia32(struct _fpstate_ia32 __user *buf) | 546 | static int restore_i387_xsave(void __user *buf) |
547 | { | ||
548 | struct _fpx_sw_bytes fx_sw_user; | ||
549 | struct _fpstate_ia32 __user *fx_user = | ||
550 | ((struct _fpstate_ia32 __user *) buf); | ||
551 | struct i387_fxsave_struct __user *fx = | ||
552 | (struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0]; | ||
553 | struct xsave_hdr_struct *xsave_hdr = | ||
554 | &current->thread.xstate->xsave.xsave_hdr; | ||
555 | u64 mask; | ||
556 | int err; | ||
557 | |||
558 | if (check_for_xstate(fx, buf, &fx_sw_user)) | ||
559 | goto fx_only; | ||
560 | |||
561 | mask = fx_sw_user.xstate_bv; | ||
562 | |||
563 | err = restore_i387_fxsave(buf, fx_sw_user.xstate_size); | ||
564 | |||
565 | xsave_hdr->xstate_bv &= pcntxt_mask; | ||
566 | /* | ||
567 | * These bits must be zero. | ||
568 | */ | ||
569 | xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0; | ||
570 | |||
571 | /* | ||
572 | * Init the state that is not present in the memory layout | ||
573 | * and enabled by the OS. | ||
574 | */ | ||
575 | mask = ~(pcntxt_mask & ~mask); | ||
576 | xsave_hdr->xstate_bv &= mask; | ||
577 | |||
578 | return err; | ||
579 | fx_only: | ||
580 | /* | ||
581 | * Couldn't find the extended state information in the memory | ||
582 | * layout. Restore the FP/SSE and init the other extended state | ||
583 | * enabled by the OS. | ||
584 | */ | ||
585 | xsave_hdr->xstate_bv = XSTATE_FPSSE; | ||
586 | return restore_i387_fxsave(buf, sizeof(struct i387_fxsave_struct)); | ||
587 | } | ||
588 | |||
589 | int restore_i387_xstate_ia32(void __user *buf) | ||
489 | { | 590 | { |
490 | int err; | 591 | int err; |
491 | struct task_struct *tsk = current; | 592 | struct task_struct *tsk = current; |
593 | struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf; | ||
492 | 594 | ||
493 | if (HAVE_HWFP) | 595 | if (HAVE_HWFP) |
494 | clear_fpu(tsk); | 596 | clear_fpu(tsk); |
495 | 597 | ||
598 | if (!buf) { | ||
599 | if (used_math()) { | ||
600 | clear_fpu(tsk); | ||
601 | clear_used_math(); | ||
602 | } | ||
603 | |||
604 | return 0; | ||
605 | } else | ||
606 | if (!access_ok(VERIFY_READ, buf, sig_xstate_ia32_size)) | ||
607 | return -EACCES; | ||
608 | |||
496 | if (!used_math()) { | 609 | if (!used_math()) { |
497 | err = init_fpu(tsk); | 610 | err = init_fpu(tsk); |
498 | if (err) | 611 | if (err) |
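The bit juggling in restore_i387_xsave() above is easier to see with concrete values: any feature the OS enabled (pcntxt_mask) but which the user frame did not carry (mask) must have its xstate_bv bit cleared, so the next XRSTOR puts that feature back into its init state. A small worked sketch with hypothetical masks:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pcntxt_mask = 0x7; /* OS enabled FP|SSE|YMM (hypothetical) */
	uint64_t mask        = 0x3; /* user frame carried FP|SSE only      */
	uint64_t xstate_bv   = 0x7; /* header bits read back from the task */

	xstate_bv &= pcntxt_mask;		/* never beyond OS features  */
	xstate_bv &= ~(pcntxt_mask & ~mask);	/* init what the frame lacks */

	/* prints 0x3: YMM will be restored to its init state */
	printf("xstate_bv = %#llx\n", (unsigned long long)xstate_bv);
	return 0;
}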
@@ -500,14 +613,17 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf) | |||
500 | } | 613 | } |
501 | 614 | ||
502 | if (HAVE_HWFP) { | 615 | if (HAVE_HWFP) { |
503 | if (cpu_has_fxsr) | 616 | if (cpu_has_xsave) |
504 | err = restore_i387_fxsave(buf); | 617 | err = restore_i387_xsave(buf); |
618 | else if (cpu_has_fxsr) | ||
619 | err = restore_i387_fxsave(fp, sizeof(struct | ||
620 | i387_fxsave_struct)); | ||
505 | else | 621 | else |
506 | err = restore_i387_fsave(buf); | 622 | err = restore_i387_fsave(fp); |
507 | } else { | 623 | } else { |
508 | err = fpregs_soft_set(current, NULL, | 624 | err = fpregs_soft_set(current, NULL, |
509 | 0, sizeof(struct user_i387_ia32_struct), | 625 | 0, sizeof(struct user_i387_ia32_struct), |
510 | NULL, buf) != 0; | 626 | NULL, fp) != 0; |
511 | } | 627 | } |
512 | set_used_math(); | 628 | set_used_math(); |
513 | 629 | ||
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index dc92b49d9204..4b8a53d841f7 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c | |||
@@ -282,6 +282,30 @@ static int __init i8259A_init_sysfs(void) | |||
282 | 282 | ||
283 | device_initcall(i8259A_init_sysfs); | 283 | device_initcall(i8259A_init_sysfs); |
284 | 284 | ||
285 | void mask_8259A(void) | ||
286 | { | ||
287 | unsigned long flags; | ||
288 | |||
289 | spin_lock_irqsave(&i8259A_lock, flags); | ||
290 | |||
291 | outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ | ||
292 | outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ | ||
293 | |||
294 | spin_unlock_irqrestore(&i8259A_lock, flags); | ||
295 | } | ||
296 | |||
297 | void unmask_8259A(void) | ||
298 | { | ||
299 | unsigned long flags; | ||
300 | |||
301 | spin_lock_irqsave(&i8259A_lock, flags); | ||
302 | |||
303 | outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */ | ||
304 | outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */ | ||
305 | |||
306 | spin_unlock_irqrestore(&i8259A_lock, flags); | ||
307 | } | ||
308 | |||
285 | void init_8259A(int auto_eoi) | 309 | void init_8259A(int auto_eoi) |
286 | { | 310 | { |
287 | unsigned long flags; | 311 | unsigned long flags; |
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 09cddb57bec4..26ea3ea3fb30 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <asm/nmi.h> | 46 | #include <asm/nmi.h> |
47 | #include <asm/msidef.h> | 47 | #include <asm/msidef.h> |
48 | #include <asm/hypertransport.h> | 48 | #include <asm/hypertransport.h> |
49 | #include <asm/setup.h> | ||
49 | 50 | ||
50 | #include <mach_apic.h> | 51 | #include <mach_apic.h> |
51 | #include <mach_apicdef.h> | 52 | #include <mach_apicdef.h> |
@@ -1490,7 +1491,7 @@ void /*__init*/ print_local_APIC(void *dummy) | |||
1490 | smp_processor_id(), hard_smp_processor_id()); | 1491 | smp_processor_id(), hard_smp_processor_id()); |
1491 | v = apic_read(APIC_ID); | 1492 | v = apic_read(APIC_ID); |
1492 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, | 1493 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, |
1493 | GET_APIC_ID(read_apic_id())); | 1494 | GET_APIC_ID(v)); |
1494 | v = apic_read(APIC_LVR); | 1495 | v = apic_read(APIC_LVR); |
1495 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); | 1496 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); |
1496 | ver = GET_APIC_VERSION(v); | 1497 | ver = GET_APIC_VERSION(v); |
@@ -1698,8 +1699,7 @@ void disable_IO_APIC(void) | |||
1698 | entry.dest_mode = 0; /* Physical */ | 1699 | entry.dest_mode = 0; /* Physical */ |
1699 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ | 1700 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ |
1700 | entry.vector = 0; | 1701 | entry.vector = 0; |
1701 | entry.dest.physical.physical_dest = | 1702 | entry.dest.physical.physical_dest = read_apic_id(); |
1702 | GET_APIC_ID(read_apic_id()); | ||
1703 | 1703 | ||
1704 | /* | 1704 | /* |
1705 | * Add it to the IO-APIC irq-routing table: | 1705 | * Add it to the IO-APIC irq-routing table: |
@@ -1725,10 +1725,8 @@ static void __init setup_ioapic_ids_from_mpc(void) | |||
1725 | unsigned char old_id; | 1725 | unsigned char old_id; |
1726 | unsigned long flags; | 1726 | unsigned long flags; |
1727 | 1727 | ||
1728 | #ifdef CONFIG_X86_NUMAQ | 1728 | if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids()) |
1729 | if (found_numaq) | ||
1730 | return; | 1729 | return; |
1731 | #endif | ||
1732 | 1730 | ||
1733 | /* | 1731 | /* |
1734 | * Don't check I/O APIC IDs for xAPIC systems. They have | 1732 | * Don't check I/O APIC IDs for xAPIC systems. They have |
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index 61a83b70c18f..e63282e78864 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <acpi/acpi_bus.h> | 37 | #include <acpi/acpi_bus.h> |
38 | #endif | 38 | #endif |
39 | #include <linux/bootmem.h> | 39 | #include <linux/bootmem.h> |
40 | #include <linux/dmar.h> | ||
40 | 41 | ||
41 | #include <asm/idle.h> | 42 | #include <asm/idle.h> |
42 | #include <asm/io.h> | 43 | #include <asm/io.h> |
@@ -49,6 +50,7 @@ | |||
49 | #include <asm/nmi.h> | 50 | #include <asm/nmi.h> |
50 | #include <asm/msidef.h> | 51 | #include <asm/msidef.h> |
51 | #include <asm/hypertransport.h> | 52 | #include <asm/hypertransport.h> |
53 | #include <asm/irq_remapping.h> | ||
52 | 54 | ||
53 | #include <mach_ipi.h> | 55 | #include <mach_ipi.h> |
54 | #include <mach_apic.h> | 56 | #include <mach_apic.h> |
@@ -108,6 +110,9 @@ static DEFINE_SPINLOCK(vector_lock); | |||
108 | */ | 110 | */ |
109 | int nr_ioapic_registers[MAX_IO_APICS]; | 111 | int nr_ioapic_registers[MAX_IO_APICS]; |
110 | 112 | ||
113 | /* I/O APIC RTE contents at the OS boot up */ | ||
114 | struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS]; | ||
115 | |||
111 | /* I/O APIC entries */ | 116 | /* I/O APIC entries */ |
112 | struct mp_config_ioapic mp_ioapics[MAX_IO_APICS]; | 117 | struct mp_config_ioapic mp_ioapics[MAX_IO_APICS]; |
113 | int nr_ioapics; | 118 | int nr_ioapics; |
@@ -303,7 +308,12 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector) | |||
303 | pin = entry->pin; | 308 | pin = entry->pin; |
304 | if (pin == -1) | 309 | if (pin == -1) |
305 | break; | 310 | break; |
306 | io_apic_write(apic, 0x11 + pin*2, dest); | 311 | /* |
312 | * With interrupt-remapping, destination information comes | ||
313 | * from the interrupt-remapping table entry. | ||
314 | */ | ||
315 | if (!irq_remapped(irq)) | ||
316 | io_apic_write(apic, 0x11 + pin*2, dest); | ||
307 | reg = io_apic_read(apic, 0x10 + pin*2); | 317 | reg = io_apic_read(apic, 0x10 + pin*2); |
308 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; | 318 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; |
309 | reg |= vector; | 319 | reg |= vector; |
@@ -440,6 +450,69 @@ static void clear_IO_APIC (void) | |||
440 | clear_IO_APIC_pin(apic, pin); | 450 | clear_IO_APIC_pin(apic, pin); |
441 | } | 451 | } |
442 | 452 | ||
453 | /* | ||
454 | * Save and mask all the unmasked IO-APIC RTEs. | ||
455 | */ | ||
456 | int save_mask_IO_APIC_setup(void) | ||
457 | { | ||
458 | union IO_APIC_reg_01 reg_01; | ||
459 | unsigned long flags; | ||
460 | int apic, pin; | ||
461 | |||
462 | /* | ||
463 | * The number of IO-APIC IRQ registers (== #pins): | ||
464 | */ | ||
465 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
466 | spin_lock_irqsave(&ioapic_lock, flags); | ||
467 | reg_01.raw = io_apic_read(apic, 1); | ||
468 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
469 | nr_ioapic_registers[apic] = reg_01.bits.entries+1; | ||
470 | } | ||
471 | |||
472 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
473 | early_ioapic_entries[apic] = | ||
474 | kzalloc(sizeof(struct IO_APIC_route_entry) * | ||
475 | nr_ioapic_registers[apic], GFP_KERNEL); | ||
476 | if (!early_ioapic_entries[apic]) | ||
477 | return -ENOMEM; | ||
478 | } | ||
479 | |||
480 | for (apic = 0; apic < nr_ioapics; apic++) | ||
481 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { | ||
482 | struct IO_APIC_route_entry entry; | ||
483 | |||
484 | entry = early_ioapic_entries[apic][pin] = | ||
485 | ioapic_read_entry(apic, pin); | ||
486 | if (!entry.mask) { | ||
487 | entry.mask = 1; | ||
488 | ioapic_write_entry(apic, pin, entry); | ||
489 | } | ||
490 | } | ||
491 | return 0; | ||
492 | } | ||
493 | |||
494 | void restore_IO_APIC_setup(void) | ||
495 | { | ||
496 | int apic, pin; | ||
497 | |||
498 | for (apic = 0; apic < nr_ioapics; apic++) | ||
499 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) | ||
500 | ioapic_write_entry(apic, pin, | ||
501 | early_ioapic_entries[apic][pin]); | ||
502 | } | ||
503 | |||
504 | void reinit_intr_remapped_IO_APIC(int intr_remapping) | ||
505 | { | ||
506 | /* | ||
507 | * for now plain restore of previous settings. | ||
508 | * TBD: In the case of OS enabling interrupt-remapping, | ||
509 | * IO-APIC RTE's need to be setup to point to interrupt-remapping | ||
510 | * table entries. for now, do a plain restore, and wait for | ||
511 | * the setup_IO_APIC_irqs() to do proper initialization. | ||
512 | */ | ||
513 | restore_IO_APIC_setup(); | ||
514 | } | ||
515 | |||
443 | int skip_ioapic_setup; | 516 | int skip_ioapic_setup; |
444 | int ioapic_force; | 517 | int ioapic_force; |
445 | 518 | ||
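save_mask_IO_APIC_setup(), restore_IO_APIC_setup() and reinit_intr_remapped_IO_APIC() above are the IO-APIC half of the dance that also uses mask_8259A()/unmask_8259A() from i8259.c: quiesce all legacy interrupt sources, flip the hardware into remapped mode, then rebuild or restore the routing entries. A sketch of the intended call order; enable_intr_remapping_hw() is a hypothetical stand-in for the remapping-hardware enable that lives outside this file:

/* Prototypes repeated so the sketch stands alone. */
void mask_8259A(void);
void unmask_8259A(void);
int  save_mask_IO_APIC_setup(void);
void restore_IO_APIC_setup(void);
void reinit_intr_remapped_IO_APIC(int intr_remapping);
int  enable_intr_remapping_hw(void);		/* hypothetical */

static void switch_to_intr_remapping(void)
{
	mask_8259A();				/* silence the legacy PIC   */
	if (save_mask_IO_APIC_setup())		/* save + mask all RTEs     */
		goto fail;
	if (enable_intr_remapping_hw())		/* flip the hardware mode   */
		goto fail;
	reinit_intr_remapped_IO_APIC(1);	/* rebuild/restore the RTEs */
	unmask_8259A();
	return;
fail:
	restore_IO_APIC_setup();
	unmask_8259A();
}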
@@ -839,18 +912,98 @@ void __setup_vector_irq(int cpu) | |||
839 | } | 912 | } |
840 | 913 | ||
841 | static struct irq_chip ioapic_chip; | 914 | static struct irq_chip ioapic_chip; |
915 | #ifdef CONFIG_INTR_REMAP | ||
916 | static struct irq_chip ir_ioapic_chip; | ||
917 | #endif | ||
842 | 918 | ||
843 | static void ioapic_register_intr(int irq, unsigned long trigger) | 919 | static void ioapic_register_intr(int irq, unsigned long trigger) |
844 | { | 920 | { |
845 | if (trigger) { | 921 | if (trigger) |
846 | irq_desc[irq].status |= IRQ_LEVEL; | 922 | irq_desc[irq].status |= IRQ_LEVEL; |
847 | set_irq_chip_and_handler_name(irq, &ioapic_chip, | 923 | else |
848 | handle_fasteoi_irq, "fasteoi"); | ||
849 | } else { | ||
850 | irq_desc[irq].status &= ~IRQ_LEVEL; | 924 | irq_desc[irq].status &= ~IRQ_LEVEL; |
925 | |||
926 | #ifdef CONFIG_INTR_REMAP | ||
927 | if (irq_remapped(irq)) { | ||
928 | irq_desc[irq].status |= IRQ_MOVE_PCNTXT; | ||
929 | if (trigger) | ||
930 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, | ||
931 | handle_fasteoi_irq, | ||
932 | "fasteoi"); | ||
933 | else | ||
934 | set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, | ||
935 | handle_edge_irq, "edge"); | ||
936 | return; | ||
937 | } | ||
938 | #endif | ||
939 | if (trigger) | ||
940 | set_irq_chip_and_handler_name(irq, &ioapic_chip, | ||
941 | handle_fasteoi_irq, | ||
942 | "fasteoi"); | ||
943 | else | ||
851 | set_irq_chip_and_handler_name(irq, &ioapic_chip, | 944 | set_irq_chip_and_handler_name(irq, &ioapic_chip, |
852 | handle_edge_irq, "edge"); | 945 | handle_edge_irq, "edge"); |
946 | } | ||
947 | |||
948 | static int setup_ioapic_entry(int apic, int irq, | ||
949 | struct IO_APIC_route_entry *entry, | ||
950 | unsigned int destination, int trigger, | ||
951 | int polarity, int vector) | ||
952 | { | ||
953 | /* | ||
954 | * add it to the IO-APIC irq-routing table: | ||
955 | */ | ||
956 | memset(entry, 0, sizeof(*entry)); | ||
957 | |||
958 | #ifdef CONFIG_INTR_REMAP | ||
959 | if (intr_remapping_enabled) { | ||
960 | struct intel_iommu *iommu = map_ioapic_to_ir(apic); | ||
961 | struct irte irte; | ||
962 | struct IR_IO_APIC_route_entry *ir_entry = | ||
963 | (struct IR_IO_APIC_route_entry *) entry; | ||
964 | int index; | ||
965 | |||
966 | if (!iommu) | ||
967 | panic("No mapping iommu for ioapic %d\n", apic); | ||
968 | |||
969 | index = alloc_irte(iommu, irq, 1); | ||
970 | if (index < 0) | ||
971 | panic("Failed to allocate IRTE for ioapic %d\n", apic); | ||
972 | |||
973 | memset(&irte, 0, sizeof(irte)); | ||
974 | |||
975 | irte.present = 1; | ||
976 | irte.dst_mode = INT_DEST_MODE; | ||
977 | irte.trigger_mode = trigger; | ||
978 | irte.dlvry_mode = INT_DELIVERY_MODE; | ||
979 | irte.vector = vector; | ||
980 | irte.dest_id = IRTE_DEST(destination); | ||
981 | |||
982 | modify_irte(irq, &irte); | ||
983 | |||
984 | ir_entry->index2 = (index >> 15) & 0x1; | ||
985 | ir_entry->zero = 0; | ||
986 | ir_entry->format = 1; | ||
987 | ir_entry->index = (index & 0x7fff); | ||
988 | } else | ||
989 | #endif | ||
990 | { | ||
991 | entry->delivery_mode = INT_DELIVERY_MODE; | ||
992 | entry->dest_mode = INT_DEST_MODE; | ||
993 | entry->dest = destination; | ||
853 | } | 994 | } |
995 | |||
996 | entry->mask = 0; /* enable IRQ */ | ||
997 | entry->trigger = trigger; | ||
998 | entry->polarity = polarity; | ||
999 | entry->vector = vector; | ||
1000 | |||
1001 | /* Mask level triggered irqs. | ||
1002 | * Use IRQ_DELAYED_DISABLE for edge triggered irqs. | ||
1003 | */ | ||
1004 | if (trigger) | ||
1005 | entry->mask = 1; | ||
1006 | return 0; | ||
854 | } | 1007 | } |
855 | 1008 | ||
856 | static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, | 1009 | static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, |
@@ -875,24 +1028,15 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, | |||
875 | apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector, | 1028 | apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector, |
876 | irq, trigger, polarity); | 1029 | irq, trigger, polarity); |
877 | 1030 | ||
878 | /* | ||
879 | * add it to the IO-APIC irq-routing table: | ||
880 | */ | ||
881 | memset(&entry,0,sizeof(entry)); | ||
882 | 1031 | ||
883 | entry.delivery_mode = INT_DELIVERY_MODE; | 1032 | if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, |
884 | entry.dest_mode = INT_DEST_MODE; | 1033 | cpu_mask_to_apicid(mask), trigger, polarity, |
885 | entry.dest = cpu_mask_to_apicid(mask); | 1034 | cfg->vector)) { |
886 | entry.mask = 0; /* enable IRQ */ | 1035 | printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", |
887 | entry.trigger = trigger; | 1036 | mp_ioapics[apic].mp_apicid, pin); |
888 | entry.polarity = polarity; | 1037 | __clear_irq_vector(irq); |
889 | entry.vector = cfg->vector; | 1038 | return; |
890 | 1039 | } | |
891 | /* Mask level triggered irqs. | ||
892 | * Use IRQ_DELAYED_DISABLE for edge triggered irqs. | ||
893 | */ | ||
894 | if (trigger) | ||
895 | entry.mask = 1; | ||
896 | 1040 | ||
897 | ioapic_register_intr(irq, trigger); | 1041 | ioapic_register_intr(irq, trigger); |
898 | if (irq < 16) | 1042 | if (irq < 16) |
@@ -944,6 +1088,9 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin, | |||
944 | { | 1088 | { |
945 | struct IO_APIC_route_entry entry; | 1089 | struct IO_APIC_route_entry entry; |
946 | 1090 | ||
1091 | if (intr_remapping_enabled) | ||
1092 | return; | ||
1093 | |||
947 | memset(&entry, 0, sizeof(entry)); | 1094 | memset(&entry, 0, sizeof(entry)); |
948 | 1095 | ||
949 | /* | 1096 | /* |
@@ -1090,6 +1237,7 @@ static __apicdebuginit void print_APIC_bitfield (int base) | |||
1090 | void __apicdebuginit print_local_APIC(void * dummy) | 1237 | void __apicdebuginit print_local_APIC(void * dummy) |
1091 | { | 1238 | { |
1092 | unsigned int v, ver, maxlvt; | 1239 | unsigned int v, ver, maxlvt; |
1240 | unsigned long icr; | ||
1093 | 1241 | ||
1094 | if (apic_verbosity == APIC_QUIET) | 1242 | if (apic_verbosity == APIC_QUIET) |
1095 | return; | 1243 | return; |
@@ -1097,7 +1245,7 @@ void __apicdebuginit print_local_APIC(void * dummy) | |||
1097 | printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", | 1245 | printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", |
1098 | smp_processor_id(), hard_smp_processor_id()); | 1246 | smp_processor_id(), hard_smp_processor_id()); |
1099 | v = apic_read(APIC_ID); | 1247 | v = apic_read(APIC_ID); |
1100 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(read_apic_id())); | 1248 | printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id()); |
1101 | v = apic_read(APIC_LVR); | 1249 | v = apic_read(APIC_LVR); |
1102 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); | 1250 | printk(KERN_INFO "... APIC VERSION: %08x\n", v); |
1103 | ver = GET_APIC_VERSION(v); | 1251 | ver = GET_APIC_VERSION(v); |
@@ -1133,10 +1281,9 @@ void __apicdebuginit print_local_APIC(void * dummy) | |||
1133 | v = apic_read(APIC_ESR); | 1281 | v = apic_read(APIC_ESR); |
1134 | printk(KERN_DEBUG "... APIC ESR: %08x\n", v); | 1282 | printk(KERN_DEBUG "... APIC ESR: %08x\n", v); |
1135 | 1283 | ||
1136 | v = apic_read(APIC_ICR); | 1284 | icr = apic_icr_read(); |
1137 | printk(KERN_DEBUG "... APIC ICR: %08x\n", v); | 1285 | printk(KERN_DEBUG "... APIC ICR: %08x\n", icr); |
1138 | v = apic_read(APIC_ICR2); | 1286 | printk(KERN_DEBUG "... APIC ICR2: %08x\n", icr >> 32); |
1139 | printk(KERN_DEBUG "... APIC ICR2: %08x\n", v); | ||
1140 | 1287 | ||
1141 | v = apic_read(APIC_LVTT); | 1288 | v = apic_read(APIC_LVTT); |
1142 | printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); | 1289 | printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); |
@@ -1291,7 +1438,7 @@ void disable_IO_APIC(void) | |||
1291 | entry.dest_mode = 0; /* Physical */ | 1438 | entry.dest_mode = 0; /* Physical */ |
1292 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ | 1439 | entry.delivery_mode = dest_ExtINT; /* ExtInt */ |
1293 | entry.vector = 0; | 1440 | entry.vector = 0; |
1294 | entry.dest = GET_APIC_ID(read_apic_id()); | 1441 | entry.dest = read_apic_id(); |
1295 | 1442 | ||
1296 | /* | 1443 | /* |
1297 | * Add it to the IO-APIC irq-routing table: | 1444 | * Add it to the IO-APIC irq-routing table: |
@@ -1397,6 +1544,147 @@ static int ioapic_retrigger_irq(unsigned int irq) | |||
1397 | */ | 1544 | */ |
1398 | 1545 | ||
1399 | #ifdef CONFIG_SMP | 1546 | #ifdef CONFIG_SMP |
1547 | |||
1548 | #ifdef CONFIG_INTR_REMAP | ||
1549 | static void ir_irq_migration(struct work_struct *work); | ||
1550 | |||
1551 | static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration); | ||
1552 | |||
1553 | /* | ||
1554 | * Migrate the IO-APIC irq in the presence of intr-remapping. | ||
1555 | * | ||
1556 | * For edge triggered, irq migration is a simple atomic update (of vector | ||
1557 | * and cpu destination) of the IRTE and a flush of the hardware cache. | ||
1558 | * | ||
1559 | * For level triggered, we need to modify the io-apic RTE as well with the | ||
1560 | * updated vector information, along with modifying the IRTE with the vector | ||
1561 | * and destination. So irq migration for level triggered is a little more | ||
1562 | * complex than edge triggered migration. But the good news is that we use | ||
1563 | * the same algorithm for level triggered migration as we have today; the | ||
1564 | * only difference is that we now initiate the irq migration from process | ||
1565 | * context instead of interrupt context. | ||
1566 | * | ||
1567 | * In the future, when we do a directed EOI (combined with cpu EOI broadcast | ||
1568 | * suppression) to the IO-APIC, level triggered irq migration will also be | ||
1569 | * as simple as edge triggered migration and we can do the irq migration | ||
1570 | * with a simple atomic update to IO-APIC RTE. | ||
1571 | */ | ||
1572 | static void migrate_ioapic_irq(int irq, cpumask_t mask) | ||
1573 | { | ||
1574 | struct irq_cfg *cfg = irq_cfg + irq; | ||
1575 | struct irq_desc *desc = irq_desc + irq; | ||
1576 | cpumask_t tmp, cleanup_mask; | ||
1577 | struct irte irte; | ||
1578 | int modify_ioapic_rte = desc->status & IRQ_LEVEL; | ||
1579 | unsigned int dest; | ||
1580 | unsigned long flags; | ||
1581 | |||
1582 | cpus_and(tmp, mask, cpu_online_map); | ||
1583 | if (cpus_empty(tmp)) | ||
1584 | return; | ||
1585 | |||
1586 | if (get_irte(irq, &irte)) | ||
1587 | return; | ||
1588 | |||
1589 | if (assign_irq_vector(irq, mask)) | ||
1590 | return; | ||
1591 | |||
1592 | cpus_and(tmp, cfg->domain, mask); | ||
1593 | dest = cpu_mask_to_apicid(tmp); | ||
1594 | |||
1595 | if (modify_ioapic_rte) { | ||
1596 | spin_lock_irqsave(&ioapic_lock, flags); | ||
1597 | __target_IO_APIC_irq(irq, dest, cfg->vector); | ||
1598 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
1599 | } | ||
1600 | |||
1601 | irte.vector = cfg->vector; | ||
1602 | irte.dest_id = IRTE_DEST(dest); | ||
1603 | |||
1604 | /* | ||
1605 | * Modify the IRTE and flush the interrupt entry cache. | ||
1606 | */ | ||
1607 | modify_irte(irq, &irte); | ||
1608 | |||
1609 | if (cfg->move_in_progress) { | ||
1610 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); | ||
1611 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); | ||
1612 | send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
1613 | cfg->move_in_progress = 0; | ||
1614 | } | ||
1615 | |||
1616 | irq_desc[irq].affinity = mask; | ||
1617 | } | ||
1618 | |||
1619 | static int migrate_irq_remapped_level(int irq) | ||
1620 | { | ||
1621 | int ret = -1; | ||
1622 | |||
1623 | mask_IO_APIC_irq(irq); | ||
1624 | |||
1625 | if (io_apic_level_ack_pending(irq)) { | ||
1626 | /* | ||
1627 | * Interrupt in progress. Migrating irq now will change the | ||
1628 | * vector information in the IO-APIC RTE and that will confuse | ||
1629 | * the EOI broadcast performed by the cpu. | ||
1630 | * So, delay the irq migration to the next instance. | ||
1631 | */ | ||
1632 | schedule_delayed_work(&ir_migration_work, 1); | ||
1633 | goto unmask; | ||
1634 | } | ||
1635 | |||
1636 | /* everything is clear, we have the right of way */ | ||
1637 | migrate_ioapic_irq(irq, irq_desc[irq].pending_mask); | ||
1638 | |||
1639 | ret = 0; | ||
1640 | irq_desc[irq].status &= ~IRQ_MOVE_PENDING; | ||
1641 | cpus_clear(irq_desc[irq].pending_mask); | ||
1642 | |||
1643 | unmask: | ||
1644 | unmask_IO_APIC_irq(irq); | ||
1645 | return ret; | ||
1646 | } | ||
1647 | |||
1648 | static void ir_irq_migration(struct work_struct *work) | ||
1649 | { | ||
1650 | int irq; | ||
1651 | |||
1652 | for (irq = 0; irq < NR_IRQS; irq++) { | ||
1653 | struct irq_desc *desc = irq_desc + irq; | ||
1654 | if (desc->status & IRQ_MOVE_PENDING) { | ||
1655 | unsigned long flags; | ||
1656 | |||
1657 | spin_lock_irqsave(&desc->lock, flags); | ||
1658 | if (!desc->chip->set_affinity || | ||
1659 | !(desc->status & IRQ_MOVE_PENDING)) { | ||
1660 | desc->status &= ~IRQ_MOVE_PENDING; | ||
1661 | spin_unlock_irqrestore(&desc->lock, flags); | ||
1662 | continue; | ||
1663 | } | ||
1664 | |||
1665 | desc->chip->set_affinity(irq, | ||
1666 | irq_desc[irq].pending_mask); | ||
1667 | spin_unlock_irqrestore(&desc->lock, flags); | ||
1668 | } | ||
1669 | } | ||
1670 | } | ||
1671 | |||
1672 | /* | ||
1673 | * Migrates the IRQ destination in the process context. | ||
1674 | */ | ||
1675 | static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) | ||
1676 | { | ||
1677 | if (irq_desc[irq].status & IRQ_LEVEL) { | ||
1678 | irq_desc[irq].status |= IRQ_MOVE_PENDING; | ||
1679 | irq_desc[irq].pending_mask = mask; | ||
1680 | migrate_irq_remapped_level(irq); | ||
1681 | return; | ||
1682 | } | ||
1683 | |||
1684 | migrate_ioapic_irq(irq, mask); | ||
1685 | } | ||
1686 | #endif | ||
1687 | |||
1400 | asmlinkage void smp_irq_move_cleanup_interrupt(void) | 1688 | asmlinkage void smp_irq_move_cleanup_interrupt(void) |
1401 | { | 1689 | { |
1402 | unsigned vector, me; | 1690 | unsigned vector, me; |
@@ -1453,6 +1741,17 @@ static void irq_complete_move(unsigned int irq) | |||
1453 | #else | 1741 | #else |
1454 | static inline void irq_complete_move(unsigned int irq) {} | 1742 | static inline void irq_complete_move(unsigned int irq) {} |
1455 | #endif | 1743 | #endif |
1744 | #ifdef CONFIG_INTR_REMAP | ||
1745 | static void ack_x2apic_level(unsigned int irq) | ||
1746 | { | ||
1747 | ack_x2APIC_irq(); | ||
1748 | } | ||
1749 | |||
1750 | static void ack_x2apic_edge(unsigned int irq) | ||
1751 | { | ||
1752 | ack_x2APIC_irq(); | ||
1753 | } | ||
1754 | #endif | ||
1456 | 1755 | ||
1457 | static void ack_apic_edge(unsigned int irq) | 1756 | static void ack_apic_edge(unsigned int irq) |
1458 | { | 1757 | { |
@@ -1527,6 +1826,21 @@ static struct irq_chip ioapic_chip __read_mostly = { | |||
1527 | .retrigger = ioapic_retrigger_irq, | 1826 | .retrigger = ioapic_retrigger_irq, |
1528 | }; | 1827 | }; |
1529 | 1828 | ||
1829 | #ifdef CONFIG_INTR_REMAP | ||
1830 | static struct irq_chip ir_ioapic_chip __read_mostly = { | ||
1831 | .name = "IR-IO-APIC", | ||
1832 | .startup = startup_ioapic_irq, | ||
1833 | .mask = mask_IO_APIC_irq, | ||
1834 | .unmask = unmask_IO_APIC_irq, | ||
1835 | .ack = ack_x2apic_edge, | ||
1836 | .eoi = ack_x2apic_level, | ||
1837 | #ifdef CONFIG_SMP | ||
1838 | .set_affinity = set_ir_ioapic_affinity_irq, | ||
1839 | #endif | ||
1840 | .retrigger = ioapic_retrigger_irq, | ||
1841 | }; | ||
1842 | #endif | ||
1843 | |||
1530 | static inline void init_IO_APIC_traps(void) | 1844 | static inline void init_IO_APIC_traps(void) |
1531 | { | 1845 | { |
1532 | int irq; | 1846 | int irq; |
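Both ack_x2apic_edge() and ack_x2apic_level() above funnel into ack_x2APIC_irq(), i.e. an EOI issued through the x2APIC MSR interface instead of the MMIO page. A sketch of what that is assumed to expand to (the 0x800 MSR base and the offset-to-MSR mapping follow the x2APIC convention; treat the details as assumptions, and note the WRMSR only works in ring 0):

#define APIC_BASE_MSR	0x800
#define APIC_EOI	0xB0

/* Sketch: write an MSR; mirrors the kernel's wrmsr primitive. */
static inline void wrmsr_sketch(unsigned int msr, unsigned int lo,
				unsigned int hi)
{
	__asm__ __volatile__("wrmsr" : : "c"(msr), "a"(lo), "d"(hi));
}

static inline void ack_x2APIC_irq_sketch(void)
{
	/* x2APIC maps MMIO offset 0xB0 to MSR 0x80B; EOI data is 0 */
	wrmsr_sketch(APIC_BASE_MSR + (APIC_EOI >> 4), 0, 0);
}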
@@ -1712,6 +2026,8 @@ static inline void __init check_timer(void) | |||
1712 | * 8259A. | 2026 | * 8259A. |
1713 | */ | 2027 | */ |
1714 | if (pin1 == -1) { | 2028 | if (pin1 == -1) { |
2029 | if (intr_remapping_enabled) | ||
2030 | panic("BIOS bug: timer not connected to IO-APIC"); | ||
1715 | pin1 = pin2; | 2031 | pin1 = pin2; |
1716 | apic1 = apic2; | 2032 | apic1 = apic2; |
1717 | no_pin1 = 1; | 2033 | no_pin1 = 1; |
@@ -1738,6 +2054,8 @@ static inline void __init check_timer(void) | |||
1738 | clear_IO_APIC_pin(0, pin1); | 2054 | clear_IO_APIC_pin(0, pin1); |
1739 | goto out; | 2055 | goto out; |
1740 | } | 2056 | } |
2057 | if (intr_remapping_enabled) | ||
2058 | panic("timer doesn't work through Interrupt-remapped IO-APIC"); | ||
1741 | clear_IO_APIC_pin(apic1, pin1); | 2059 | clear_IO_APIC_pin(apic1, pin1); |
1742 | if (!no_pin1) | 2060 | if (!no_pin1) |
1743 | apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " | 2061 | apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " |
@@ -1977,6 +2295,9 @@ void destroy_irq(unsigned int irq) | |||
1977 | 2295 | ||
1978 | dynamic_irq_cleanup(irq); | 2296 | dynamic_irq_cleanup(irq); |
1979 | 2297 | ||
2298 | #ifdef CONFIG_INTR_REMAP | ||
2299 | free_irte(irq); | ||
2300 | #endif | ||
1980 | spin_lock_irqsave(&vector_lock, flags); | 2301 | spin_lock_irqsave(&vector_lock, flags); |
1981 | __clear_irq_vector(irq); | 2302 | __clear_irq_vector(irq); |
1982 | spin_unlock_irqrestore(&vector_lock, flags); | 2303 | spin_unlock_irqrestore(&vector_lock, flags); |
@@ -1995,11 +2316,42 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms | |||
1995 | 2316 | ||
1996 | tmp = TARGET_CPUS; | 2317 | tmp = TARGET_CPUS; |
1997 | err = assign_irq_vector(irq, tmp); | 2318 | err = assign_irq_vector(irq, tmp); |
1998 | if (!err) { | 2319 | if (err) |
1999 | cpus_and(tmp, cfg->domain, tmp); | 2320 | return err; |
2000 | dest = cpu_mask_to_apicid(tmp); | 2321 | |
2322 | cpus_and(tmp, cfg->domain, tmp); | ||
2323 | dest = cpu_mask_to_apicid(tmp); | ||
2324 | |||
2325 | #ifdef CONFIG_INTR_REMAP | ||
2326 | if (irq_remapped(irq)) { | ||
2327 | struct irte irte; | ||
2328 | int ir_index; | ||
2329 | u16 sub_handle; | ||
2330 | |||
2331 | ir_index = map_irq_to_irte_handle(irq, &sub_handle); | ||
2332 | BUG_ON(ir_index == -1); | ||
2333 | |||
2334 | memset(&irte, 0, sizeof(irte)); | ||
2335 | |||
2336 | irte.present = 1; | ||
2337 | irte.dst_mode = INT_DEST_MODE; | ||
2338 | irte.trigger_mode = 0; /* edge */ | ||
2339 | irte.dlvry_mode = INT_DELIVERY_MODE; | ||
2340 | irte.vector = cfg->vector; | ||
2341 | irte.dest_id = IRTE_DEST(dest); | ||
2342 | |||
2343 | modify_irte(irq, &irte); | ||
2001 | 2344 | ||
2002 | msg->address_hi = MSI_ADDR_BASE_HI; | 2345 | msg->address_hi = MSI_ADDR_BASE_HI; |
2346 | msg->data = sub_handle; | ||
2347 | msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT | | ||
2348 | MSI_ADDR_IR_SHV | | ||
2349 | MSI_ADDR_IR_INDEX1(ir_index) | | ||
2350 | MSI_ADDR_IR_INDEX2(ir_index); | ||
2351 | } else | ||
2352 | #endif | ||
2353 | { | ||
2354 | msg->address_hi = MSI_ADDR_BASE_HI; | ||
2003 | msg->address_lo = | 2355 | msg->address_lo = |
2004 | MSI_ADDR_BASE_LO | | 2356 | MSI_ADDR_BASE_LO | |
2005 | ((INT_DEST_MODE == 0) ? | 2357 | ((INT_DEST_MODE == 0) ? |
@@ -2049,6 +2401,55 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
2049 | write_msi_msg(irq, &msg); | 2401 | write_msi_msg(irq, &msg); |
2050 | irq_desc[irq].affinity = mask; | 2402 | irq_desc[irq].affinity = mask; |
2051 | } | 2403 | } |
2404 | |||
2405 | #ifdef CONFIG_INTR_REMAP | ||
2406 | /* | ||
2407 | * Migrate the MSI irq to another cpumask. This migration is | ||
2408 | * done in the process context using interrupt-remapping hardware. | ||
2409 | */ | ||
2410 | static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | ||
2411 | { | ||
2412 | struct irq_cfg *cfg = irq_cfg + irq; | ||
2413 | unsigned int dest; | ||
2414 | cpumask_t tmp, cleanup_mask; | ||
2415 | struct irte irte; | ||
2416 | |||
2417 | cpus_and(tmp, mask, cpu_online_map); | ||
2418 | if (cpus_empty(tmp)) | ||
2419 | return; | ||
2420 | |||
2421 | if (get_irte(irq, &irte)) | ||
2422 | return; | ||
2423 | |||
2424 | if (assign_irq_vector(irq, mask)) | ||
2425 | return; | ||
2426 | |||
2427 | cpus_and(tmp, cfg->domain, mask); | ||
2428 | dest = cpu_mask_to_apicid(tmp); | ||
2429 | |||
2430 | irte.vector = cfg->vector; | ||
2431 | irte.dest_id = IRTE_DEST(dest); | ||
2432 | |||
2433 | /* | ||
2434 | * atomically update the IRTE with the new destination and vector. | ||
2435 | */ | ||
2436 | modify_irte(irq, &irte); | ||
2437 | |||
2438 | /* | ||
2439 | * After this point, all the interrupts will start arriving | ||
2440 | * at the new destination. So, time to cleanup the previous | ||
2441 | * vector allocation. | ||
2442 | */ | ||
2443 | if (cfg->move_in_progress) { | ||
2444 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); | ||
2445 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); | ||
2446 | send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
2447 | cfg->move_in_progress = 0; | ||
2448 | } | ||
2449 | |||
2450 | irq_desc[irq].affinity = mask; | ||
2451 | } | ||
2452 | #endif | ||
2052 | #endif /* CONFIG_SMP */ | 2453 | #endif /* CONFIG_SMP */ |
2053 | 2454 | ||
2054 | /* | 2455 | /* |
@@ -2066,26 +2467,157 @@ static struct irq_chip msi_chip = { | |||
2066 | .retrigger = ioapic_retrigger_irq, | 2467 | .retrigger = ioapic_retrigger_irq, |
2067 | }; | 2468 | }; |
2068 | 2469 | ||
2069 | int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) | 2470 | #ifdef CONFIG_INTR_REMAP |
2471 | static struct irq_chip msi_ir_chip = { | ||
2472 | .name = "IR-PCI-MSI", | ||
2473 | .unmask = unmask_msi_irq, | ||
2474 | .mask = mask_msi_irq, | ||
2475 | .ack = ack_x2apic_edge, | ||
2476 | #ifdef CONFIG_SMP | ||
2477 | .set_affinity = ir_set_msi_irq_affinity, | ||
2478 | #endif | ||
2479 | .retrigger = ioapic_retrigger_irq, | ||
2480 | }; | ||
2481 | |||
2482 | /* | ||
2483 | * Map the PCI dev to the corresponding remapping hardware unit | ||
2484 | * and allocate 'nvec' consecutive interrupt-remapping table entries | ||
2485 | * in it. | ||
2486 | */ | ||
2487 | static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) | ||
2488 | { | ||
2489 | struct intel_iommu *iommu; | ||
2490 | int index; | ||
2491 | |||
2492 | iommu = map_dev_to_ir(dev); | ||
2493 | if (!iommu) { | ||
2494 | printk(KERN_ERR | ||
2495 | "Unable to map PCI %s to iommu\n", pci_name(dev)); | ||
2496 | return -ENOENT; | ||
2497 | } | ||
2498 | |||
2499 | index = alloc_irte(iommu, irq, nvec); | ||
2500 | if (index < 0) { | ||
2501 | printk(KERN_ERR | ||
2502 | "Unable to allocate %d IRTE for PCI %s\n", nvec, | ||
2503 | pci_name(dev)); | ||
2504 | return -ENOSPC; | ||
2505 | } | ||
2506 | return index; | ||
2507 | } | ||
2508 | #endif | ||
2509 | |||
2510 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq) | ||
2070 | { | 2511 | { |
2512 | int ret; | ||
2071 | struct msi_msg msg; | 2513 | struct msi_msg msg; |
2514 | |||
2515 | ret = msi_compose_msg(dev, irq, &msg); | ||
2516 | if (ret < 0) | ||
2517 | return ret; | ||
2518 | |||
2519 | set_irq_msi(irq, desc); | ||
2520 | write_msi_msg(irq, &msg); | ||
2521 | |||
2522 | #ifdef CONFIG_INTR_REMAP | ||
2523 | if (irq_remapped(irq)) { | ||
2524 | struct irq_desc *desc = irq_desc + irq; | ||
2525 | /* | ||
2526 | * irq migration in process context | ||
2527 | */ | ||
2528 | desc->status |= IRQ_MOVE_PCNTXT; | ||
2529 | set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); | ||
2530 | } else | ||
2531 | #endif | ||
2532 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); | ||
2533 | |||
2534 | return 0; | ||
2535 | } | ||
2536 | |||
2537 | int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) | ||
2538 | { | ||
2072 | int irq, ret; | 2539 | int irq, ret; |
2540 | |||
2073 | irq = create_irq(); | 2541 | irq = create_irq(); |
2074 | if (irq < 0) | 2542 | if (irq < 0) |
2075 | return irq; | 2543 | return irq; |
2076 | 2544 | ||
2077 | ret = msi_compose_msg(dev, irq, &msg); | 2545 | #ifdef CONFIG_INTR_REMAP |
2546 | if (!intr_remapping_enabled) | ||
2547 | goto no_ir; | ||
2548 | |||
2549 | ret = msi_alloc_irte(dev, irq, 1); | ||
2550 | if (ret < 0) | ||
2551 | goto error; | ||
2552 | no_ir: | ||
2553 | #endif | ||
2554 | ret = setup_msi_irq(dev, desc, irq); | ||
2078 | if (ret < 0) { | 2555 | if (ret < 0) { |
2079 | destroy_irq(irq); | 2556 | destroy_irq(irq); |
2080 | return ret; | 2557 | return ret; |
2081 | } | 2558 | } |
2559 | return 0; | ||
2082 | 2560 | ||
2083 | set_irq_msi(irq, desc); | 2561 | #ifdef CONFIG_INTR_REMAP |
2084 | write_msi_msg(irq, &msg); | 2562 | error: |
2563 | destroy_irq(irq); | ||
2564 | return ret; | ||
2565 | #endif | ||
2566 | } | ||
2085 | 2567 | ||
2086 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); | 2568 | int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) |
2569 | { | ||
2570 | int irq, ret, sub_handle; | ||
2571 | struct msi_desc *desc; | ||
2572 | #ifdef CONFIG_INTR_REMAP | ||
2573 | struct intel_iommu *iommu = NULL; | ||
2574 | int index = 0; | ||
2575 | #endif | ||
2087 | 2576 | ||
2577 | sub_handle = 0; | ||
2578 | list_for_each_entry(desc, &dev->msi_list, list) { | ||
2579 | irq = create_irq(); | ||
2580 | if (irq < 0) | ||
2581 | return irq; | ||
2582 | #ifdef CONFIG_INTR_REMAP | ||
2583 | if (!intr_remapping_enabled) | ||
2584 | goto no_ir; | ||
2585 | |||
2586 | if (!sub_handle) { | ||
2587 | /* | ||
2588 | * Allocate the consecutive block of IRTEs | ||
2589 | * for 'nvec'. | ||
2590 | */ | ||
2591 | index = msi_alloc_irte(dev, irq, nvec); | ||
2592 | if (index < 0) { | ||
2593 | ret = index; | ||
2594 | goto error; | ||
2595 | } | ||
2596 | } else { | ||
2597 | iommu = map_dev_to_ir(dev); | ||
2598 | if (!iommu) { | ||
2599 | ret = -ENOENT; | ||
2600 | goto error; | ||
2601 | } | ||
2602 | /* | ||
2604 | * Set up the mapping between the irq and the IRTE | ||
2605 | * base index, with the sub_handle pointing to the | ||
2606 | * appropriate interrupt-remapping table entry. | ||
2606 | */ | ||
2607 | set_irte_irq(irq, iommu, index, sub_handle); | ||
2608 | } | ||
2609 | no_ir: | ||
2610 | #endif | ||
2611 | ret = setup_msi_irq(dev, desc, irq); | ||
2612 | if (ret < 0) | ||
2613 | goto error; | ||
2614 | sub_handle++; | ||
2615 | } | ||
2088 | return 0; | 2616 | return 0; |
2617 | |||
2618 | error: | ||
2619 | destroy_irq(irq); | ||
2620 | return ret; | ||
2089 | } | 2621 | } |
2090 | 2622 | ||
2091 | void arch_teardown_msi_irq(unsigned int irq) | 2623 | void arch_teardown_msi_irq(unsigned int irq) |
@@ -2333,6 +2865,10 @@ void __init setup_ioapic_dest(void) | |||
2333 | setup_IO_APIC_irq(ioapic, pin, irq, | 2865 | setup_IO_APIC_irq(ioapic, pin, irq, |
2334 | irq_trigger(irq_entry), | 2866 | irq_trigger(irq_entry), |
2335 | irq_polarity(irq_entry)); | 2867 | irq_polarity(irq_entry)); |
2868 | #ifdef CONFIG_INTR_REMAP | ||
2869 | else if (intr_remapping_enabled) | ||
2870 | set_ir_ioapic_affinity_irq(irq, TARGET_CPUS); | ||
2871 | #endif | ||
2336 | else | 2872 | else |
2337 | set_ioapic_affinity_irq(irq, TARGET_CPUS); | 2873 | set_ioapic_affinity_irq(irq, TARGET_CPUS); |
2338 | } | 2874 | } |
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index 50e5e4a31c85..191914302744 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/thread_info.h> | 15 | #include <linux/thread_info.h> |
16 | #include <linux/syscalls.h> | 16 | #include <linux/syscalls.h> |
17 | #include <asm/syscalls.h> | ||
17 | 18 | ||
18 | /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ | 19 | /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ |
19 | static void set_bitmap(unsigned long *bitmap, unsigned int base, | 20 | static void set_bitmap(unsigned long *bitmap, unsigned int base, |
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index 3f7537b669d3..f1c688e46f35 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c | |||
@@ -20,6 +20,8 @@ | |||
20 | 20 | ||
21 | #ifdef CONFIG_X86_32 | 21 | #ifdef CONFIG_X86_32 |
22 | #include <mach_apic.h> | 22 | #include <mach_apic.h> |
23 | #include <mach_ipi.h> | ||
24 | |||
23 | /* | 25 | /* |
24 | * the following functions deal with sending IPIs between CPUs. | 26 | * the following functions deal with sending IPIs between CPUs. |
25 | * | 27 | * |
@@ -147,7 +149,6 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector) | |||
147 | } | 149 | } |
148 | 150 | ||
149 | /* must come after the send_IPI functions above for inlining */ | 151 | /* must come after the send_IPI functions above for inlining */ |
150 | #include <mach_ipi.h> | ||
151 | static int convert_apicid_to_cpu(int apic_id) | 152 | static int convert_apicid_to_cpu(int apic_id) |
152 | { | 153 | { |
153 | int i; | 154 | int i; |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 1cf8c1fcc088..b71e02d42f4f 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -325,7 +325,7 @@ skip: | |||
325 | for_each_online_cpu(j) | 325 | for_each_online_cpu(j) |
326 | seq_printf(p, "%10u ", | 326 | seq_printf(p, "%10u ", |
327 | per_cpu(irq_stat,j).irq_call_count); | 327 | per_cpu(irq_stat,j).irq_call_count); |
328 | seq_printf(p, " function call interrupts\n"); | 328 | seq_printf(p, " Function call interrupts\n"); |
329 | seq_printf(p, "TLB: "); | 329 | seq_printf(p, "TLB: "); |
330 | for_each_online_cpu(j) | 330 | for_each_online_cpu(j) |
331 | seq_printf(p, "%10u ", | 331 | seq_printf(p, "%10u ", |
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 1f78b238d8d2..f065fe9071b9 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c | |||
@@ -129,7 +129,7 @@ skip: | |||
129 | seq_printf(p, "CAL: "); | 129 | seq_printf(p, "CAL: "); |
130 | for_each_online_cpu(j) | 130 | for_each_online_cpu(j) |
131 | seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count); | 131 | seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count); |
132 | seq_printf(p, " function call interrupts\n"); | 132 | seq_printf(p, " Function call interrupts\n"); |
133 | seq_printf(p, "TLB: "); | 133 | seq_printf(p, "TLB: "); |
134 | for_each_online_cpu(j) | 134 | for_each_online_cpu(j) |
135 | seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count); | 135 | seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count); |
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index b68e21f06f4f..0ed5f939b905 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/ldt.h> | 18 | #include <asm/ldt.h> |
19 | #include <asm/desc.h> | 19 | #include <asm/desc.h> |
20 | #include <asm/mmu_context.h> | 20 | #include <asm/mmu_context.h> |
21 | #include <asm/syscalls.h> | ||
21 | 22 | ||
22 | #ifdef CONFIG_SMP | 23 | #ifdef CONFIG_SMP |
23 | static void flush_ldt(void *current_mm) | 24 | static void flush_ldt(void *current_mm) |
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index b3fb430725cb..f98f4e1dba09 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -397,7 +397,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) | |||
397 | generic_bigsmp_probe(); | 397 | generic_bigsmp_probe(); |
398 | #endif | 398 | #endif |
399 | 399 | ||
400 | #ifdef CONFIG_X86_32 | ||
400 | setup_apic_routing(); | 401 | setup_apic_routing(); |
402 | #endif | ||
401 | if (!num_processors) | 403 | if (!num_processors) |
402 | printk(KERN_ERR "MPTABLE: no processors registered!\n"); | 404 | printk(KERN_ERR "MPTABLE: no processors registered!\n"); |
403 | return num_processors; | 405 | return num_processors; |
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index eecc8c18f010..4caff39078e0 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c | |||
@@ -229,6 +229,12 @@ static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, | |||
229 | } | 229 | } |
230 | } | 230 | } |
231 | 231 | ||
232 | static int __init numaq_setup_ioapic_ids(void) | ||
233 | { | ||
234 | /* return 1 so the generic code can skip it */ | ||
235 | return 1; | ||
236 | } | ||
237 | |||
232 | static struct x86_quirks numaq_x86_quirks __initdata = { | 238 | static struct x86_quirks numaq_x86_quirks __initdata = { |
233 | .arch_pre_time_init = numaq_pre_time_init, | 239 | .arch_pre_time_init = numaq_pre_time_init, |
234 | .arch_time_init = NULL, | 240 | .arch_time_init = NULL, |
@@ -243,6 +249,7 @@ static struct x86_quirks numaq_x86_quirks __initdata = { | |||
243 | .mpc_oem_bus_info = mpc_oem_bus_info, | 249 | .mpc_oem_bus_info = mpc_oem_bus_info, |
244 | .mpc_oem_pci_bus = mpc_oem_pci_bus, | 250 | .mpc_oem_pci_bus = mpc_oem_pci_bus, |
245 | .smp_read_mpc_oem = smp_read_mpc_oem, | 251 | .smp_read_mpc_oem = smp_read_mpc_oem, |
252 | .setup_ioapic_ids = numaq_setup_ioapic_ids, | ||
246 | }; | 253 | }; |
247 | 254 | ||
248 | void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, | 255 | void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, |
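The new setup_ioapic_ids quirk above replaces the CONFIG_X86_NUMAQ #ifdef that io_apic_32.c used to carry: the platform supplies a hook, and a nonzero return tells the generic code to skip the step. A standalone sketch of that dispatch pattern:

#include <stdio.h>

struct x86_quirks_sketch {
	int (*setup_ioapic_ids)(void);
};

static int numaq_setup_ioapic_ids(void)
{
	return 1;		/* NUMAQ: skip the generic ID setup */
}

static struct x86_quirks_sketch numaq = {
	.setup_ioapic_ids	= numaq_setup_ioapic_ids,
};

static struct x86_quirks_sketch *x86_quirks = &numaq;

static void setup_ioapic_ids_from_mpc(void)
{
	if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
		return;		/* quirk handled (or vetoed) it */
	printf("programming IO-APIC IDs from the MP table\n");
}

int main(void)
{
	setup_ioapic_ids_from_mpc();
	return 0;
}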
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 300da17e61cb..4090cd6f8436 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -373,8 +373,6 @@ struct pv_cpu_ops pv_cpu_ops = { | |||
373 | 373 | ||
374 | struct pv_apic_ops pv_apic_ops = { | 374 | struct pv_apic_ops pv_apic_ops = { |
375 | #ifdef CONFIG_X86_LOCAL_APIC | 375 | #ifdef CONFIG_X86_LOCAL_APIC |
376 | .apic_write = native_apic_write, | ||
377 | .apic_read = native_apic_read, | ||
378 | .setup_boot_clock = setup_boot_APIC_clock, | 376 | .setup_boot_clock = setup_boot_APIC_clock, |
379 | .setup_secondary_clock = setup_secondary_APIC_clock, | 377 | .setup_secondary_clock = setup_secondary_APIC_clock, |
380 | .startup_ipi_hook = paravirt_nop, | 378 | .startup_ipi_hook = paravirt_nop, |
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index 58262218781b..9fe644f4861d 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c | |||
@@ -23,7 +23,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, | |||
23 | start = start_##ops##_##x; \ | 23 | start = start_##ops##_##x; \ |
24 | end = end_##ops##_##x; \ | 24 | end = end_##ops##_##x; \ |
25 | goto patch_site | 25 | goto patch_site |
26 | switch(type) { | 26 | switch (type) { |
27 | PATCH_SITE(pv_irq_ops, irq_disable); | 27 | PATCH_SITE(pv_irq_ops, irq_disable); |
28 | PATCH_SITE(pv_irq_ops, irq_enable); | 28 | PATCH_SITE(pv_irq_ops, irq_enable); |
29 | PATCH_SITE(pv_irq_ops, restore_fl); | 29 | PATCH_SITE(pv_irq_ops, restore_fl); |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 87d4d6964ec2..f704cb51ff82 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -82,7 +82,7 @@ void __init dma32_reserve_bootmem(void) | |||
82 | * using 512M as goal | 82 | * using 512M as goal |
83 | */ | 83 | */ |
84 | align = 64ULL<<20; | 84 | align = 64ULL<<20; |
85 | size = round_up(dma32_bootmem_size, align); | 85 | size = roundup(dma32_bootmem_size, align); |
86 | dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, | 86 | dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, |
87 | 512ULL<<20); | 87 | 512ULL<<20); |
88 | if (dma32_bootmem_ptr) | 88 | if (dma32_bootmem_ptr) |
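On x86, round_up() is a mask-based helper that is only correct for power-of-two alignments, while the generic roundup() macro divides and multiplies and so works for arbitrary step sizes; for the 64 MB alignment used here the two agree, making this a move to the more general macro. A small user-space demonstration of the difference (macro bodies copied from their usual kernel definitions, so treat them as an assumption about this tree):

	#include <stdio.h>

	#define round_up(x, y)	(((x) + (y) - 1) & ~((y) - 1))	/* pow2-only */
	#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))	/* any y */

	int main(void)
	{
		unsigned long long align = 64ULL << 20;	/* 64 MB, a power of two */

		/* identical for power-of-two alignment: both print 134217728 */
		printf("%llu %llu\n", round_up(100ULL << 20, align),
		       roundup(100ULL << 20, align));

		/* only roundup() is right for a non-power-of-two step */
		printf("%lu %lu\n", round_up(10UL, 6UL), roundup(10UL, 6UL));
		/* prints "10 12": the mask form silently misrounds */
		return 0;
	}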
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 3b7a1ddcc0bc..2c9abc95e026 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -55,6 +55,8 @@ | |||
55 | #include <asm/tlbflush.h> | 55 | #include <asm/tlbflush.h> |
56 | #include <asm/cpu.h> | 56 | #include <asm/cpu.h> |
57 | #include <asm/kdebug.h> | 57 | #include <asm/kdebug.h> |
58 | #include <asm/syscalls.h> | ||
59 | #include <asm/smp.h> | ||
58 | 60 | ||
59 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); | 61 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); |
60 | 62 | ||
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 71553b664e2a..7502508a7664 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -37,11 +37,11 @@ | |||
37 | #include <linux/kdebug.h> | 37 | #include <linux/kdebug.h> |
38 | #include <linux/tick.h> | 38 | #include <linux/tick.h> |
39 | #include <linux/prctl.h> | 39 | #include <linux/prctl.h> |
40 | #include <linux/uaccess.h> | ||
41 | #include <linux/io.h> | ||
40 | 42 | ||
41 | #include <asm/uaccess.h> | ||
42 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
43 | #include <asm/system.h> | 44 | #include <asm/system.h> |
44 | #include <asm/io.h> | ||
45 | #include <asm/processor.h> | 45 | #include <asm/processor.h> |
46 | #include <asm/i387.h> | 46 | #include <asm/i387.h> |
47 | #include <asm/mmu_context.h> | 47 | #include <asm/mmu_context.h> |
@@ -51,6 +51,7 @@ | |||
51 | #include <asm/proto.h> | 51 | #include <asm/proto.h> |
52 | #include <asm/ia32.h> | 52 | #include <asm/ia32.h> |
53 | #include <asm/idle.h> | 53 | #include <asm/idle.h> |
54 | #include <asm/syscalls.h> | ||
54 | 55 | ||
55 | asmlinkage extern void ret_from_fork(void); | 56 | asmlinkage extern void ret_from_fork(void); |
56 | 57 | ||
@@ -88,7 +89,7 @@ void exit_idle(void) | |||
88 | #ifdef CONFIG_HOTPLUG_CPU | 89 | #ifdef CONFIG_HOTPLUG_CPU |
89 | DECLARE_PER_CPU(int, cpu_state); | 90 | DECLARE_PER_CPU(int, cpu_state); |
90 | 91 | ||
91 | #include <asm/nmi.h> | 92 | #include <linux/nmi.h> |
92 | /* We halt the CPU with physical CPU hotplug */ | 93 | /* We halt the CPU with physical CPU hotplug */ |
93 | static inline void play_dead(void) | 94 | static inline void play_dead(void) |
94 | { | 95 | { |
@@ -151,7 +152,7 @@ void cpu_idle(void) | |||
151 | } | 152 | } |
152 | 153 | ||
153 | /* Prints also some state that isn't saved in the pt_regs */ | 154 | /* Prints also some state that isn't saved in the pt_regs */ |
154 | void __show_regs(struct pt_regs * regs) | 155 | void __show_regs(struct pt_regs *regs) |
155 | { | 156 | { |
156 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; | 157 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; |
157 | unsigned long d0, d1, d2, d3, d6, d7; | 158 | unsigned long d0, d1, d2, d3, d6, d7; |
@@ -160,59 +161,61 @@ void __show_regs(struct pt_regs * regs) | |||
160 | 161 | ||
161 | printk("\n"); | 162 | printk("\n"); |
162 | print_modules(); | 163 | print_modules(); |
163 | printk("Pid: %d, comm: %.20s %s %s %.*s\n", | 164 | printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n", |
164 | current->pid, current->comm, print_tainted(), | 165 | current->pid, current->comm, print_tainted(), |
165 | init_utsname()->release, | 166 | init_utsname()->release, |
166 | (int)strcspn(init_utsname()->version, " "), | 167 | (int)strcspn(init_utsname()->version, " "), |
167 | init_utsname()->version); | 168 | init_utsname()->version); |
168 | printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); | 169 | printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); |
169 | printk_address(regs->ip, 1); | 170 | printk_address(regs->ip, 1); |
170 | printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp, | 171 | printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, |
171 | regs->flags); | 172 | regs->sp, regs->flags); |
172 | printk("RAX: %016lx RBX: %016lx RCX: %016lx\n", | 173 | printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n", |
173 | regs->ax, regs->bx, regs->cx); | 174 | regs->ax, regs->bx, regs->cx); |
174 | printk("RDX: %016lx RSI: %016lx RDI: %016lx\n", | 175 | printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n", |
175 | regs->dx, regs->si, regs->di); | 176 | regs->dx, regs->si, regs->di); |
176 | printk("RBP: %016lx R08: %016lx R09: %016lx\n", | 177 | printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n", |
177 | regs->bp, regs->r8, regs->r9); | 178 | regs->bp, regs->r8, regs->r9); |
178 | printk("R10: %016lx R11: %016lx R12: %016lx\n", | 179 | printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n", |
179 | regs->r10, regs->r11, regs->r12); | 180 | regs->r10, regs->r11, regs->r12); |
180 | printk("R13: %016lx R14: %016lx R15: %016lx\n", | 181 | printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n", |
181 | regs->r13, regs->r14, regs->r15); | 182 | regs->r13, regs->r14, regs->r15); |
182 | 183 | ||
183 | asm("movl %%ds,%0" : "=r" (ds)); | 184 | asm("movl %%ds,%0" : "=r" (ds)); |
184 | asm("movl %%cs,%0" : "=r" (cs)); | 185 | asm("movl %%cs,%0" : "=r" (cs)); |
185 | asm("movl %%es,%0" : "=r" (es)); | 186 | asm("movl %%es,%0" : "=r" (es)); |
186 | asm("movl %%fs,%0" : "=r" (fsindex)); | 187 | asm("movl %%fs,%0" : "=r" (fsindex)); |
187 | asm("movl %%gs,%0" : "=r" (gsindex)); | 188 | asm("movl %%gs,%0" : "=r" (gsindex)); |
188 | 189 | ||
189 | rdmsrl(MSR_FS_BASE, fs); | 190 | rdmsrl(MSR_FS_BASE, fs); |
190 | rdmsrl(MSR_GS_BASE, gs); | 191 | rdmsrl(MSR_GS_BASE, gs); |
191 | rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); | 192 | rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); |
192 | 193 | ||
193 | cr0 = read_cr0(); | 194 | cr0 = read_cr0(); |
194 | cr2 = read_cr2(); | 195 | cr2 = read_cr2(); |
195 | cr3 = read_cr3(); | 196 | cr3 = read_cr3(); |
196 | cr4 = read_cr4(); | 197 | cr4 = read_cr4(); |
197 | 198 | ||
198 | printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", | 199 | printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", |
199 | fs,fsindex,gs,gsindex,shadowgs); | 200 | fs, fsindex, gs, gsindex, shadowgs); |
200 | printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); | 201 | printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, |
201 | printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); | 202 | es, cr0); |
203 | printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, | ||
204 | cr4); | ||
202 | 205 | ||
203 | get_debugreg(d0, 0); | 206 | get_debugreg(d0, 0); |
204 | get_debugreg(d1, 1); | 207 | get_debugreg(d1, 1); |
205 | get_debugreg(d2, 2); | 208 | get_debugreg(d2, 2); |
206 | printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); | 209 | printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); |
207 | get_debugreg(d3, 3); | 210 | get_debugreg(d3, 3); |
208 | get_debugreg(d6, 6); | 211 | get_debugreg(d6, 6); |
209 | get_debugreg(d7, 7); | 212 | get_debugreg(d7, 7); |
210 | printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); | 213 | printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); |
211 | } | 214 | } |
212 | 215 | ||
213 | void show_regs(struct pt_regs *regs) | 216 | void show_regs(struct pt_regs *regs) |
214 | { | 217 | { |
215 | printk("CPU %d:", smp_processor_id()); | 218 | printk(KERN_INFO "CPU %d:", smp_processor_id()); |
216 | __show_regs(regs); | 219 | __show_regs(regs); |
217 | show_trace(NULL, regs, (void *)(regs + 1), regs->bp); | 220 | show_trace(NULL, regs, (void *)(regs + 1), regs->bp); |
218 | } | 221 | } |
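Every line of __show_regs() now carries an explicit log level. KERN_INFO and friends are plain string literals, so the level prefix is glued onto the format by compile-time string concatenation; a printk without one falls back to the default level. A tiny user-space illustration of the mechanism (the "<6>" value matches the classic printk level encoding; treat the macro body as an assumption):

	#include <stdio.h>

	#define KERN_INFO "<6>"	/* classic printk prefix for the "info" level */

	int main(void)
	{
		/* adjacent string literals concatenate at compile time,
		 * so the level tag travels with the format itself */
		printf(KERN_INFO "CPU %d:\n", 0);	/* emits: <6>CPU 0: */
		return 0;
	}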
@@ -313,10 +316,10 @@ void prepare_to_copy(struct task_struct *tsk) | |||
313 | 316 | ||
314 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | 317 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, |
315 | unsigned long unused, | 318 | unsigned long unused, |
316 | struct task_struct * p, struct pt_regs * regs) | 319 | struct task_struct *p, struct pt_regs *regs) |
317 | { | 320 | { |
318 | int err; | 321 | int err; |
319 | struct pt_regs * childregs; | 322 | struct pt_regs *childregs; |
320 | struct task_struct *me = current; | 323 | struct task_struct *me = current; |
321 | 324 | ||
322 | childregs = ((struct pt_regs *) | 325 | childregs = ((struct pt_regs *) |
@@ -361,10 +364,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | |||
361 | if (test_thread_flag(TIF_IA32)) | 364 | if (test_thread_flag(TIF_IA32)) |
362 | err = do_set_thread_area(p, -1, | 365 | err = do_set_thread_area(p, -1, |
363 | (struct user_desc __user *)childregs->si, 0); | 366 | (struct user_desc __user *)childregs->si, 0); |
364 | else | 367 | else |
365 | #endif | 368 | #endif |
366 | err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); | 369 | err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); |
367 | if (err) | 370 | if (err) |
368 | goto out; | 371 | goto out; |
369 | } | 372 | } |
370 | err = 0; | 373 | err = 0; |
@@ -543,7 +546,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
543 | unsigned fsindex, gsindex; | 546 | unsigned fsindex, gsindex; |
544 | 547 | ||
545 | /* we're going to use this soon, after a few expensive things */ | 548 | /* we're going to use this soon, after a few expensive things */ |
546 | if (next_p->fpu_counter>5) | 549 | if (next_p->fpu_counter > 5) |
547 | prefetch(next->xstate); | 550 | prefetch(next->xstate); |
548 | 551 | ||
549 | /* | 552 | /* |
@@ -551,13 +554,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
551 | */ | 554 | */ |
552 | load_sp0(tss, next); | 555 | load_sp0(tss, next); |
553 | 556 | ||
554 | /* | 557 | /* |
555 | * Switch DS and ES. | 558 | * Switch DS and ES. |
556 | * This won't pick up thread selector changes, but I guess that is ok. | 559 | * This won't pick up thread selector changes, but I guess that is ok. |
557 | */ | 560 | */ |
558 | savesegment(es, prev->es); | 561 | savesegment(es, prev->es); |
559 | if (unlikely(next->es | prev->es)) | 562 | if (unlikely(next->es | prev->es)) |
560 | loadsegment(es, next->es); | 563 | loadsegment(es, next->es); |
561 | 564 | ||
562 | savesegment(ds, prev->ds); | 565 | savesegment(ds, prev->ds); |
563 | if (unlikely(next->ds | prev->ds)) | 566 | if (unlikely(next->ds | prev->ds)) |
@@ -583,7 +586,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
583 | */ | 586 | */ |
584 | arch_leave_lazy_cpu_mode(); | 587 | arch_leave_lazy_cpu_mode(); |
585 | 588 | ||
586 | /* | 589 | /* |
587 | * Switch FS and GS. | 590 | * Switch FS and GS. |
588 | * | 591 | * |
589 | * Segment register != 0 always requires a reload. Also | 592 | * Segment register != 0 always requires a reload. Also |
@@ -592,13 +595,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
592 | */ | 595 | */ |
593 | if (unlikely(fsindex | next->fsindex | prev->fs)) { | 596 | if (unlikely(fsindex | next->fsindex | prev->fs)) { |
594 | loadsegment(fs, next->fsindex); | 597 | loadsegment(fs, next->fsindex); |
595 | /* | 598 | /* |
596 | * Check if the user used a selector != 0; if yes | 599 | * Check if the user used a selector != 0; if yes |
597 | * clear 64bit base, since overloaded base is always | 600 | * clear 64bit base, since overloaded base is always |
598 | * mapped to the Null selector | 601 | * mapped to the Null selector |
599 | */ | 602 | */ |
600 | if (fsindex) | 603 | if (fsindex) |
601 | prev->fs = 0; | 604 | prev->fs = 0; |
602 | } | 605 | } |
603 | /* when next process has a 64bit base use it */ | 606 | /* when next process has a 64bit base use it */ |
604 | if (next->fs) | 607 | if (next->fs) |
@@ -608,7 +611,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
608 | if (unlikely(gsindex | next->gsindex | prev->gs)) { | 611 | if (unlikely(gsindex | next->gsindex | prev->gs)) { |
609 | load_gs_index(next->gsindex); | 612 | load_gs_index(next->gsindex); |
610 | if (gsindex) | 613 | if (gsindex) |
611 | prev->gs = 0; | 614 | prev->gs = 0; |
612 | } | 615 | } |
613 | if (next->gs) | 616 | if (next->gs) |
614 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); | 617 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); |
@@ -617,12 +620,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
617 | /* Must be after DS reload */ | 620 | /* Must be after DS reload */ |
618 | unlazy_fpu(prev_p); | 621 | unlazy_fpu(prev_p); |
619 | 622 | ||
620 | /* | 623 | /* |
621 | * Switch the PDA and FPU contexts. | 624 | * Switch the PDA and FPU contexts. |
622 | */ | 625 | */ |
623 | prev->usersp = read_pda(oldrsp); | 626 | prev->usersp = read_pda(oldrsp); |
624 | write_pda(oldrsp, next->usersp); | 627 | write_pda(oldrsp, next->usersp); |
625 | write_pda(pcurrent, next_p); | 628 | write_pda(pcurrent, next_p); |
626 | 629 | ||
627 | write_pda(kernelstack, | 630 | write_pda(kernelstack, |
628 | (unsigned long)task_stack_page(next_p) + | 631 | (unsigned long)task_stack_page(next_p) + |
@@ -663,7 +666,7 @@ long sys_execve(char __user *name, char __user * __user *argv, | |||
663 | char __user * __user *envp, struct pt_regs *regs) | 666 | char __user * __user *envp, struct pt_regs *regs) |
664 | { | 667 | { |
665 | long error; | 668 | long error; |
666 | char * filename; | 669 | char *filename; |
667 | 670 | ||
668 | filename = getname(name); | 671 | filename = getname(name); |
669 | error = PTR_ERR(filename); | 672 | error = PTR_ERR(filename); |
@@ -721,55 +724,55 @@ asmlinkage long sys_vfork(struct pt_regs *regs) | |||
721 | unsigned long get_wchan(struct task_struct *p) | 724 | unsigned long get_wchan(struct task_struct *p) |
722 | { | 725 | { |
723 | unsigned long stack; | 726 | unsigned long stack; |
724 | u64 fp,ip; | 727 | u64 fp, ip; |
725 | int count = 0; | 728 | int count = 0; |
726 | 729 | ||
727 | if (!p || p == current || p->state==TASK_RUNNING) | 730 | if (!p || p == current || p->state == TASK_RUNNING) |
728 | return 0; | 731 | return 0; |
729 | stack = (unsigned long)task_stack_page(p); | 732 | stack = (unsigned long)task_stack_page(p); |
730 | if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE) | 733 | if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE) |
731 | return 0; | 734 | return 0; |
732 | fp = *(u64 *)(p->thread.sp); | 735 | fp = *(u64 *)(p->thread.sp); |
733 | do { | 736 | do { |
734 | if (fp < (unsigned long)stack || | 737 | if (fp < (unsigned long)stack || |
735 | fp > (unsigned long)stack+THREAD_SIZE) | 738 | fp > (unsigned long)stack+THREAD_SIZE) |
736 | return 0; | 739 | return 0; |
737 | ip = *(u64 *)(fp+8); | 740 | ip = *(u64 *)(fp+8); |
738 | if (!in_sched_functions(ip)) | 741 | if (!in_sched_functions(ip)) |
739 | return ip; | 742 | return ip; |
740 | fp = *(u64 *)fp; | 743 | fp = *(u64 *)fp; |
741 | } while (count++ < 16); | 744 | } while (count++ < 16); |
742 | return 0; | 745 | return 0; |
743 | } | 746 | } |
744 | 747 | ||
745 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) | 748 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) |
746 | { | 749 | { |
747 | int ret = 0; | 750 | int ret = 0; |
748 | int doit = task == current; | 751 | int doit = task == current; |
749 | int cpu; | 752 | int cpu; |
750 | 753 | ||
751 | switch (code) { | 754 | switch (code) { |
752 | case ARCH_SET_GS: | 755 | case ARCH_SET_GS: |
753 | if (addr >= TASK_SIZE_OF(task)) | 756 | if (addr >= TASK_SIZE_OF(task)) |
754 | return -EPERM; | 757 | return -EPERM; |
755 | cpu = get_cpu(); | 758 | cpu = get_cpu(); |
756 | /* handle small bases via the GDT because that's faster to | 759 | /* handle small bases via the GDT because that's faster to |
757 | switch. */ | 760 | switch. */ |
758 | if (addr <= 0xffffffff) { | 761 | if (addr <= 0xffffffff) { |
759 | set_32bit_tls(task, GS_TLS, addr); | 762 | set_32bit_tls(task, GS_TLS, addr); |
760 | if (doit) { | 763 | if (doit) { |
761 | load_TLS(&task->thread, cpu); | 764 | load_TLS(&task->thread, cpu); |
762 | load_gs_index(GS_TLS_SEL); | 765 | load_gs_index(GS_TLS_SEL); |
763 | } | 766 | } |
764 | task->thread.gsindex = GS_TLS_SEL; | 767 | task->thread.gsindex = GS_TLS_SEL; |
765 | task->thread.gs = 0; | 768 | task->thread.gs = 0; |
766 | } else { | 769 | } else { |
767 | task->thread.gsindex = 0; | 770 | task->thread.gsindex = 0; |
768 | task->thread.gs = addr; | 771 | task->thread.gs = addr; |
769 | if (doit) { | 772 | if (doit) { |
770 | load_gs_index(0); | 773 | load_gs_index(0); |
771 | ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); | 774 | ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); |
772 | } | 775 | } |
773 | } | 776 | } |
774 | put_cpu(); | 777 | put_cpu(); |
775 | break; | 778 | break; |
@@ -823,8 +826,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) | |||
823 | rdmsrl(MSR_KERNEL_GS_BASE, base); | 826 | rdmsrl(MSR_KERNEL_GS_BASE, base); |
824 | else | 827 | else |
825 | base = task->thread.gs; | 828 | base = task->thread.gs; |
826 | } | 829 | } else |
827 | else | ||
828 | base = task->thread.gs; | 830 | base = task->thread.gs; |
829 | ret = put_user(base, (unsigned long __user *)addr); | 831 | ret = put_user(base, (unsigned long __user *)addr); |
830 | break; | 832 | break; |
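Through the cleanup, do_arch_prctl() keeps its two-tier ARCH_SET_GS strategy: a base that fits in 32 bits goes into a GDT TLS descriptor, which is cheap to reload on context switch, while a full 64-bit base can only live in MSR_KERNEL_GS_BASE. A tiny sketch of just that split (enum and helper names are illustrative, not from this tree):

	#include <stdint.h>

	enum gs_base_path { GS_VIA_GDT_TLS, GS_VIA_MSR };

	/* Mirrors the addr <= 0xffffffff test above: a segment
	 * descriptor's base field is only 32 bits wide, so anything
	 * larger must go through wrmsrl(MSR_KERNEL_GS_BASE, addr). */
	static enum gs_base_path pick_gs_path(uint64_t addr)
	{
		return addr <= 0xffffffffULL ? GS_VIA_GDT_TLS : GS_VIA_MSR;
	}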
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index e37dccce85db..9e43a48ad6e0 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/ptrace.h> | 15 | #include <linux/ptrace.h> |
16 | #include <linux/regset.h> | 16 | #include <linux/regset.h> |
17 | #include <linux/tracehook.h> | ||
17 | #include <linux/user.h> | 18 | #include <linux/user.h> |
18 | #include <linux/elf.h> | 19 | #include <linux/elf.h> |
19 | #include <linux/security.h> | 20 | #include <linux/security.h> |
@@ -69,7 +70,7 @@ static inline bool invalid_selector(u16 value) | |||
69 | 70 | ||
70 | #define FLAG_MASK FLAG_MASK_32 | 71 | #define FLAG_MASK FLAG_MASK_32 |
71 | 72 | ||
72 | static long *pt_regs_access(struct pt_regs *regs, unsigned long regno) | 73 | static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) |
73 | { | 74 | { |
74 | BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); | 75 | BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); |
75 | regno >>= 2; | 76 | regno >>= 2; |
@@ -1375,30 +1376,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code) | |||
1375 | force_sig_info(SIGTRAP, &info, tsk); | 1376 | force_sig_info(SIGTRAP, &info, tsk); |
1376 | } | 1377 | } |
1377 | 1378 | ||
1378 | static void syscall_trace(struct pt_regs *regs) | ||
1379 | { | ||
1380 | if (!(current->ptrace & PT_PTRACED)) | ||
1381 | return; | ||
1382 | |||
1383 | #if 0 | ||
1384 | printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n", | ||
1385 | current->comm, | ||
1386 | regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0), | ||
1387 | current_thread_info()->flags, current->ptrace); | ||
1388 | #endif | ||
1389 | |||
1390 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) | ||
1391 | ? 0x80 : 0)); | ||
1392 | /* | ||
1393 | * this isn't the same as continuing with a signal, but it will do | ||
1394 | * for normal use. strace only continues with a signal if the | ||
1395 | * stopping signal is not SIGTRAP. -brl | ||
1396 | */ | ||
1397 | if (current->exit_code) { | ||
1398 | send_sig(current->exit_code, current, 1); | ||
1399 | current->exit_code = 0; | ||
1400 | } | ||
1401 | } | ||
1402 | 1379 | ||
1403 | #ifdef CONFIG_X86_32 | 1380 | #ifdef CONFIG_X86_32 |
1404 | # define IS_IA32 1 | 1381 | # define IS_IA32 1 |
@@ -1432,8 +1409,9 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs) | |||
1432 | if (unlikely(test_thread_flag(TIF_SYSCALL_EMU))) | 1409 | if (unlikely(test_thread_flag(TIF_SYSCALL_EMU))) |
1433 | ret = -1L; | 1410 | ret = -1L; |
1434 | 1411 | ||
1435 | if (ret || test_thread_flag(TIF_SYSCALL_TRACE)) | 1412 | if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) && |
1436 | syscall_trace(regs); | 1413 | tracehook_report_syscall_entry(regs)) |
1414 | ret = -1L; | ||
1437 | 1415 | ||
1438 | if (unlikely(current->audit_context)) { | 1416 | if (unlikely(current->audit_context)) { |
1439 | if (IS_IA32) | 1417 | if (IS_IA32) |
@@ -1459,7 +1437,7 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs) | |||
1459 | audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); | 1437 | audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); |
1460 | 1438 | ||
1461 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | 1439 | if (test_thread_flag(TIF_SYSCALL_TRACE)) |
1462 | syscall_trace(regs); | 1440 | tracehook_report_syscall_exit(regs, 0); |
1463 | 1441 | ||
1464 | /* | 1442 | /* |
1465 | * If TIF_SYSCALL_EMU is set, we only get here because of | 1443 | * If TIF_SYSCALL_EMU is set, we only get here because of |
@@ -1475,6 +1453,6 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs) | |||
1475 | * system call instruction. | 1453 | * system call instruction. |
1476 | */ | 1454 | */ |
1477 | if (test_thread_flag(TIF_SINGLESTEP) && | 1455 | if (test_thread_flag(TIF_SINGLESTEP) && |
1478 | (current->ptrace & PT_PTRACED)) | 1456 | tracehook_consider_fatal_signal(current, SIGTRAP, SIG_DFL)) |
1479 | send_sigtrap(current, regs, 0); | 1457 | send_sigtrap(current, regs, 0); |
1480 | } | 1458 | } |
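The per-arch syscall_trace() is gone; the generic tracehook layer now defines what a syscall stop means. tracehook_report_syscall_entry() returns nonzero when the tracer wants the syscall suppressed (hence the new ret = -1L), and tracehook_report_syscall_exit() takes a flag saying whether the stop doubles as a single-step report. A condensed sketch of the resulting shape (kernel context assumed; audit and emulation details elided):

	/* entry: give the tracer a chance to veto the syscall */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;	/* abort the syscall, as the tracer asked */

	/* exit: the 0 says this stop is not also a single-step trap */
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);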
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 9838f2539dfc..c6b9330c1bff 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -742,6 +742,8 @@ void __init setup_arch(char **cmdline_p) | |||
742 | #else | 742 | #else |
743 | num_physpages = max_pfn; | 743 | num_physpages = max_pfn; |
744 | 744 | ||
745 | if (cpu_has_x2apic) | ||
746 | check_x2apic(); | ||
745 | 747 | ||
746 | /* How many end-of-memory variables you have, grandma! */ | 748 | /* How many end-of-memory variables you have, grandma! */ |
747 | /* need this before calling reserve_initrd */ | 749 | /* need this before calling reserve_initrd */ |
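check_x2apic() is only reached when cpu_has_x2apic is set, i.e. when CPUID leaf 1 reports x2APIC support in ECX bit 21. A small x86-only user-space probe for the same bit (GCC inline-asm syntax):

	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID leaf 1: feature flags; ECX bit 21 = x2APIC */
		__asm__ volatile("cpuid"
				 : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
				 : "a"(1), "c"(0));
		printf("x2APIC %ssupported\n", (ecx >> 21) & 1 ? "" : "not ");
		return 0;
	}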
diff --git a/arch/x86/kernel/sigframe.h b/arch/x86/kernel/sigframe.h index 72bbb519d2dc..cc673aa55ce4 100644 --- a/arch/x86/kernel/sigframe.h +++ b/arch/x86/kernel/sigframe.h | |||
@@ -3,9 +3,18 @@ struct sigframe { | |||
3 | char __user *pretcode; | 3 | char __user *pretcode; |
4 | int sig; | 4 | int sig; |
5 | struct sigcontext sc; | 5 | struct sigcontext sc; |
6 | struct _fpstate fpstate; | 6 | /* |
7 | * fpstate is unused. The live FP state is now allocated after | ||
8 | * retcode[] below, which lets the FP state and the future state | ||
9 | * extensions (xsave) stay together. | ||
10 | * Retaining the unused fpstate field keeps the offset of | ||
11 | * extramask[] in the sigframe unchanged, so legacy applications | ||
12 | * that access or modify it keep working. | ||
13 | */ | ||
14 | struct _fpstate fpstate_unused; | ||
7 | unsigned long extramask[_NSIG_WORDS-1]; | 15 | unsigned long extramask[_NSIG_WORDS-1]; |
8 | char retcode[8]; | 16 | char retcode[8]; |
17 | /* fp state follows here */ | ||
9 | }; | 18 | }; |
10 | 19 | ||
11 | struct rt_sigframe { | 20 | struct rt_sigframe { |
@@ -15,13 +24,19 @@ struct rt_sigframe { | |||
15 | void __user *puc; | 24 | void __user *puc; |
16 | struct siginfo info; | 25 | struct siginfo info; |
17 | struct ucontext uc; | 26 | struct ucontext uc; |
18 | struct _fpstate fpstate; | ||
19 | char retcode[8]; | 27 | char retcode[8]; |
28 | /* fp state follows here */ | ||
20 | }; | 29 | }; |
21 | #else | 30 | #else |
22 | struct rt_sigframe { | 31 | struct rt_sigframe { |
23 | char __user *pretcode; | 32 | char __user *pretcode; |
24 | struct ucontext uc; | 33 | struct ucontext uc; |
25 | struct siginfo info; | 34 | struct siginfo info; |
35 | /* fp state follows here */ | ||
26 | }; | 36 | }; |
37 | |||
38 | int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
39 | sigset_t *set, struct pt_regs *regs); | ||
40 | int ia32_setup_frame(int sig, struct k_sigaction *ka, | ||
41 | sigset_t *set, struct pt_regs *regs); | ||
27 | #endif | 42 | #endif |
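Renaming fpstate to fpstate_unused, rather than deleting the field, pins every later member of struct sigframe at its historical offset; the live FP/xsave image now follows retcode[] on the stack instead. A user-space check of the invariant the new comment is protecting (struct shapes and sizes below are simplified placeholders, only the relative layout matters):

	#include <stdio.h>
	#include <stddef.h>

	struct fpstate_placeholder { char bytes[112]; };	/* size illustrative */

	struct sigframe_old {
		void *pretcode; int sig; char sc[88];
		struct fpstate_placeholder fpstate;	/* was live */
		unsigned long extramask[1]; char retcode[8];
	};

	struct sigframe_new {
		void *pretcode; int sig; char sc[88];
		struct fpstate_placeholder fpstate_unused; /* dead, layout only */
		unsigned long extramask[1]; char retcode[8];
		/* real fp/xsave state is appended past retcode on the stack */
	};

	int main(void)
	{
		/* equal offsets mean legacy users of extramask keep working */
		printf("old extramask @ %zu, new extramask @ %zu\n",
		       offsetof(struct sigframe_old, extramask),
		       offsetof(struct sigframe_new, extramask));
		return 0;
	}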
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 6fb5bcdd8933..da3cf3270f83 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include <linux/wait.h> | 19 | #include <linux/wait.h> |
20 | #include <linux/tracehook.h> | ||
20 | #include <linux/elf.h> | 21 | #include <linux/elf.h> |
21 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
22 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
@@ -26,6 +27,8 @@ | |||
26 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
27 | #include <asm/i387.h> | 28 | #include <asm/i387.h> |
28 | #include <asm/vdso.h> | 29 | #include <asm/vdso.h> |
30 | #include <asm/syscall.h> | ||
31 | #include <asm/syscalls.h> | ||
29 | 32 | ||
30 | #include "sigframe.h" | 33 | #include "sigframe.h" |
31 | 34 | ||
@@ -159,28 +162,14 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | |||
159 | } | 162 | } |
160 | 163 | ||
161 | { | 164 | { |
162 | struct _fpstate __user *buf; | 165 | void __user *buf; |
163 | 166 | ||
164 | err |= __get_user(buf, &sc->fpstate); | 167 | err |= __get_user(buf, &sc->fpstate); |
165 | if (buf) { | 168 | err |= restore_i387_xstate(buf); |
166 | if (!access_ok(VERIFY_READ, buf, sizeof(*buf))) | ||
167 | goto badframe; | ||
168 | err |= restore_i387(buf); | ||
169 | } else { | ||
170 | struct task_struct *me = current; | ||
171 | |||
172 | if (used_math()) { | ||
173 | clear_fpu(me); | ||
174 | clear_used_math(); | ||
175 | } | ||
176 | } | ||
177 | } | 169 | } |
178 | 170 | ||
179 | err |= __get_user(*pax, &sc->ax); | 171 | err |= __get_user(*pax, &sc->ax); |
180 | return err; | 172 | return err; |
181 | |||
182 | badframe: | ||
183 | return 1; | ||
184 | } | 173 | } |
185 | 174 | ||
186 | asmlinkage unsigned long sys_sigreturn(unsigned long __unused) | 175 | asmlinkage unsigned long sys_sigreturn(unsigned long __unused) |
@@ -226,9 +215,8 @@ badframe: | |||
226 | return 0; | 215 | return 0; |
227 | } | 216 | } |
228 | 217 | ||
229 | asmlinkage int sys_rt_sigreturn(unsigned long __unused) | 218 | static long do_rt_sigreturn(struct pt_regs *regs) |
230 | { | 219 | { |
231 | struct pt_regs *regs = (struct pt_regs *)&__unused; | ||
232 | struct rt_sigframe __user *frame; | 220 | struct rt_sigframe __user *frame; |
233 | unsigned long ax; | 221 | unsigned long ax; |
234 | sigset_t set; | 222 | sigset_t set; |
@@ -254,15 +242,22 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused) | |||
254 | return ax; | 242 | return ax; |
255 | 243 | ||
256 | badframe: | 244 | badframe: |
257 | force_sig(SIGSEGV, current); | 245 | signal_fault(regs, frame, "rt_sigreturn"); |
258 | return 0; | 246 | return 0; |
259 | } | 247 | } |
260 | 248 | ||
249 | asmlinkage int sys_rt_sigreturn(unsigned long __unused) | ||
250 | { | ||
251 | struct pt_regs *regs = (struct pt_regs *)&__unused; | ||
252 | |||
253 | return do_rt_sigreturn(regs); | ||
254 | } | ||
255 | |||
261 | /* | 256 | /* |
262 | * Set up a signal frame. | 257 | * Set up a signal frame. |
263 | */ | 258 | */ |
264 | static int | 259 | static int |
265 | setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, | 260 | setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, |
266 | struct pt_regs *regs, unsigned long mask) | 261 | struct pt_regs *regs, unsigned long mask) |
267 | { | 262 | { |
268 | int tmp, err = 0; | 263 | int tmp, err = 0; |
@@ -289,7 +284,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, | |||
289 | err |= __put_user(regs->sp, &sc->sp_at_signal); | 284 | err |= __put_user(regs->sp, &sc->sp_at_signal); |
290 | err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); | 285 | err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); |
291 | 286 | ||
292 | tmp = save_i387(fpstate); | 287 | tmp = save_i387_xstate(fpstate); |
293 | if (tmp < 0) | 288 | if (tmp < 0) |
294 | err = 1; | 289 | err = 1; |
295 | else | 290 | else |
@@ -306,7 +301,8 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate, | |||
306 | * Determine which stack to use.. | 301 | * Determine which stack to use.. |
307 | */ | 302 | */ |
308 | static inline void __user * | 303 | static inline void __user * |
309 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | 304 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, |
305 | void **fpstate) | ||
310 | { | 306 | { |
311 | unsigned long sp; | 307 | unsigned long sp; |
312 | 308 | ||
@@ -332,6 +328,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | |||
332 | sp = (unsigned long) ka->sa.sa_restorer; | 328 | sp = (unsigned long) ka->sa.sa_restorer; |
333 | } | 329 | } |
334 | 330 | ||
331 | if (used_math()) { | ||
332 | sp = sp - sig_xstate_size; | ||
333 | *fpstate = (struct _fpstate *) sp; | ||
334 | } | ||
335 | |||
335 | sp -= frame_size; | 336 | sp -= frame_size; |
336 | /* | 337 | /* |
337 | * Align the stack pointer according to the i386 ABI, | 338 | * Align the stack pointer according to the i386 ABI, |
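When the task has touched the FPU, get_sigframe() now reserves sig_xstate_size bytes above the frame for the possibly xsave-extended FP image before subtracting the frame itself, and hands the carved-out address back through the new fpstate out-parameter. The order of operations as plain pointer arithmetic (sizes are placeholders; the final alignment line reproduces the i386 ABI rule the trailing comment refers to, as an assumption about the surrounding code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long sp = 0xbfffff00UL;	/* hypothetical stack top */
		unsigned long sig_xstate_size = 512 + 64;	/* placeholder */
		unsigned long frame_size = 0x2e0;	/* placeholder */
		unsigned long fpstate;

		sp -= sig_xstate_size;	/* FP/xsave area sits above the frame */
		fpstate = sp;
		sp -= frame_size;
		sp = ((sp + 4) & -16UL) - 4;	/* i386 ABI stack alignment */
		printf("frame=%#lx fpstate=%#lx\n", sp, fpstate);
		return 0;
	}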
@@ -343,38 +344,29 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | |||
343 | } | 344 | } |
344 | 345 | ||
345 | static int | 346 | static int |
346 | setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | 347 | __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, |
347 | struct pt_regs *regs) | 348 | struct pt_regs *regs) |
348 | { | 349 | { |
349 | struct sigframe __user *frame; | 350 | struct sigframe __user *frame; |
350 | void __user *restorer; | 351 | void __user *restorer; |
351 | int err = 0; | 352 | int err = 0; |
352 | int usig; | 353 | void __user *fpstate = NULL; |
353 | 354 | ||
354 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 355 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); |
355 | 356 | ||
356 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 357 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
357 | goto give_sigsegv; | 358 | return -EFAULT; |
358 | |||
359 | usig = current_thread_info()->exec_domain | ||
360 | && current_thread_info()->exec_domain->signal_invmap | ||
361 | && sig < 32 | ||
362 | ? current_thread_info()->exec_domain->signal_invmap[sig] | ||
363 | : sig; | ||
364 | 359 | ||
365 | err = __put_user(usig, &frame->sig); | 360 | if (__put_user(sig, &frame->sig)) |
366 | if (err) | 361 | return -EFAULT; |
367 | goto give_sigsegv; | ||
368 | 362 | ||
369 | err = setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]); | 363 | if (setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0])) |
370 | if (err) | 364 | return -EFAULT; |
371 | goto give_sigsegv; | ||
372 | 365 | ||
373 | if (_NSIG_WORDS > 1) { | 366 | if (_NSIG_WORDS > 1) { |
374 | err = __copy_to_user(&frame->extramask, &set->sig[1], | 367 | if (__copy_to_user(&frame->extramask, &set->sig[1], |
375 | sizeof(frame->extramask)); | 368 | sizeof(frame->extramask))) |
376 | if (err) | 369 | return -EFAULT; |
377 | goto give_sigsegv; | ||
378 | } | 370 | } |
379 | 371 | ||
380 | if (current->mm->context.vdso) | 372 | if (current->mm->context.vdso) |
@@ -399,7 +391,7 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | |||
399 | err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); | 391 | err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); |
400 | 392 | ||
401 | if (err) | 393 | if (err) |
402 | goto give_sigsegv; | 394 | return -EFAULT; |
403 | 395 | ||
404 | /* Set up registers for signal handler */ | 396 | /* Set up registers for signal handler */ |
405 | regs->sp = (unsigned long)frame; | 397 | regs->sp = (unsigned long)frame; |
@@ -414,50 +406,43 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | |||
414 | regs->cs = __USER_CS; | 406 | regs->cs = __USER_CS; |
415 | 407 | ||
416 | return 0; | 408 | return 0; |
417 | |||
418 | give_sigsegv: | ||
419 | force_sigsegv(sig, current); | ||
420 | return -EFAULT; | ||
421 | } | 409 | } |
422 | 410 | ||
423 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 411 | static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
424 | sigset_t *set, struct pt_regs *regs) | 412 | sigset_t *set, struct pt_regs *regs) |
425 | { | 413 | { |
426 | struct rt_sigframe __user *frame; | 414 | struct rt_sigframe __user *frame; |
427 | void __user *restorer; | 415 | void __user *restorer; |
428 | int err = 0; | 416 | int err = 0; |
429 | int usig; | 417 | void __user *fpstate = NULL; |
430 | 418 | ||
431 | frame = get_sigframe(ka, regs, sizeof(*frame)); | 419 | frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); |
432 | 420 | ||
433 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 421 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
434 | goto give_sigsegv; | 422 | return -EFAULT; |
435 | 423 | ||
436 | usig = current_thread_info()->exec_domain | 424 | err |= __put_user(sig, &frame->sig); |
437 | && current_thread_info()->exec_domain->signal_invmap | ||
438 | && sig < 32 | ||
439 | ? current_thread_info()->exec_domain->signal_invmap[sig] | ||
440 | : sig; | ||
441 | |||
442 | err |= __put_user(usig, &frame->sig); | ||
443 | err |= __put_user(&frame->info, &frame->pinfo); | 425 | err |= __put_user(&frame->info, &frame->pinfo); |
444 | err |= __put_user(&frame->uc, &frame->puc); | 426 | err |= __put_user(&frame->uc, &frame->puc); |
445 | err |= copy_siginfo_to_user(&frame->info, info); | 427 | err |= copy_siginfo_to_user(&frame->info, info); |
446 | if (err) | 428 | if (err) |
447 | goto give_sigsegv; | 429 | return -EFAULT; |
448 | 430 | ||
449 | /* Create the ucontext. */ | 431 | /* Create the ucontext. */ |
450 | err |= __put_user(0, &frame->uc.uc_flags); | 432 | if (cpu_has_xsave) |
433 | err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); | ||
434 | else | ||
435 | err |= __put_user(0, &frame->uc.uc_flags); | ||
451 | err |= __put_user(0, &frame->uc.uc_link); | 436 | err |= __put_user(0, &frame->uc.uc_link); |
452 | err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | 437 | err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); |
453 | err |= __put_user(sas_ss_flags(regs->sp), | 438 | err |= __put_user(sas_ss_flags(regs->sp), |
454 | &frame->uc.uc_stack.ss_flags); | 439 | &frame->uc.uc_stack.ss_flags); |
455 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | 440 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); |
456 | err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, | 441 | err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, |
457 | regs, set->sig[0]); | 442 | regs, set->sig[0]); |
458 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 443 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
459 | if (err) | 444 | if (err) |
460 | goto give_sigsegv; | 445 | return -EFAULT; |
461 | 446 | ||
462 | /* Set up to return from userspace. */ | 447 | /* Set up to return from userspace. */ |
463 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); | 448 | restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); |
@@ -477,12 +462,12 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
477 | err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); | 462 | err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); |
478 | 463 | ||
479 | if (err) | 464 | if (err) |
480 | goto give_sigsegv; | 465 | return -EFAULT; |
481 | 466 | ||
482 | /* Set up registers for signal handler */ | 467 | /* Set up registers for signal handler */ |
483 | regs->sp = (unsigned long)frame; | 468 | regs->sp = (unsigned long)frame; |
484 | regs->ip = (unsigned long)ka->sa.sa_handler; | 469 | regs->ip = (unsigned long)ka->sa.sa_handler; |
485 | regs->ax = (unsigned long)usig; | 470 | regs->ax = (unsigned long)sig; |
486 | regs->dx = (unsigned long)&frame->info; | 471 | regs->dx = (unsigned long)&frame->info; |
487 | regs->cx = (unsigned long)&frame->uc; | 472 | regs->cx = (unsigned long)&frame->uc; |
488 | 473 | ||
@@ -492,25 +477,48 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
492 | regs->cs = __USER_CS; | 477 | regs->cs = __USER_CS; |
493 | 478 | ||
494 | return 0; | 479 | return 0; |
495 | |||
496 | give_sigsegv: | ||
497 | force_sigsegv(sig, current); | ||
498 | return -EFAULT; | ||
499 | } | 480 | } |
500 | 481 | ||
501 | /* | 482 | /* |
502 | * OK, we're invoking a handler: | 483 | * OK, we're invoking a handler: |
503 | */ | 484 | */ |
504 | static int | 485 | static int |
486 | setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
487 | sigset_t *set, struct pt_regs *regs) | ||
488 | { | ||
489 | int ret; | ||
490 | int usig; | ||
491 | |||
492 | usig = current_thread_info()->exec_domain | ||
493 | && current_thread_info()->exec_domain->signal_invmap | ||
494 | && sig < 32 | ||
495 | ? current_thread_info()->exec_domain->signal_invmap[sig] | ||
496 | : sig; | ||
497 | |||
498 | /* Set up the stack frame */ | ||
499 | if (ka->sa.sa_flags & SA_SIGINFO) | ||
500 | ret = __setup_rt_frame(usig, ka, info, set, regs); | ||
501 | else | ||
502 | ret = __setup_frame(usig, ka, set, regs); | ||
503 | |||
504 | if (ret) { | ||
505 | force_sigsegv(sig, current); | ||
506 | return -EFAULT; | ||
507 | } | ||
508 | |||
509 | return ret; | ||
510 | } | ||
511 | |||
512 | static int | ||
505 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | 513 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, |
506 | sigset_t *oldset, struct pt_regs *regs) | 514 | sigset_t *oldset, struct pt_regs *regs) |
507 | { | 515 | { |
508 | int ret; | 516 | int ret; |
509 | 517 | ||
510 | /* Are we from a system call? */ | 518 | /* Are we from a system call? */ |
511 | if ((long)regs->orig_ax >= 0) { | 519 | if (syscall_get_nr(current, regs) >= 0) { |
512 | /* If so, check system call restarting.. */ | 520 | /* If so, check system call restarting.. */ |
513 | switch (regs->ax) { | 521 | switch (syscall_get_error(current, regs)) { |
514 | case -ERESTART_RESTARTBLOCK: | 522 | case -ERESTART_RESTARTBLOCK: |
515 | case -ERESTARTNOHAND: | 523 | case -ERESTARTNOHAND: |
516 | regs->ax = -EINTR; | 524 | regs->ax = -EINTR; |
@@ -537,11 +545,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
537 | likely(test_and_clear_thread_flag(TIF_FORCED_TF))) | 545 | likely(test_and_clear_thread_flag(TIF_FORCED_TF))) |
538 | regs->flags &= ~X86_EFLAGS_TF; | 546 | regs->flags &= ~X86_EFLAGS_TF; |
539 | 547 | ||
540 | /* Set up the stack frame */ | 548 | ret = setup_rt_frame(sig, ka, info, oldset, regs); |
541 | if (ka->sa.sa_flags & SA_SIGINFO) | ||
542 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | ||
543 | else | ||
544 | ret = setup_frame(sig, ka, oldset, regs); | ||
545 | 549 | ||
546 | if (ret) | 550 | if (ret) |
547 | return ret; | 551 | return ret; |
@@ -558,8 +562,6 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
558 | * handler too. | 562 | * handler too. |
559 | */ | 563 | */ |
560 | regs->flags &= ~X86_EFLAGS_TF; | 564 | regs->flags &= ~X86_EFLAGS_TF; |
561 | if (test_thread_flag(TIF_SINGLESTEP)) | ||
562 | ptrace_notify(SIGTRAP); | ||
563 | 565 | ||
564 | spin_lock_irq(¤t->sighand->siglock); | 566 | spin_lock_irq(¤t->sighand->siglock); |
565 | sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); | 567 | sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); |
@@ -568,9 +570,13 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
568 | recalc_sigpending(); | 570 | recalc_sigpending(); |
569 | spin_unlock_irq(¤t->sighand->siglock); | 571 | spin_unlock_irq(¤t->sighand->siglock); |
570 | 572 | ||
573 | tracehook_signal_handler(sig, info, ka, regs, | ||
574 | test_thread_flag(TIF_SINGLESTEP)); | ||
575 | |||
571 | return 0; | 576 | return 0; |
572 | } | 577 | } |
573 | 578 | ||
579 | #define NR_restart_syscall __NR_restart_syscall | ||
574 | /* | 580 | /* |
575 | * Note that 'init' is a special process: it doesn't get signals it doesn't | 581 | * Note that 'init' is a special process: it doesn't get signals it doesn't |
576 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | 582 | * want to handle. Thus you cannot kill init even with a SIGKILL even by |
@@ -623,9 +629,9 @@ static void do_signal(struct pt_regs *regs) | |||
623 | } | 629 | } |
624 | 630 | ||
625 | /* Did we come from a system call? */ | 631 | /* Did we come from a system call? */ |
626 | if ((long)regs->orig_ax >= 0) { | 632 | if (syscall_get_nr(current, regs) >= 0) { |
627 | /* Restart the system call - no handlers present */ | 633 | /* Restart the system call - no handlers present */ |
628 | switch (regs->ax) { | 634 | switch (syscall_get_error(current, regs)) { |
629 | case -ERESTARTNOHAND: | 635 | case -ERESTARTNOHAND: |
630 | case -ERESTARTSYS: | 636 | case -ERESTARTSYS: |
631 | case -ERESTARTNOINTR: | 637 | case -ERESTARTNOINTR: |
@@ -634,7 +640,7 @@ static void do_signal(struct pt_regs *regs) | |||
634 | break; | 640 | break; |
635 | 641 | ||
636 | case -ERESTART_RESTARTBLOCK: | 642 | case -ERESTART_RESTARTBLOCK: |
637 | regs->ax = __NR_restart_syscall; | 643 | regs->ax = NR_restart_syscall; |
638 | regs->ip -= 2; | 644 | regs->ip -= 2; |
639 | break; | 645 | break; |
640 | } | 646 | } |
@@ -661,5 +667,26 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) | |||
661 | if (thread_info_flags & _TIF_SIGPENDING) | 667 | if (thread_info_flags & _TIF_SIGPENDING) |
662 | do_signal(regs); | 668 | do_signal(regs); |
663 | 669 | ||
670 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | ||
671 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
672 | tracehook_notify_resume(regs); | ||
673 | } | ||
674 | |||
664 | clear_thread_flag(TIF_IRET); | 675 | clear_thread_flag(TIF_IRET); |
665 | } | 676 | } |
677 | |||
678 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where) | ||
679 | { | ||
680 | struct task_struct *me = current; | ||
681 | |||
682 | if (show_unhandled_signals && printk_ratelimit()) { | ||
683 | printk(KERN_INFO | ||
684 | "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", | ||
685 | me->comm, me->pid, where, frame, | ||
686 | regs->ip, regs->sp, regs->orig_ax); | ||
687 | print_vma_addr(" in ", regs->ip); | ||
688 | printk(KERN_CONT "\n"); | ||
689 | } | ||
690 | |||
691 | force_sig(SIGSEGV, me); | ||
692 | } | ||
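The 32-bit frame setup is reorganized around a single dispatcher: the exec_domain signal translation happens once, the rt/non-rt choice is one branch, and every -EFAULT path funnels into one force_sigsegv() call instead of per-function give_sigsegv labels. Condensed from the new code above (kernel context assumed):

	static int setup_rt_frame(int sig, struct k_sigaction *ka,
				  siginfo_t *info, sigset_t *set,
				  struct pt_regs *regs)
	{
		int usig = /* exec_domain->signal_invmap[sig] if present */ sig;
		int ret = (ka->sa.sa_flags & SA_SIGINFO)
			? __setup_rt_frame(usig, ka, info, set, regs)
			: __setup_frame(usig, ka, set, regs);

		if (ret) {		/* one failure path for both frames */
			force_sigsegv(sig, current);
			return -EFAULT;
		}
		return 0;
	}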
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c index ca316b5b742c..bf77d4789a2d 100644 --- a/arch/x86/kernel/signal_64.c +++ b/arch/x86/kernel/signal_64.c | |||
@@ -15,17 +15,21 @@ | |||
15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
16 | #include <linux/wait.h> | 16 | #include <linux/wait.h> |
17 | #include <linux/ptrace.h> | 17 | #include <linux/ptrace.h> |
18 | #include <linux/tracehook.h> | ||
18 | #include <linux/unistd.h> | 19 | #include <linux/unistd.h> |
19 | #include <linux/stddef.h> | 20 | #include <linux/stddef.h> |
20 | #include <linux/personality.h> | 21 | #include <linux/personality.h> |
21 | #include <linux/compiler.h> | 22 | #include <linux/compiler.h> |
23 | #include <linux/uaccess.h> | ||
24 | |||
22 | #include <asm/processor.h> | 25 | #include <asm/processor.h> |
23 | #include <asm/ucontext.h> | 26 | #include <asm/ucontext.h> |
24 | #include <asm/uaccess.h> | ||
25 | #include <asm/i387.h> | 27 | #include <asm/i387.h> |
26 | #include <asm/proto.h> | 28 | #include <asm/proto.h> |
27 | #include <asm/ia32_unistd.h> | 29 | #include <asm/ia32_unistd.h> |
28 | #include <asm/mce.h> | 30 | #include <asm/mce.h> |
31 | #include <asm/syscall.h> | ||
32 | #include <asm/syscalls.h> | ||
29 | #include "sigframe.h" | 33 | #include "sigframe.h" |
30 | 34 | ||
31 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 35 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
@@ -41,11 +45,6 @@ | |||
41 | # define FIX_EFLAGS __FIX_EFLAGS | 45 | # define FIX_EFLAGS __FIX_EFLAGS |
42 | #endif | 46 | #endif |
43 | 47 | ||
44 | int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
45 | sigset_t *set, struct pt_regs * regs); | ||
46 | int ia32_setup_frame(int sig, struct k_sigaction *ka, | ||
47 | sigset_t *set, struct pt_regs * regs); | ||
48 | |||
49 | asmlinkage long | 48 | asmlinkage long |
50 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | 49 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, |
51 | struct pt_regs *regs) | 50 | struct pt_regs *regs) |
@@ -54,69 +53,6 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | |||
54 | } | 53 | } |
55 | 54 | ||
56 | /* | 55 | /* |
57 | * Signal frame handlers. | ||
58 | */ | ||
59 | |||
60 | static inline int save_i387(struct _fpstate __user *buf) | ||
61 | { | ||
62 | struct task_struct *tsk = current; | ||
63 | int err = 0; | ||
64 | |||
65 | BUILD_BUG_ON(sizeof(struct user_i387_struct) != | ||
66 | sizeof(tsk->thread.xstate->fxsave)); | ||
67 | |||
68 | if ((unsigned long)buf % 16) | ||
69 | printk("save_i387: bad fpstate %p\n", buf); | ||
70 | |||
71 | if (!used_math()) | ||
72 | return 0; | ||
73 | clear_used_math(); /* trigger finit */ | ||
74 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | ||
75 | err = save_i387_checking((struct i387_fxsave_struct __user *) | ||
76 | buf); | ||
77 | if (err) | ||
78 | return err; | ||
79 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | ||
80 | stts(); | ||
81 | } else { | ||
82 | if (__copy_to_user(buf, &tsk->thread.xstate->fxsave, | ||
83 | sizeof(struct i387_fxsave_struct))) | ||
84 | return -1; | ||
85 | } | ||
86 | return 1; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * This restores directly out of user space. Exceptions are handled. | ||
91 | */ | ||
92 | static inline int restore_i387(struct _fpstate __user *buf) | ||
93 | { | ||
94 | struct task_struct *tsk = current; | ||
95 | int err; | ||
96 | |||
97 | if (!used_math()) { | ||
98 | err = init_fpu(tsk); | ||
99 | if (err) | ||
100 | return err; | ||
101 | } | ||
102 | |||
103 | if (!(task_thread_info(current)->status & TS_USEDFPU)) { | ||
104 | clts(); | ||
105 | task_thread_info(current)->status |= TS_USEDFPU; | ||
106 | } | ||
107 | err = restore_fpu_checking((__force struct i387_fxsave_struct *)buf); | ||
108 | if (unlikely(err)) { | ||
109 | /* | ||
110 | * Encountered an error while doing the restore from the | ||
111 | * user buffer, clear the fpu state. | ||
112 | */ | ||
113 | clear_fpu(tsk); | ||
114 | clear_used_math(); | ||
115 | } | ||
116 | return err; | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * Do a signal return; undo the signal stack. | 56 | * Do a signal return; undo the signal stack. |
121 | */ | 57 | */ |
122 | static int | 58 | static int |
@@ -128,7 +64,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | |||
128 | /* Always make any pending restarted system calls return -EINTR */ | 64 | /* Always make any pending restarted system calls return -EINTR */ |
129 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | 65 | current_thread_info()->restart_block.fn = do_no_restart_syscall; |
130 | 66 | ||
131 | #define COPY(x) err |= __get_user(regs->x, &sc->x) | 67 | #define COPY(x) (err |= __get_user(regs->x, &sc->x)) |
132 | 68 | ||
133 | COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); | 69 | COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); |
134 | COPY(dx); COPY(cx); COPY(ip); | 70 | COPY(dx); COPY(cx); COPY(ip); |
@@ -158,34 +94,21 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, | |||
158 | } | 94 | } |
159 | 95 | ||
160 | { | 96 | { |
161 | struct _fpstate __user * buf; | 97 | void __user *buf; |
162 | err |= __get_user(buf, &sc->fpstate); | ||
163 | 98 | ||
164 | if (buf) { | 99 | err |= __get_user(buf, &sc->fpstate); |
165 | if (!access_ok(VERIFY_READ, buf, sizeof(*buf))) | 100 | err |= restore_i387_xstate(buf); |
166 | goto badframe; | ||
167 | err |= restore_i387(buf); | ||
168 | } else { | ||
169 | struct task_struct *me = current; | ||
170 | if (used_math()) { | ||
171 | clear_fpu(me); | ||
172 | clear_used_math(); | ||
173 | } | ||
174 | } | ||
175 | } | 101 | } |
176 | 102 | ||
177 | err |= __get_user(*pax, &sc->ax); | 103 | err |= __get_user(*pax, &sc->ax); |
178 | return err; | 104 | return err; |
179 | |||
180 | badframe: | ||
181 | return 1; | ||
182 | } | 105 | } |
183 | 106 | ||
184 | asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | 107 | static long do_rt_sigreturn(struct pt_regs *regs) |
185 | { | 108 | { |
186 | struct rt_sigframe __user *frame; | 109 | struct rt_sigframe __user *frame; |
187 | sigset_t set; | ||
188 | unsigned long ax; | 110 | unsigned long ax; |
111 | sigset_t set; | ||
189 | 112 | ||
190 | frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); | 113 | frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); |
191 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 114 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
@@ -198,7 +121,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | |||
198 | current->blocked = set; | 121 | current->blocked = set; |
199 | recalc_sigpending(); | 122 | recalc_sigpending(); |
200 | spin_unlock_irq(¤t->sighand->siglock); | 123 | spin_unlock_irq(¤t->sighand->siglock); |
201 | 124 | ||
202 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) | 125 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) |
203 | goto badframe; | 126 | goto badframe; |
204 | 127 | ||
@@ -208,16 +131,22 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | |||
208 | return ax; | 131 | return ax; |
209 | 132 | ||
210 | badframe: | 133 | badframe: |
211 | signal_fault(regs,frame,"sigreturn"); | 134 | signal_fault(regs, frame, "rt_sigreturn"); |
212 | return 0; | 135 | return 0; |
213 | } | 136 | } |
137 | |||
138 | asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) | ||
139 | { | ||
140 | return do_rt_sigreturn(regs); | ||
141 | } | ||
214 | 142 | ||
215 | /* | 143 | /* |
216 | * Set up a signal frame. | 144 | * Set up a signal frame. |
217 | */ | 145 | */ |
218 | 146 | ||
219 | static inline int | 147 | static inline int |
220 | setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me) | 148 | setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, |
149 | unsigned long mask, struct task_struct *me) | ||
221 | { | 150 | { |
222 | int err = 0; | 151 | int err = 0; |
223 | 152 | ||
@@ -269,41 +198,40 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size) | |||
269 | sp = current->sas_ss_sp + current->sas_ss_size; | 198 | sp = current->sas_ss_sp + current->sas_ss_size; |
270 | } | 199 | } |
271 | 200 | ||
272 | return (void __user *)round_down(sp - size, 16); | 201 | return (void __user *)round_down(sp - size, 64); |
273 | } | 202 | } |
274 | 203 | ||
275 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 204 | static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
276 | sigset_t *set, struct pt_regs * regs) | 205 | sigset_t *set, struct pt_regs *regs) |
277 | { | 206 | { |
278 | struct rt_sigframe __user *frame; | 207 | struct rt_sigframe __user *frame; |
279 | struct _fpstate __user *fp = NULL; | 208 | void __user *fp = NULL; |
280 | int err = 0; | 209 | int err = 0; |
281 | struct task_struct *me = current; | 210 | struct task_struct *me = current; |
282 | 211 | ||
283 | if (used_math()) { | 212 | if (used_math()) { |
284 | fp = get_stack(ka, regs, sizeof(struct _fpstate)); | 213 | fp = get_stack(ka, regs, sig_xstate_size); |
285 | frame = (void __user *)round_down( | 214 | frame = (void __user *)round_down( |
286 | (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; | 215 | (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; |
287 | 216 | ||
288 | if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) | 217 | if (save_i387_xstate(fp) < 0) |
289 | goto give_sigsegv; | 218 | return -EFAULT; |
290 | |||
291 | if (save_i387(fp) < 0) | ||
292 | err |= -1; | ||
293 | } else | 219 | } else |
294 | frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; | 220 | frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; |
295 | 221 | ||
296 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 222 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
297 | goto give_sigsegv; | 223 | return -EFAULT; |
298 | 224 | ||
299 | if (ka->sa.sa_flags & SA_SIGINFO) { | 225 | if (ka->sa.sa_flags & SA_SIGINFO) { |
300 | err |= copy_siginfo_to_user(&frame->info, info); | 226 | if (copy_siginfo_to_user(&frame->info, info)) |
301 | if (err) | 227 | return -EFAULT; |
302 | goto give_sigsegv; | ||
303 | } | 228 | } |
304 | 229 | ||
305 | /* Create the ucontext. */ | 230 | /* Create the ucontext. */ |
306 | err |= __put_user(0, &frame->uc.uc_flags); | 231 | if (cpu_has_xsave) |
232 | err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); | ||
233 | else | ||
234 | err |= __put_user(0, &frame->uc.uc_flags); | ||
307 | err |= __put_user(0, &frame->uc.uc_link); | 235 | err |= __put_user(0, &frame->uc.uc_link); |
308 | err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); | 236 | err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); |
309 | err |= __put_user(sas_ss_flags(regs->sp), | 237 | err |= __put_user(sas_ss_flags(regs->sp), |
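In the hunk above, get_stack() now rounds the FP save area down to a 64-byte boundary instead of 16 because XSAVE/XRSTOR require a 64-byte-aligned save area (FXSAVE only needed 16). A one-line demonstration of the mask form of round_down (the macro body is the usual power-of-two kernel definition, assumed here):

	#include <stdio.h>

	#define round_down(x, y)  ((x) & ~((y) - 1))	/* y: power of two */

	int main(void)
	{
		unsigned long sp = 0x7fffffffe123UL;

		printf("%#lx -> %#lx\n", sp, round_down(sp - 512, 64UL));
		return 0;
	}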
@@ -311,9 +239,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
311 | err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); | 239 | err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); |
312 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); | 240 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); |
313 | err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); | 241 | err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); |
314 | if (sizeof(*set) == 16) { | 242 | if (sizeof(*set) == 16) { |
315 | __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); | 243 | __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); |
316 | __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); | 244 | __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); |
317 | } else | 245 | } else |
318 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 246 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
319 | 247 | ||
@@ -324,15 +252,15 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
324 | err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); | 252 | err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); |
325 | } else { | 253 | } else { |
326 | /* could use a vstub here */ | 254 | /* could use a vstub here */ |
327 | goto give_sigsegv; | 255 | return -EFAULT; |
328 | } | 256 | } |
329 | 257 | ||
330 | if (err) | 258 | if (err) |
331 | goto give_sigsegv; | 259 | return -EFAULT; |
332 | 260 | ||
333 | /* Set up registers for signal handler */ | 261 | /* Set up registers for signal handler */ |
334 | regs->di = sig; | 262 | regs->di = sig; |
335 | /* In case the signal handler was declared without prototypes */ | 263 | /* In case the signal handler was declared without prototypes */ |
336 | regs->ax = 0; | 264 | regs->ax = 0; |
337 | 265 | ||
338 | /* This also works for non SA_SIGINFO handlers because they expect the | 266 | /* This also works for non SA_SIGINFO handlers because they expect the |
@@ -348,44 +276,34 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
348 | regs->cs = __USER_CS; | 276 | regs->cs = __USER_CS; |
349 | 277 | ||
350 | return 0; | 278 | return 0; |
351 | |||
352 | give_sigsegv: | ||
353 | force_sigsegv(sig, current); | ||
354 | return -EFAULT; | ||
355 | } | 279 | } |
356 | 280 | ||
357 | /* | 281 | /* |
358 | * Return -1L or the syscall number that @regs is executing. | 282 | * OK, we're invoking a handler |
359 | */ | 283 | */ |
360 | static long current_syscall(struct pt_regs *regs) | 284 | static int |
285 | setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
286 | sigset_t *set, struct pt_regs *regs) | ||
361 | { | 287 | { |
362 | /* | 288 | int ret; |
363 | * We always sign-extend a -1 value being set here, | ||
364 | * so this is always either -1L or a syscall number. | ||
365 | */ | ||
366 | return regs->orig_ax; | ||
367 | } | ||
368 | 289 | ||
369 | /* | ||
370 | * Return a value that is -EFOO if the system call in @regs->orig_ax | ||
371 | * returned an error. This only works for @regs from @current. | ||
372 | */ | ||
373 | static long current_syscall_ret(struct pt_regs *regs) | ||
374 | { | ||
375 | #ifdef CONFIG_IA32_EMULATION | 290 | #ifdef CONFIG_IA32_EMULATION |
376 | if (test_thread_flag(TIF_IA32)) | 291 | if (test_thread_flag(TIF_IA32)) { |
377 | /* | 292 | if (ka->sa.sa_flags & SA_SIGINFO) |
378 | * Sign-extend the value so (int)-EFOO becomes (long)-EFOO | 293 | ret = ia32_setup_rt_frame(sig, ka, info, set, regs); |
379 | * and will match correctly in comparisons. | 294 | else |
380 | */ | 295 | ret = ia32_setup_frame(sig, ka, set, regs); |
381 | return (int) regs->ax; | 296 | } else |
382 | #endif | 297 | #endif |
383 | return regs->ax; | 298 | ret = __setup_rt_frame(sig, ka, info, set, regs); |
384 | } | ||
385 | 299 | ||
386 | /* | 300 | if (ret) { |
387 | * OK, we're invoking a handler | 301 | force_sigsegv(sig, current); |
388 | */ | 302 | return -EFAULT; |
303 | } | ||
304 | |||
305 | return ret; | ||
306 | } | ||
389 | 307 | ||
390 | static int | 308 | static int |
391 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | 309 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, |
@@ -394,9 +312,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
394 | int ret; | 312 | int ret; |
395 | 313 | ||
396 | /* Are we from a system call? */ | 314 | /* Are we from a system call? */ |
397 | if (current_syscall(regs) >= 0) { | 315 | if (syscall_get_nr(current, regs) >= 0) { |
398 | /* If so, check system call restarting.. */ | 316 | /* If so, check system call restarting.. */ |
399 | switch (current_syscall_ret(regs)) { | 317 | switch (syscall_get_error(current, regs)) { |
400 | case -ERESTART_RESTARTBLOCK: | 318 | case -ERESTART_RESTARTBLOCK: |
401 | case -ERESTARTNOHAND: | 319 | case -ERESTARTNOHAND: |
402 | regs->ax = -EINTR; | 320 | regs->ax = -EINTR; |
@@ -423,50 +341,46 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, | |||
423 | likely(test_and_clear_thread_flag(TIF_FORCED_TF))) | 341 | likely(test_and_clear_thread_flag(TIF_FORCED_TF))) |
424 | regs->flags &= ~X86_EFLAGS_TF; | 342 | regs->flags &= ~X86_EFLAGS_TF; |
425 | 343 | ||
426 | #ifdef CONFIG_IA32_EMULATION | ||
427 | if (test_thread_flag(TIF_IA32)) { | ||
428 | if (ka->sa.sa_flags & SA_SIGINFO) | ||
429 | ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs); | ||
430 | else | ||
431 | ret = ia32_setup_frame(sig, ka, oldset, regs); | ||
432 | } else | ||
433 | #endif | ||
434 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | 344 | ret = setup_rt_frame(sig, ka, info, oldset, regs); |
435 | 345 | ||
436 | if (ret == 0) { | 346 | if (ret) |
437 | /* | 347 | return ret; |
438 | * This has nothing to do with segment registers, | ||
439 | * despite the name. This magic affects uaccess.h | ||
440 | * macros' behavior. Reset it to the normal setting. | ||
441 | */ | ||
442 | set_fs(USER_DS); | ||
443 | 348 | ||
444 | /* | 349 | /* |
445 | * Clear the direction flag as per the ABI for function entry. | 350 | * This has nothing to do with segment registers, |
446 | */ | 351 | * despite the name. This magic affects uaccess.h |
447 | regs->flags &= ~X86_EFLAGS_DF; | 352 | * macros' behavior. Reset it to the normal setting. |
353 | */ | ||
354 | set_fs(USER_DS); | ||
448 | 355 | ||
449 | /* | 356 | /* |
450 | * Clear TF when entering the signal handler, but | 357 | * Clear the direction flag as per the ABI for function entry. |
451 | * notify any tracer that was single-stepping it. | 358 | */ |
452 | * The tracer may want to single-step inside the | 359 | regs->flags &= ~X86_EFLAGS_DF; |
453 | * handler too. | ||
454 | */ | ||
455 | regs->flags &= ~X86_EFLAGS_TF; | ||
456 | if (test_thread_flag(TIF_SINGLESTEP)) | ||
457 | ptrace_notify(SIGTRAP); | ||
458 | |||
459 | spin_lock_irq(¤t->sighand->siglock); | ||
460 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | ||
461 | if (!(ka->sa.sa_flags & SA_NODEFER)) | ||
462 | sigaddset(¤t->blocked,sig); | ||
463 | recalc_sigpending(); | ||
464 | spin_unlock_irq(¤t->sighand->siglock); | ||
465 | } | ||
466 | 360 | ||
467 | return ret; | 361 | /* |
362 | * Clear TF when entering the signal handler, but | ||
363 | * notify any tracer that was single-stepping it. | ||
364 | * The tracer may want to single-step inside the | ||
365 | * handler too. | ||
366 | */ | ||
367 | regs->flags &= ~X86_EFLAGS_TF; | ||
368 | |||
369 | spin_lock_irq(¤t->sighand->siglock); | ||
370 | sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); | ||
371 | if (!(ka->sa.sa_flags & SA_NODEFER)) | ||
372 | sigaddset(¤t->blocked, sig); | ||
373 | recalc_sigpending(); | ||
374 | spin_unlock_irq(¤t->sighand->siglock); | ||
375 | |||
376 | tracehook_signal_handler(sig, info, ka, regs, | ||
377 | test_thread_flag(TIF_SINGLESTEP)); | ||
378 | |||
379 | return 0; | ||
468 | } | 380 | } |
469 | 381 | ||
382 | #define NR_restart_syscall \ | ||
383 | test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall | ||
470 | /* | 384 | /* |
471 | * Note that 'init' is a special process: it doesn't get signals it doesn't | 385 | * Note that 'init' is a special process: it doesn't get signals it doesn't |
472 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | 386 | * want to handle. Thus you cannot kill init even with a SIGKILL even by |
@@ -518,9 +432,9 @@ static void do_signal(struct pt_regs *regs) | |||
518 | } | 432 | } |
519 | 433 | ||
520 | /* Did we come from a system call? */ | 434 | /* Did we come from a system call? */ |
521 | if (current_syscall(regs) >= 0) { | 435 | if (syscall_get_nr(current, regs) >= 0) { |
522 | /* Restart the system call - no handlers present */ | 436 | /* Restart the system call - no handlers present */ |
523 | switch (current_syscall_ret(regs)) { | 437 | switch (syscall_get_error(current, regs)) { |
524 | case -ERESTARTNOHAND: | 438 | case -ERESTARTNOHAND: |
525 | case -ERESTARTSYS: | 439 | case -ERESTARTSYS: |
526 | case -ERESTARTNOINTR: | 440 | case -ERESTARTNOINTR: |
@@ -528,9 +442,7 @@ static void do_signal(struct pt_regs *regs) | |||
528 | regs->ip -= 2; | 442 | regs->ip -= 2; |
529 | break; | 443 | break; |
530 | case -ERESTART_RESTARTBLOCK: | 444 | case -ERESTART_RESTARTBLOCK: |
531 | regs->ax = test_thread_flag(TIF_IA32) ? | 445 | regs->ax = NR_restart_syscall; |
532 | __NR_ia32_restart_syscall : | ||
533 | __NR_restart_syscall; | ||
534 | regs->ip -= 2; | 446 | regs->ip -= 2; |
535 | break; | 447 | break; |
536 | } | 448 | } |
@@ -558,17 +470,25 @@ void do_notify_resume(struct pt_regs *regs, void *unused, | |||
558 | /* deal with pending signal delivery */ | 470 | /* deal with pending signal delivery */ |
559 | if (thread_info_flags & _TIF_SIGPENDING) | 471 | if (thread_info_flags & _TIF_SIGPENDING) |
560 | do_signal(regs); | 472 | do_signal(regs); |
473 | |||
474 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | ||
475 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
476 | tracehook_notify_resume(regs); | ||
477 | } | ||
561 | } | 478 | } |
562 | 479 | ||
563 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where) | 480 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where) |
564 | { | 481 | { |
565 | struct task_struct *me = current; | 482 | struct task_struct *me = current; |
483 | |||
566 | if (show_unhandled_signals && printk_ratelimit()) { | 484 | if (show_unhandled_signals && printk_ratelimit()) { |
567 | printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", | 485 | printk(KERN_INFO |
568 | me->comm,me->pid,where,frame,regs->ip,regs->sp,regs->orig_ax); | 486 | "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", |
487 | me->comm, me->pid, where, frame, | ||
488 | regs->ip, regs->sp, regs->orig_ax); | ||
569 | print_vma_addr(" in ", regs->ip); | 489 | print_vma_addr(" in ", regs->ip); |
570 | printk("\n"); | 490 | printk(KERN_CONT "\n"); |
571 | } | 491 | } |
572 | 492 | ||
573 | force_sig(SIGSEGV, me); | 493 | force_sig(SIGSEGV, me); |
574 | } | 494 | } |
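Note on the signal_64.c rework above: the file-local current_syscall()/current_syscall_ret() helpers are dropped in favour of the generic syscall_get_nr()/syscall_get_error() accessors, and single-step notification moves into tracehook_signal_handler(). A rough sketch of what those accessors amount to on x86-64, reconstructed directly from the removed helpers (the real asm/syscall.h definitions may differ in detail):

    /*
     * Sketch only: orig_ax holds the syscall number, or a sign-extended -1
     * when the task is not inside a syscall, so >= 0 means "real syscall".
     * The error accessor mirrors the removed current_syscall_ret(): compat
     * tasks need (int) truncation so that (int)-ERESTARTSYS compares equal
     * to (long)-ERESTARTSYS.
     */
    static inline long syscall_get_nr(struct task_struct *task,
                                      struct pt_regs *regs)
    {
        return regs->orig_ax;
    }

    static inline long syscall_get_error(struct task_struct *task,
                                         struct pt_regs *regs)
    {
    #ifdef CONFIG_IA32_EMULATION
        if (task_thread_info(task)->flags & _TIF_IA32)
            return (int)regs->ax;
    #endif
        return regs->ax;
    }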
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 7985c5b3f916..2ff0bbcd5bd1 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -88,7 +88,7 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); | |||
88 | #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) | 88 | #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) |
89 | #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) | 89 | #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) |
90 | #else | 90 | #else |
91 | struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; | 91 | static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; |
92 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) | 92 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) |
93 | #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) | 93 | #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) |
94 | #endif | 94 | #endif |
@@ -123,13 +123,12 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); | |||
123 | 123 | ||
124 | static atomic_t init_deasserted; | 124 | static atomic_t init_deasserted; |
125 | 125 | ||
126 | static int boot_cpu_logical_apicid; | ||
127 | 126 | ||
128 | /* representing cpus for which sibling maps can be computed */ | 127 | /* representing cpus for which sibling maps can be computed */ |
129 | static cpumask_t cpu_sibling_setup_map; | 128 | static cpumask_t cpu_sibling_setup_map; |
130 | 129 | ||
131 | /* Set if we find a B stepping CPU */ | 130 | /* Set if we find a B stepping CPU */ |
132 | int __cpuinitdata smp_b_stepping; | 131 | static int __cpuinitdata smp_b_stepping; |
133 | 132 | ||
134 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) | 133 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) |
135 | 134 | ||
@@ -165,6 +164,8 @@ static void unmap_cpu_to_node(int cpu) | |||
165 | #endif | 164 | #endif |
166 | 165 | ||
167 | #ifdef CONFIG_X86_32 | 166 | #ifdef CONFIG_X86_32 |
167 | static int boot_cpu_logical_apicid; | ||
168 | |||
168 | u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = | 169 | u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = |
169 | { [0 ... NR_CPUS-1] = BAD_APICID }; | 170 | { [0 ... NR_CPUS-1] = BAD_APICID }; |
170 | 171 | ||
@@ -210,7 +211,7 @@ static void __cpuinit smp_callin(void) | |||
210 | /* | 211 | /* |
211 | * (This works even if the APIC is not enabled.) | 212 | * (This works even if the APIC is not enabled.) |
212 | */ | 213 | */ |
213 | phys_id = GET_APIC_ID(read_apic_id()); | 214 | phys_id = read_apic_id(); |
214 | cpuid = smp_processor_id(); | 215 | cpuid = smp_processor_id(); |
215 | if (cpu_isset(cpuid, cpu_callin_map)) { | 216 | if (cpu_isset(cpuid, cpu_callin_map)) { |
216 | panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__, | 217 | panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__, |
@@ -550,8 +551,7 @@ static inline void __inquire_remote_apic(int apicid) | |||
550 | printk(KERN_CONT | 551 | printk(KERN_CONT |
551 | "a previous APIC delivery may have failed\n"); | 552 | "a previous APIC delivery may have failed\n"); |
552 | 553 | ||
553 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid)); | 554 | apic_icr_write(APIC_DM_REMRD | regs[i], apicid); |
554 | apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]); | ||
555 | 555 | ||
556 | timeout = 0; | 556 | timeout = 0; |
557 | do { | 557 | do { |
@@ -583,11 +583,9 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip) | |||
583 | int maxlvt; | 583 | int maxlvt; |
584 | 584 | ||
585 | /* Target chip */ | 585 | /* Target chip */ |
586 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid)); | ||
587 | |||
588 | /* Boot on the stack */ | 586 | /* Boot on the stack */ |
589 | /* Kick the second */ | 587 | /* Kick the second */ |
590 | apic_write(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL); | 588 | apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid); |
591 | 589 | ||
592 | pr_debug("Waiting for send to finish...\n"); | 590 | pr_debug("Waiting for send to finish...\n"); |
593 | send_status = safe_apic_wait_icr_idle(); | 591 | send_status = safe_apic_wait_icr_idle(); |
@@ -640,13 +638,11 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) | |||
640 | /* | 638 | /* |
641 | * Turn INIT on target chip | 639 | * Turn INIT on target chip |
642 | */ | 640 | */ |
643 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); | ||
644 | |||
645 | /* | 641 | /* |
646 | * Send IPI | 642 | * Send IPI |
647 | */ | 643 | */ |
648 | apic_write(APIC_ICR, | 644 | apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT, |
649 | APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT); | 645 | phys_apicid); |
650 | 646 | ||
651 | pr_debug("Waiting for send to finish...\n"); | 647 | pr_debug("Waiting for send to finish...\n"); |
652 | send_status = safe_apic_wait_icr_idle(); | 648 | send_status = safe_apic_wait_icr_idle(); |
@@ -656,10 +652,8 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) | |||
656 | pr_debug("Deasserting INIT.\n"); | 652 | pr_debug("Deasserting INIT.\n"); |
657 | 653 | ||
658 | /* Target chip */ | 654 | /* Target chip */ |
659 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); | ||
660 | |||
661 | /* Send IPI */ | 655 | /* Send IPI */ |
662 | apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT); | 656 | apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid); |
663 | 657 | ||
664 | pr_debug("Waiting for send to finish...\n"); | 658 | pr_debug("Waiting for send to finish...\n"); |
665 | send_status = safe_apic_wait_icr_idle(); | 659 | send_status = safe_apic_wait_icr_idle(); |
@@ -702,11 +696,10 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) | |||
702 | */ | 696 | */ |
703 | 697 | ||
704 | /* Target chip */ | 698 | /* Target chip */ |
705 | apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid)); | ||
706 | |||
707 | /* Boot on the stack */ | 699 | /* Boot on the stack */ |
708 | /* Kick the second */ | 700 | /* Kick the second */ |
709 | apic_write(APIC_ICR, APIC_DM_STARTUP | (start_eip >> 12)); | 701 | apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12), |
702 | phys_apicid); | ||
710 | 703 | ||
711 | /* | 704 | /* |
712 | * Give the other CPU some time to accept the IPI. | 705 | * Give the other CPU some time to accept the IPI. |
@@ -1175,10 +1168,17 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1175 | * Setup boot CPU information | 1168 | * Setup boot CPU information |
1176 | */ | 1169 | */ |
1177 | smp_store_cpu_info(0); /* Final full version of the data */ | 1170 | smp_store_cpu_info(0); /* Final full version of the data */ |
1171 | #ifdef CONFIG_X86_32 | ||
1178 | boot_cpu_logical_apicid = logical_smp_processor_id(); | 1172 | boot_cpu_logical_apicid = logical_smp_processor_id(); |
1173 | #endif | ||
1179 | current_thread_info()->cpu = 0; /* needed? */ | 1174 | current_thread_info()->cpu = 0; /* needed? */ |
1180 | set_cpu_sibling_map(0); | 1175 | set_cpu_sibling_map(0); |
1181 | 1176 | ||
1177 | #ifdef CONFIG_X86_64 | ||
1178 | enable_IR_x2apic(); | ||
1179 | setup_apic_routing(); | ||
1180 | #endif | ||
1181 | |||
1182 | if (smp_sanity_check(max_cpus) < 0) { | 1182 | if (smp_sanity_check(max_cpus) < 0) { |
1183 | printk(KERN_INFO "SMP disabled\n"); | 1183 | printk(KERN_INFO "SMP disabled\n"); |
1184 | disable_smp(); | 1184 | disable_smp(); |
@@ -1186,9 +1186,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1186 | } | 1186 | } |
1187 | 1187 | ||
1188 | preempt_disable(); | 1188 | preempt_disable(); |
1189 | if (GET_APIC_ID(read_apic_id()) != boot_cpu_physical_apicid) { | 1189 | if (read_apic_id() != boot_cpu_physical_apicid) { |
1190 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", | 1190 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", |
1191 | GET_APIC_ID(read_apic_id()), boot_cpu_physical_apicid); | 1191 | read_apic_id(), boot_cpu_physical_apicid); |
1192 | /* Or can we switch back to PIC here? */ | 1192 | /* Or can we switch back to PIC here? */ |
1193 | } | 1193 | } |
1194 | preempt_enable(); | 1194 | preempt_enable(); |
@@ -1313,16 +1313,13 @@ __init void prefill_possible_map(void) | |||
1313 | if (!num_processors) | 1313 | if (!num_processors) |
1314 | num_processors = 1; | 1314 | num_processors = 1; |
1315 | 1315 | ||
1316 | #ifdef CONFIG_HOTPLUG_CPU | ||
1317 | if (additional_cpus == -1) { | 1316 | if (additional_cpus == -1) { |
1318 | if (disabled_cpus > 0) | 1317 | if (disabled_cpus > 0) |
1319 | additional_cpus = disabled_cpus; | 1318 | additional_cpus = disabled_cpus; |
1320 | else | 1319 | else |
1321 | additional_cpus = 0; | 1320 | additional_cpus = 0; |
1322 | } | 1321 | } |
1323 | #else | 1322 | |
1324 | additional_cpus = 0; | ||
1325 | #endif | ||
1326 | possible = num_processors + additional_cpus; | 1323 | possible = num_processors + additional_cpus; |
1327 | if (possible > NR_CPUS) | 1324 | if (possible > NR_CPUS) |
1328 | possible = NR_CPUS; | 1325 | possible = NR_CPUS; |
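Throughout the smpboot.c hunks above, the two-step ICR sequence (program the destination into APIC_ICR2, then the command into APIC_ICR) collapses into a single apic_icr_write() call, which indirects through the new apic_ops so that x2APIC and paravirt implementations can substitute their own. A minimal xAPIC-mode sketch of the callee, assuming the traditional MMIO registers (reconstructed from the removed lines):

    /* Hypothetical flat-xAPIC backend for apic_icr_write(); the ordering
     * matters, since the write to APIC_ICR is what actually sends the IPI.
     * x2APIC mode would instead issue one 64-bit WRMSR to the ICR MSR. */
    static void xapic_icr_write(u32 low, u32 id)
    {
        apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
        apic_write(APIC_ICR, low);
    }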
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c index d67ce5f044ba..7b987852e876 100644 --- a/arch/x86/kernel/summit_32.c +++ b/arch/x86/kernel/summit_32.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <linux/init.h> | 30 | #include <linux/init.h> |
31 | #include <asm/io.h> | 31 | #include <asm/io.h> |
32 | #include <asm/bios_ebda.h> | 32 | #include <asm/bios_ebda.h> |
33 | #include <asm/mach-summit/mach_mpparse.h> | 33 | #include <asm/summit/mpparse.h> |
34 | 34 | ||
35 | static struct rio_table_hdr *rio_table_hdr __initdata; | 35 | static struct rio_table_hdr *rio_table_hdr __initdata; |
36 | static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; | 36 | static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; |
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c index 7066cb855a60..1884a8d12bfa 100644 --- a/arch/x86/kernel/sys_i386_32.c +++ b/arch/x86/kernel/sys_i386_32.c | |||
@@ -22,6 +22,8 @@ | |||
22 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
23 | #include <linux/unistd.h> | 23 | #include <linux/unistd.h> |
24 | 24 | ||
25 | #include <asm/syscalls.h> | ||
26 | |||
25 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | 27 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, |
26 | unsigned long prot, unsigned long flags, | 28 | unsigned long prot, unsigned long flags, |
27 | unsigned long fd, unsigned long pgoff) | 29 | unsigned long fd, unsigned long pgoff) |
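The asm/syscalls.h include added here (and in tls.c and vm86_32.c below) exists so these asmlinkage entry points are compiled against visible prototypes instead of being silently unprototyped. The header itself is not shown in this series; presumably it collects declarations along these lines, with signatures matching the definitions visible in this diff:

    /* Assumed contents of asm/syscalls.h (illustrative only). */
    asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
                              unsigned long prot, unsigned long flags,
                              unsigned long fd, unsigned long pgoff);
    asmlinkage long sys_uname(struct new_utsname __user *name);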
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 3b360ef33817..6bc211accf08 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -13,15 +13,17 @@ | |||
13 | #include <linux/utsname.h> | 13 | #include <linux/utsname.h> |
14 | #include <linux/personality.h> | 14 | #include <linux/personality.h> |
15 | #include <linux/random.h> | 15 | #include <linux/random.h> |
16 | #include <linux/uaccess.h> | ||
16 | 17 | ||
17 | #include <asm/uaccess.h> | ||
18 | #include <asm/ia32.h> | 18 | #include <asm/ia32.h> |
19 | #include <asm/syscalls.h> | ||
19 | 20 | ||
20 | asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, | 21 | asmlinkage long sys_mmap(unsigned long addr, unsigned long len, |
21 | unsigned long fd, unsigned long off) | 22 | unsigned long prot, unsigned long flags, |
23 | unsigned long fd, unsigned long off) | ||
22 | { | 24 | { |
23 | long error; | 25 | long error; |
24 | struct file * file; | 26 | struct file *file; |
25 | 27 | ||
26 | error = -EINVAL; | 28 | error = -EINVAL; |
27 | if (off & ~PAGE_MASK) | 29 | if (off & ~PAGE_MASK) |
@@ -56,9 +58,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin, | |||
56 | unmapped base down for this case. This can give | 58 | unmapped base down for this case. This can give |
57 | conflicts with the heap, but we assume that glibc | 59 | conflicts with the heap, but we assume that glibc |
58 | malloc knows how to fall back to mmap. Give it 1GB | 60 | malloc knows how to fall back to mmap. Give it 1GB |
59 | of playground for now. -AK */ | 61 | of playground for now. -AK */ |
60 | *begin = 0x40000000; | 62 | *begin = 0x40000000; |
61 | *end = 0x80000000; | 63 | *end = 0x80000000; |
62 | if (current->flags & PF_RANDOMIZE) { | 64 | if (current->flags & PF_RANDOMIZE) { |
63 | new_begin = randomize_range(*begin, *begin + 0x02000000, 0); | 65 | new_begin = randomize_range(*begin, *begin + 0x02000000, 0); |
64 | if (new_begin) | 66 | if (new_begin) |
@@ -66,9 +68,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin, | |||
66 | } | 68 | } |
67 | } else { | 69 | } else { |
68 | *begin = TASK_UNMAPPED_BASE; | 70 | *begin = TASK_UNMAPPED_BASE; |
69 | *end = TASK_SIZE; | 71 | *end = TASK_SIZE; |
70 | } | 72 | } |
71 | } | 73 | } |
72 | 74 | ||
73 | unsigned long | 75 | unsigned long |
74 | arch_get_unmapped_area(struct file *filp, unsigned long addr, | 76 | arch_get_unmapped_area(struct file *filp, unsigned long addr, |
@@ -78,11 +80,11 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
78 | struct vm_area_struct *vma; | 80 | struct vm_area_struct *vma; |
79 | unsigned long start_addr; | 81 | unsigned long start_addr; |
80 | unsigned long begin, end; | 82 | unsigned long begin, end; |
81 | 83 | ||
82 | if (flags & MAP_FIXED) | 84 | if (flags & MAP_FIXED) |
83 | return addr; | 85 | return addr; |
84 | 86 | ||
85 | find_start_end(flags, &begin, &end); | 87 | find_start_end(flags, &begin, &end); |
86 | 88 | ||
87 | if (len > end) | 89 | if (len > end) |
88 | return -ENOMEM; | 90 | return -ENOMEM; |
@@ -96,12 +98,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
96 | } | 98 | } |
97 | if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) | 99 | if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) |
98 | && len <= mm->cached_hole_size) { | 100 | && len <= mm->cached_hole_size) { |
99 | mm->cached_hole_size = 0; | 101 | mm->cached_hole_size = 0; |
100 | mm->free_area_cache = begin; | 102 | mm->free_area_cache = begin; |
101 | } | 103 | } |
102 | addr = mm->free_area_cache; | 104 | addr = mm->free_area_cache; |
103 | if (addr < begin) | 105 | if (addr < begin) |
104 | addr = begin; | 106 | addr = begin; |
105 | start_addr = addr; | 107 | start_addr = addr; |
106 | 108 | ||
107 | full_search: | 109 | full_search: |
@@ -127,7 +129,7 @@ full_search: | |||
127 | return addr; | 129 | return addr; |
128 | } | 130 | } |
129 | if (addr + mm->cached_hole_size < vma->vm_start) | 131 | if (addr + mm->cached_hole_size < vma->vm_start) |
130 | mm->cached_hole_size = vma->vm_start - addr; | 132 | mm->cached_hole_size = vma->vm_start - addr; |
131 | 133 | ||
132 | addr = vma->vm_end; | 134 | addr = vma->vm_end; |
133 | } | 135 | } |
@@ -177,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
177 | vma = find_vma(mm, addr-len); | 179 | vma = find_vma(mm, addr-len); |
178 | if (!vma || addr <= vma->vm_start) | 180 | if (!vma || addr <= vma->vm_start) |
179 | /* remember the address as a hint for next time */ | 181 | /* remember the address as a hint for next time */ |
180 | return (mm->free_area_cache = addr-len); | 182 | return mm->free_area_cache = addr-len; |
181 | } | 183 | } |
182 | 184 | ||
183 | if (mm->mmap_base < len) | 185 | if (mm->mmap_base < len) |
@@ -194,7 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
194 | vma = find_vma(mm, addr); | 196 | vma = find_vma(mm, addr); |
195 | if (!vma || addr+len <= vma->vm_start) | 197 | if (!vma || addr+len <= vma->vm_start) |
196 | /* remember the address as a hint for next time */ | 198 | /* remember the address as a hint for next time */ |
197 | return (mm->free_area_cache = addr); | 199 | return mm->free_area_cache = addr; |
198 | 200 | ||
199 | /* remember the largest hole we saw so far */ | 201 | /* remember the largest hole we saw so far */ |
200 | if (addr + mm->cached_hole_size < vma->vm_start) | 202 | if (addr + mm->cached_hole_size < vma->vm_start) |
@@ -224,13 +226,13 @@ bottomup: | |||
224 | } | 226 | } |
225 | 227 | ||
226 | 228 | ||
227 | asmlinkage long sys_uname(struct new_utsname __user * name) | 229 | asmlinkage long sys_uname(struct new_utsname __user *name) |
228 | { | 230 | { |
229 | int err; | 231 | int err; |
230 | down_read(&uts_sem); | 232 | down_read(&uts_sem); |
231 | err = copy_to_user(name, utsname(), sizeof (*name)); | 233 | err = copy_to_user(name, utsname(), sizeof(*name)); |
232 | up_read(&uts_sem); | 234 | up_read(&uts_sem); |
233 | if (personality(current->personality) == PER_LINUX32) | 235 | if (personality(current->personality) == PER_LINUX32) |
234 | err |= copy_to_user(&name->machine, "i686", 5); | 236 | err |= copy_to_user(&name->machine, "i686", 5); |
235 | return err ? -EFAULT : 0; | 237 | return err ? -EFAULT : 0; |
236 | } | 238 | } |
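Most of the sys_x86_64.c hunks are whitespace and checkpatch cleanups (dropping the parentheses around `mm->free_area_cache = addr` is purely stylistic), but they sit on a heuristic worth spelling out: the search resumes from mm->free_area_cache, and mm->cached_hole_size remembers the largest hole skipped so a smaller later request restarts from the bottom rather than missing it. An illustrative condensation of that loop, with MAP_32BIT handling and the full_search retry omitted:

    /* Sketch of the cached-hole search; 'begin'/'end' come from
     * find_start_end(), error/wrap handling is simplified. */
    static unsigned long find_cached_hole(struct mm_struct *mm,
                                          unsigned long begin,
                                          unsigned long end,
                                          unsigned long len)
    {
        struct vm_area_struct *vma;
        unsigned long addr = mm->free_area_cache;

        if (len <= mm->cached_hole_size) {
            /* An earlier scan skipped a hole at least this big:
             * restart from the bottom so we do not miss it. */
            mm->cached_hole_size = 0;
            addr = begin;
        }
        if (addr < begin)
            addr = begin;

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
            if (end - len < addr)
                return -ENOMEM;    /* real code retries via full_search */
            if (!vma || addr + len <= vma->vm_start) {
                mm->free_area_cache = addr + len;    /* resume point */
                return addr;
            }
            /* Remember the largest hole stepped over. */
            if (addr + mm->cached_hole_size < vma->vm_start)
                mm->cached_hole_size = vma->vm_start - addr;
            addr = vma->vm_end;
        }
    }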
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c index 170d43c17487..3d1be4f0fac5 100644 --- a/arch/x86/kernel/syscall_64.c +++ b/arch/x86/kernel/syscall_64.c | |||
@@ -8,12 +8,12 @@ | |||
8 | #define __NO_STUBS | 8 | #define __NO_STUBS |
9 | 9 | ||
10 | #define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; | 10 | #define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; |
11 | #undef _ASM_X86_64_UNISTD_H_ | 11 | #undef ASM_X86__UNISTD_64_H |
12 | #include <asm/unistd_64.h> | 12 | #include <asm/unistd_64.h> |
13 | 13 | ||
14 | #undef __SYSCALL | 14 | #undef __SYSCALL |
15 | #define __SYSCALL(nr, sym) [nr] = sym, | 15 | #define __SYSCALL(nr, sym) [nr] = sym, |
16 | #undef _ASM_X86_64_UNISTD_H_ | 16 | #undef ASM_X86__UNISTD_64_H |
17 | 17 | ||
18 | typedef void (*sys_call_ptr_t)(void); | 18 | typedef void (*sys_call_ptr_t)(void); |
19 | 19 | ||
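The syscall_64.c change only tracks the include-guard rename (_ASM_X86_64_UNISTD_H_ becomes ASM_X86__UNISTD_64_H), but the guard #undef is the whole trick in this file: unistd_64.h is pulled in twice with different __SYSCALL() expansions, first producing declarations and then table initializers. Condensed sketch of the pattern; the table definition at the end is the shape this file presumably builds (not shown in the hunk):

    #define __SYSCALL(nr, sym) extern asmlinkage void sym(void);
    #undef ASM_X86__UNISTD_64_H          /* defeat the include guard */
    #include <asm/unistd_64.h>           /* pass 1: declare each handler */

    #undef __SYSCALL
    #define __SYSCALL(nr, sym) [nr] = sym,
    #undef ASM_X86__UNISTD_64_H

    typedef void (*sys_call_ptr_t)(void);
    extern void sys_ni_syscall(void);

    const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
        /* default every slot, then pass 2 overrides implemented ones */
        [0 ... __NR_syscall_max] = sys_ni_syscall,
    #include <asm/unistd_64.h>           /* pass 2: fill the table */
    };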
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c index ffe3c664afc0..bbecf8b6bf96 100644 --- a/arch/x86/kernel/time_32.c +++ b/arch/x86/kernel/time_32.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <asm/arch_hooks.h> | 36 | #include <asm/arch_hooks.h> |
37 | #include <asm/hpet.h> | 37 | #include <asm/hpet.h> |
38 | #include <asm/time.h> | 38 | #include <asm/time.h> |
39 | #include <asm/timer.h> | ||
39 | 40 | ||
40 | #include "do_timer.h" | 41 | #include "do_timer.h" |
41 | 42 | ||
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c index ab6bf375a307..6bb7b8579e70 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <asm/ldt.h> | 10 | #include <asm/ldt.h> |
11 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
12 | #include <asm/proto.h> | 12 | #include <asm/proto.h> |
13 | #include <asm/syscalls.h> | ||
13 | 14 | ||
14 | #include "tls.h" | 15 | #include "tls.h" |
15 | 16 | ||
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c index 03df8e45e5a1..da5a5964fccb 100644 --- a/arch/x86/kernel/traps_32.c +++ b/arch/x86/kernel/traps_32.c | |||
@@ -1228,7 +1228,6 @@ void __init trap_init(void) | |||
1228 | 1228 | ||
1229 | set_bit(SYSCALL_VECTOR, used_vectors); | 1229 | set_bit(SYSCALL_VECTOR, used_vectors); |
1230 | 1230 | ||
1231 | init_thread_xstate(); | ||
1232 | /* | 1231 | /* |
1233 | * Should be a barrier for any external CPU state: | 1232 | * Should be a barrier for any external CPU state: |
1234 | */ | 1233 | */ |
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c index 513caaca7115..56d6f1147785 100644 --- a/arch/x86/kernel/traps_64.c +++ b/arch/x86/kernel/traps_64.c | |||
@@ -32,6 +32,8 @@ | |||
32 | #include <linux/bug.h> | 32 | #include <linux/bug.h> |
33 | #include <linux/nmi.h> | 33 | #include <linux/nmi.h> |
34 | #include <linux/mm.h> | 34 | #include <linux/mm.h> |
35 | #include <linux/smp.h> | ||
36 | #include <linux/io.h> | ||
35 | 37 | ||
36 | #if defined(CONFIG_EDAC) | 38 | #if defined(CONFIG_EDAC) |
37 | #include <linux/edac.h> | 39 | #include <linux/edac.h> |
@@ -45,9 +47,6 @@ | |||
45 | #include <asm/unwind.h> | 47 | #include <asm/unwind.h> |
46 | #include <asm/desc.h> | 48 | #include <asm/desc.h> |
47 | #include <asm/i387.h> | 49 | #include <asm/i387.h> |
48 | #include <asm/nmi.h> | ||
49 | #include <asm/smp.h> | ||
50 | #include <asm/io.h> | ||
51 | #include <asm/pgalloc.h> | 50 | #include <asm/pgalloc.h> |
52 | #include <asm/proto.h> | 51 | #include <asm/proto.h> |
53 | #include <asm/pda.h> | 52 | #include <asm/pda.h> |
@@ -85,7 +84,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) | |||
85 | 84 | ||
86 | void printk_address(unsigned long address, int reliable) | 85 | void printk_address(unsigned long address, int reliable) |
87 | { | 86 | { |
88 | printk(" [<%016lx>] %s%pS\n", address, reliable ? "": "? ", (void *) address); | 87 | printk(" [<%016lx>] %s%pS\n", |
88 | address, reliable ? "" : "? ", (void *) address); | ||
89 | } | 89 | } |
90 | 90 | ||
91 | static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | 91 | static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, |
@@ -98,7 +98,8 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | |||
98 | [STACKFAULT_STACK - 1] = "#SS", | 98 | [STACKFAULT_STACK - 1] = "#SS", |
99 | [MCE_STACK - 1] = "#MC", | 99 | [MCE_STACK - 1] = "#MC", |
100 | #if DEBUG_STKSZ > EXCEPTION_STKSZ | 100 | #if DEBUG_STKSZ > EXCEPTION_STKSZ |
101 | [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]" | 101 | [N_EXCEPTION_STACKS ... |
102 | N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]" | ||
102 | #endif | 103 | #endif |
103 | }; | 104 | }; |
104 | unsigned k; | 105 | unsigned k; |
@@ -163,7 +164,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | |||
163 | } | 164 | } |
164 | 165 | ||
165 | /* | 166 | /* |
166 | * x86-64 can have up to three kernel stacks: | 167 | * x86-64 can have up to three kernel stacks: |
167 | * process stack | 168 | * process stack |
168 | * interrupt stack | 169 | * interrupt stack |
169 | * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack | 170 | * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack |
@@ -219,7 +220,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
219 | const struct stacktrace_ops *ops, void *data) | 220 | const struct stacktrace_ops *ops, void *data) |
220 | { | 221 | { |
221 | const unsigned cpu = get_cpu(); | 222 | const unsigned cpu = get_cpu(); |
222 | unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr; | 223 | unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; |
223 | unsigned used = 0; | 224 | unsigned used = 0; |
224 | struct thread_info *tinfo; | 225 | struct thread_info *tinfo; |
225 | 226 | ||
@@ -237,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
237 | if (!bp) { | 238 | if (!bp) { |
238 | if (task == current) { | 239 | if (task == current) { |
239 | /* Grab bp right from our regs */ | 240 | /* Grab bp right from our regs */ |
240 | asm("movq %%rbp, %0" : "=r" (bp) :); | 241 | asm("movq %%rbp, %0" : "=r" (bp) : ); |
241 | } else { | 242 | } else { |
242 | /* bp is the last reg pushed by switch_to */ | 243 | /* bp is the last reg pushed by switch_to */ |
243 | bp = *(unsigned long *) task->thread.sp; | 244 | bp = *(unsigned long *) task->thread.sp; |
@@ -357,11 +358,15 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, | |||
357 | unsigned long *stack; | 358 | unsigned long *stack; |
358 | int i; | 359 | int i; |
359 | const int cpu = smp_processor_id(); | 360 | const int cpu = smp_processor_id(); |
360 | unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr); | 361 | unsigned long *irqstack_end = |
361 | unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE); | 362 | (unsigned long *) (cpu_pda(cpu)->irqstackptr); |
363 | unsigned long *irqstack = | ||
364 | (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE); | ||
362 | 365 | ||
363 | // debugging aid: "show_stack(NULL, NULL);" prints the | 366 | /* |
364 | // back trace for this cpu. | 367 | * debugging aid: "show_stack(NULL, NULL);" prints the |
368 | * back trace for this cpu. | ||
369 | */ | ||
365 | 370 | ||
366 | if (sp == NULL) { | 371 | if (sp == NULL) { |
367 | if (task) | 372 | if (task) |
@@ -404,7 +409,7 @@ void dump_stack(void) | |||
404 | 409 | ||
405 | #ifdef CONFIG_FRAME_POINTER | 410 | #ifdef CONFIG_FRAME_POINTER |
406 | if (!bp) | 411 | if (!bp) |
407 | asm("movq %%rbp, %0" : "=r" (bp):); | 412 | asm("movq %%rbp, %0" : "=r" (bp) : ); |
408 | #endif | 413 | #endif |
409 | 414 | ||
410 | printk("Pid: %d, comm: %.20s %s %s %.*s\n", | 415 | printk("Pid: %d, comm: %.20s %s %s %.*s\n", |
@@ -414,7 +419,6 @@ void dump_stack(void) | |||
414 | init_utsname()->version); | 419 | init_utsname()->version); |
415 | show_trace(NULL, NULL, &stack, bp); | 420 | show_trace(NULL, NULL, &stack, bp); |
416 | } | 421 | } |
417 | |||
418 | EXPORT_SYMBOL(dump_stack); | 422 | EXPORT_SYMBOL(dump_stack); |
419 | 423 | ||
420 | void show_registers(struct pt_regs *regs) | 424 | void show_registers(struct pt_regs *regs) |
@@ -493,7 +497,7 @@ unsigned __kprobes long oops_begin(void) | |||
493 | raw_local_irq_save(flags); | 497 | raw_local_irq_save(flags); |
494 | cpu = smp_processor_id(); | 498 | cpu = smp_processor_id(); |
495 | if (!__raw_spin_trylock(&die_lock)) { | 499 | if (!__raw_spin_trylock(&die_lock)) { |
496 | if (cpu == die_owner) | 500 | if (cpu == die_owner) |
497 | /* nested oops. should stop eventually */; | 501 | /* nested oops. should stop eventually */; |
498 | else | 502 | else |
499 | __raw_spin_lock(&die_lock); | 503 | __raw_spin_lock(&die_lock); |
@@ -638,7 +642,7 @@ kernel_trap: | |||
638 | } | 642 | } |
639 | 643 | ||
640 | #define DO_ERROR(trapnr, signr, str, name) \ | 644 | #define DO_ERROR(trapnr, signr, str, name) \ |
641 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | 645 | asmlinkage void do_##name(struct pt_regs *regs, long error_code) \ |
642 | { \ | 646 | { \ |
643 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 647 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
644 | == NOTIFY_STOP) \ | 648 | == NOTIFY_STOP) \ |
@@ -648,7 +652,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | |||
648 | } | 652 | } |
649 | 653 | ||
650 | #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ | 654 | #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ |
651 | asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | 655 | asmlinkage void do_##name(struct pt_regs *regs, long error_code) \ |
652 | { \ | 656 | { \ |
653 | siginfo_t info; \ | 657 | siginfo_t info; \ |
654 | info.si_signo = signr; \ | 658 | info.si_signo = signr; \ |
@@ -683,7 +687,7 @@ asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code) | |||
683 | preempt_conditional_cli(regs); | 687 | preempt_conditional_cli(regs); |
684 | } | 688 | } |
685 | 689 | ||
686 | asmlinkage void do_double_fault(struct pt_regs * regs, long error_code) | 690 | asmlinkage void do_double_fault(struct pt_regs *regs, long error_code) |
687 | { | 691 | { |
688 | static const char str[] = "double fault"; | 692 | static const char str[] = "double fault"; |
689 | struct task_struct *tsk = current; | 693 | struct task_struct *tsk = current; |
@@ -778,9 +782,10 @@ io_check_error(unsigned char reason, struct pt_regs *regs) | |||
778 | } | 782 | } |
779 | 783 | ||
780 | static notrace __kprobes void | 784 | static notrace __kprobes void |
781 | unknown_nmi_error(unsigned char reason, struct pt_regs * regs) | 785 | unknown_nmi_error(unsigned char reason, struct pt_regs *regs) |
782 | { | 786 | { |
783 | if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) | 787 | if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == |
788 | NOTIFY_STOP) | ||
784 | return; | 789 | return; |
785 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", | 790 | printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", |
786 | reason); | 791 | reason); |
@@ -882,7 +887,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) | |||
882 | else if (user_mode(eregs)) | 887 | else if (user_mode(eregs)) |
883 | regs = task_pt_regs(current); | 888 | regs = task_pt_regs(current); |
884 | /* Exception from kernel and interrupts are enabled. Move to | 889 | /* Exception from kernel and interrupts are enabled. Move to |
885 | kernel process stack. */ | 890 | kernel process stack. */ |
886 | else if (eregs->flags & X86_EFLAGS_IF) | 891 | else if (eregs->flags & X86_EFLAGS_IF) |
887 | regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); | 892 | regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); |
888 | if (eregs != regs) | 893 | if (eregs != regs) |
@@ -891,7 +896,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) | |||
891 | } | 896 | } |
892 | 897 | ||
893 | /* runs on IST stack. */ | 898 | /* runs on IST stack. */ |
894 | asmlinkage void __kprobes do_debug(struct pt_regs * regs, | 899 | asmlinkage void __kprobes do_debug(struct pt_regs *regs, |
895 | unsigned long error_code) | 900 | unsigned long error_code) |
896 | { | 901 | { |
897 | struct task_struct *tsk = current; | 902 | struct task_struct *tsk = current; |
@@ -1035,7 +1040,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs) | |||
1035 | 1040 | ||
1036 | asmlinkage void bad_intr(void) | 1041 | asmlinkage void bad_intr(void) |
1037 | { | 1042 | { |
1038 | printk("bad interrupt"); | 1043 | printk("bad interrupt"); |
1039 | } | 1044 | } |
1040 | 1045 | ||
1041 | asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) | 1046 | asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) |
@@ -1047,7 +1052,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) | |||
1047 | 1052 | ||
1048 | conditional_sti(regs); | 1053 | conditional_sti(regs); |
1049 | if (!user_mode(regs) && | 1054 | if (!user_mode(regs) && |
1050 | kernel_math_error(regs, "kernel simd math error", 19)) | 1055 | kernel_math_error(regs, "kernel simd math error", 19)) |
1051 | return; | 1056 | return; |
1052 | 1057 | ||
1053 | /* | 1058 | /* |
@@ -1092,7 +1097,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) | |||
1092 | force_sig_info(SIGFPE, &info, task); | 1097 | force_sig_info(SIGFPE, &info, task); |
1093 | } | 1098 | } |
1094 | 1099 | ||
1095 | asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs) | 1100 | asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs) |
1096 | { | 1101 | { |
1097 | } | 1102 | } |
1098 | 1103 | ||
@@ -1134,7 +1139,7 @@ asmlinkage void math_state_restore(void) | |||
1134 | /* | 1139 | /* |
1135 | * Paranoid restore. send a SIGSEGV if we fail to restore the state. | 1140 | * Paranoid restore. send a SIGSEGV if we fail to restore the state. |
1136 | */ | 1141 | */ |
1137 | if (unlikely(restore_fpu_checking(&me->thread.xstate->fxsave))) { | 1142 | if (unlikely(restore_fpu_checking(me))) { |
1138 | stts(); | 1143 | stts(); |
1139 | force_sig(SIGSEGV, me); | 1144 | force_sig(SIGSEGV, me); |
1140 | return; | 1145 | return; |
@@ -1149,8 +1154,10 @@ void __init trap_init(void) | |||
1149 | set_intr_gate(0, ÷_error); | 1154 | set_intr_gate(0, ÷_error); |
1150 | set_intr_gate_ist(1, &debug, DEBUG_STACK); | 1155 | set_intr_gate_ist(1, &debug, DEBUG_STACK); |
1151 | set_intr_gate_ist(2, &nmi, NMI_STACK); | 1156 | set_intr_gate_ist(2, &nmi, NMI_STACK); |
1152 | set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */ | 1157 | /* int3 can be called from all */ |
1153 | set_system_gate(4, &overflow); /* int4 can be called from all */ | 1158 | set_system_gate_ist(3, &int3, DEBUG_STACK); |
1159 | /* int4 can be called from all */ | ||
1160 | set_system_gate(4, &overflow); | ||
1154 | set_intr_gate(5, &bounds); | 1161 | set_intr_gate(5, &bounds); |
1155 | set_intr_gate(6, &invalid_op); | 1162 | set_intr_gate(6, &invalid_op); |
1156 | set_intr_gate(7, &device_not_available); | 1163 | set_intr_gate(7, &device_not_available); |
@@ -1173,10 +1180,6 @@ void __init trap_init(void) | |||
1173 | set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall); | 1180 | set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall); |
1174 | #endif | 1181 | #endif |
1175 | /* | 1182 | /* |
1176 | * initialize the per thread extended state: | ||
1177 | */ | ||
1178 | init_thread_xstate(); | ||
1179 | /* | ||
1180 | * Should be a barrier for any external CPU state: | 1183 | * Should be a barrier for any external CPU state: |
1181 | */ | 1184 | */ |
1182 | cpu_init(); | 1185 | cpu_init(); |
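The traps_64.c hunks are mostly checkpatch reflows, but two are functional: init_thread_xstate() leaves trap_init() (xstate setup now happens through the new xsave code added later in this series), and restore_fpu_checking() takes the task instead of a raw fxsave pointer, so the restore path can pick xsave vs. fxsave per task. A plausible shape for the retargeted helper, assuming the TS_XSAVE dispatch used elsewhere in this series:

    /* Sketch of the task-based restore (assumed; the real inline lives in
     * asm/i387.h): choose xrstor vs. fxrstor by the thread's status bit. */
    static inline int restore_fpu_checking(struct task_struct *tsk)
    {
        if (task_thread_info(tsk)->status & TS_XSAVE)
            return xrstor_checking(&tsk->thread.xstate->xsave);
        else
            return fxrstor_checking(&tsk->thread.xstate->fxsave);
    }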
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index 594ef47f0a63..61a97e616f70 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c | |||
@@ -25,45 +25,31 @@ | |||
25 | #include <asm/visws/cobalt.h> | 25 | #include <asm/visws/cobalt.h> |
26 | #include <asm/visws/piix4.h> | 26 | #include <asm/visws/piix4.h> |
27 | #include <asm/arch_hooks.h> | 27 | #include <asm/arch_hooks.h> |
28 | #include <asm/io_apic.h> | ||
28 | #include <asm/fixmap.h> | 29 | #include <asm/fixmap.h> |
29 | #include <asm/reboot.h> | 30 | #include <asm/reboot.h> |
30 | #include <asm/setup.h> | 31 | #include <asm/setup.h> |
31 | #include <asm/e820.h> | 32 | #include <asm/e820.h> |
32 | #include <asm/smp.h> | ||
33 | #include <asm/io.h> | 33 | #include <asm/io.h> |
34 | 34 | ||
35 | #include <mach_ipi.h> | 35 | #include <mach_ipi.h> |
36 | 36 | ||
37 | #include "mach_apic.h" | 37 | #include "mach_apic.h" |
38 | 38 | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/smp.h> | ||
41 | |||
42 | #include <linux/kernel_stat.h> | 39 | #include <linux/kernel_stat.h> |
43 | #include <linux/interrupt.h> | ||
44 | #include <linux/init.h> | ||
45 | 40 | ||
46 | #include <asm/io.h> | ||
47 | #include <asm/apic.h> | ||
48 | #include <asm/i8259.h> | 41 | #include <asm/i8259.h> |
49 | #include <asm/irq_vectors.h> | 42 | #include <asm/irq_vectors.h> |
50 | #include <asm/visws/cobalt.h> | ||
51 | #include <asm/visws/lithium.h> | 43 | #include <asm/visws/lithium.h> |
52 | #include <asm/visws/piix4.h> | ||
53 | 44 | ||
54 | #include <linux/sched.h> | 45 | #include <linux/sched.h> |
55 | #include <linux/kernel.h> | 46 | #include <linux/kernel.h> |
56 | #include <linux/init.h> | ||
57 | #include <linux/pci.h> | 47 | #include <linux/pci.h> |
58 | #include <linux/pci_ids.h> | 48 | #include <linux/pci_ids.h> |
59 | 49 | ||
60 | extern int no_broadcast; | 50 | extern int no_broadcast; |
61 | 51 | ||
62 | #include <asm/io.h> | ||
63 | #include <asm/apic.h> | 52 | #include <asm/apic.h> |
64 | #include <asm/arch_hooks.h> | ||
65 | #include <asm/visws/cobalt.h> | ||
66 | #include <asm/visws/lithium.h> | ||
67 | 53 | ||
68 | char visws_board_type = -1; | 54 | char visws_board_type = -1; |
69 | char visws_board_rev = -1; | 55 | char visws_board_rev = -1; |
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 38f566fa27d2..4eeb5cf9720d 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <asm/io.h> | 46 | #include <asm/io.h> |
47 | #include <asm/tlbflush.h> | 47 | #include <asm/tlbflush.h> |
48 | #include <asm/irq.h> | 48 | #include <asm/irq.h> |
49 | #include <asm/syscalls.h> | ||
49 | 50 | ||
50 | /* | 51 | /* |
51 | * Known problems: | 52 | * Known problems: |
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 6ca515d6db54..61531d5c9507 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c | |||
@@ -905,8 +905,8 @@ static inline int __init activate_vmi(void) | |||
905 | #endif | 905 | #endif |
906 | 906 | ||
907 | #ifdef CONFIG_X86_LOCAL_APIC | 907 | #ifdef CONFIG_X86_LOCAL_APIC |
908 | para_fill(pv_apic_ops.apic_read, APICRead); | 908 | para_fill(apic_ops->read, APICRead); |
909 | para_fill(pv_apic_ops.apic_write, APICWrite); | 909 | para_fill(apic_ops->write, APICWrite); |
910 | #endif | 910 | #endif |
911 | 911 | ||
912 | /* | 912 | /* |

diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c new file mode 100644 index 000000000000..07713d64debe --- /dev/null +++ b/arch/x86/kernel/xsave.c | |||
@@ -0,0 +1,316 @@ | |||
1 | /* | ||
2 | * xsave/xrstor support. | ||
3 | * | ||
4 | * Author: Suresh Siddha <suresh.b.siddha@intel.com> | ||
5 | */ | ||
6 | #include <linux/bootmem.h> | ||
7 | #include <linux/compat.h> | ||
8 | #include <asm/i387.h> | ||
9 | #ifdef CONFIG_IA32_EMULATION | ||
10 | #include <asm/sigcontext32.h> | ||
11 | #endif | ||
12 | #include <asm/xcr.h> | ||
13 | |||
14 | /* | ||
15 | * Supported feature mask by the CPU and the kernel. | ||
16 | */ | ||
17 | u64 pcntxt_mask; | ||
18 | |||
19 | struct _fpx_sw_bytes fx_sw_reserved; | ||
20 | #ifdef CONFIG_IA32_EMULATION | ||
21 | struct _fpx_sw_bytes fx_sw_reserved_ia32; | ||
22 | #endif | ||
23 | |||
24 | /* | ||
25 | * Check for the presence of extended state information in the | ||
26 | * user fpstate pointer in the sigcontext. | ||
27 | */ | ||
28 | int check_for_xstate(struct i387_fxsave_struct __user *buf, | ||
29 | void __user *fpstate, | ||
30 | struct _fpx_sw_bytes *fx_sw_user) | ||
31 | { | ||
32 | int min_xstate_size = sizeof(struct i387_fxsave_struct) + | ||
33 | sizeof(struct xsave_hdr_struct); | ||
34 | unsigned int magic2; | ||
35 | int err; | ||
36 | |||
37 | err = __copy_from_user(fx_sw_user, &buf->sw_reserved[0], | ||
38 | sizeof(struct _fpx_sw_bytes)); | ||
39 | |||
40 | if (err) | ||
41 | return err; | ||
42 | |||
43 | /* | ||
44 | * First Magic check failed. | ||
45 | */ | ||
46 | if (fx_sw_user->magic1 != FP_XSTATE_MAGIC1) | ||
47 | return -1; | ||
48 | |||
49 | /* | ||
50 | * Check for error scenarios. | ||
51 | */ | ||
52 | if (fx_sw_user->xstate_size < min_xstate_size || | ||
53 | fx_sw_user->xstate_size > xstate_size || | ||
54 | fx_sw_user->xstate_size > fx_sw_user->extended_size) | ||
55 | return -1; | ||
56 | |||
57 | err = __get_user(magic2, (__u32 *) (((void *)fpstate) + | ||
58 | fx_sw_user->extended_size - | ||
59 | FP_XSTATE_MAGIC2_SIZE)); | ||
60 | /* | ||
61 | * Check for the presence of second magic word at the end of memory | ||
62 | * layout. This detects the case where the user just copied the legacy | ||
63 | fpstate layout without copying the extended state information | ||
64 | * in the memory layout. | ||
65 | */ | ||
66 | if (err || magic2 != FP_XSTATE_MAGIC2) | ||
67 | return -1; | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | #ifdef CONFIG_X86_64 | ||
73 | /* | ||
74 | * Signal frame handlers. | ||
75 | */ | ||
76 | |||
77 | int save_i387_xstate(void __user *buf) | ||
78 | { | ||
79 | struct task_struct *tsk = current; | ||
80 | int err = 0; | ||
81 | |||
82 | if (!access_ok(VERIFY_WRITE, buf, sig_xstate_size)) | ||
83 | return -EACCES; | ||
84 | |||
85 | BUG_ON(sig_xstate_size < xstate_size); | ||
86 | |||
87 | if ((unsigned long)buf % 64) | ||
88 | printk("save_i387_xstate: bad fpstate %p\n", buf); | ||
89 | |||
90 | if (!used_math()) | ||
91 | return 0; | ||
92 | clear_used_math(); /* trigger finit */ | ||
93 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | ||
94 | /* | ||
95 | * Start with clearing the user buffer. This will present a | ||
96 | * clean context for the bytes not touched by the fxsave/xsave. | ||
97 | */ | ||
98 | __clear_user(buf, sig_xstate_size); | ||
99 | |||
100 | if (task_thread_info(tsk)->status & TS_XSAVE) | ||
101 | err = xsave_user(buf); | ||
102 | else | ||
103 | err = fxsave_user(buf); | ||
104 | |||
105 | if (err) | ||
106 | return err; | ||
107 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | ||
108 | stts(); | ||
109 | } else { | ||
110 | if (__copy_to_user(buf, &tsk->thread.xstate->fxsave, | ||
111 | xstate_size)) | ||
112 | return -1; | ||
113 | } | ||
114 | |||
115 | if (task_thread_info(tsk)->status & TS_XSAVE) { | ||
116 | struct _fpstate __user *fx = buf; | ||
117 | |||
118 | err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved, | ||
119 | sizeof(struct _fpx_sw_bytes)); | ||
120 | |||
121 | err |= __put_user(FP_XSTATE_MAGIC2, | ||
122 | (__u32 __user *) (buf + sig_xstate_size | ||
123 | - FP_XSTATE_MAGIC2_SIZE)); | ||
124 | } | ||
125 | |||
126 | return 1; | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * Restore the extended state if present. Otherwise, restore the FP/SSE | ||
131 | * state. | ||
132 | */ | ||
133 | int restore_user_xstate(void __user *buf) | ||
134 | { | ||
135 | struct _fpx_sw_bytes fx_sw_user; | ||
136 | u64 mask; | ||
137 | int err; | ||
138 | |||
139 | if (((unsigned long)buf % 64) || | ||
140 | check_for_xstate(buf, buf, &fx_sw_user)) | ||
141 | goto fx_only; | ||
142 | |||
143 | mask = fx_sw_user.xstate_bv; | ||
144 | |||
145 | /* | ||
146 | * restore the state passed by the user. | ||
147 | */ | ||
148 | err = xrestore_user(buf, mask); | ||
149 | if (err) | ||
150 | return err; | ||
151 | |||
152 | /* | ||
153 | * init the state skipped by the user. | ||
154 | */ | ||
155 | mask = pcntxt_mask & ~mask; | ||
156 | |||
157 | xrstor_state(init_xstate_buf, mask); | ||
158 | |||
159 | return 0; | ||
160 | |||
161 | fx_only: | ||
162 | /* | ||
163 | * couldn't find the extended state information in the | ||
164 | * memory layout. Restore just the FP/SSE and init all | ||
165 | * the other extended state. | ||
166 | */ | ||
167 | xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE); | ||
168 | return fxrstor_checking((__force struct i387_fxsave_struct *)buf); | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * This restores directly out of user space. Exceptions are handled. | ||
173 | */ | ||
174 | int restore_i387_xstate(void __user *buf) | ||
175 | { | ||
176 | struct task_struct *tsk = current; | ||
177 | int err = 0; | ||
178 | |||
179 | if (!buf) { | ||
180 | if (used_math()) | ||
181 | goto clear; | ||
182 | return 0; | ||
183 | } else | ||
184 | if (!access_ok(VERIFY_READ, buf, sig_xstate_size)) | ||
185 | return -EACCES; | ||
186 | |||
187 | if (!used_math()) { | ||
188 | err = init_fpu(tsk); | ||
189 | if (err) | ||
190 | return err; | ||
191 | } | ||
192 | |||
193 | if (!(task_thread_info(current)->status & TS_USEDFPU)) { | ||
194 | clts(); | ||
195 | task_thread_info(current)->status |= TS_USEDFPU; | ||
196 | } | ||
197 | if (task_thread_info(tsk)->status & TS_XSAVE) | ||
198 | err = restore_user_xstate(buf); | ||
199 | else | ||
200 | err = fxrstor_checking((__force struct i387_fxsave_struct *) | ||
201 | buf); | ||
202 | if (unlikely(err)) { | ||
203 | /* | ||
204 | * Encountered an error while doing the restore from the | ||
205 | * user buffer, clear the fpu state. | ||
206 | */ | ||
207 | clear: | ||
208 | clear_fpu(tsk); | ||
209 | clear_used_math(); | ||
210 | } | ||
211 | return err; | ||
212 | } | ||
213 | #endif | ||
214 | |||
215 | /* | ||
216 | * Prepare the SW reserved portion of the fxsave memory layout, indicating | ||
217 | * the presence of the extended state information in the memory layout | ||
218 | * pointed by the fpstate pointer in the sigcontext. | ||
219 | * This will be saved whenever the FP and extended state context is | ||
220 | * saved on the user stack during the signal handler delivery to the user. | ||
221 | */ | ||
222 | void prepare_fx_sw_frame(void) | ||
223 | { | ||
224 | int size_extended = (xstate_size - sizeof(struct i387_fxsave_struct)) + | ||
225 | FP_XSTATE_MAGIC2_SIZE; | ||
226 | |||
227 | sig_xstate_size = sizeof(struct _fpstate) + size_extended; | ||
228 | |||
229 | #ifdef CONFIG_IA32_EMULATION | ||
230 | sig_xstate_ia32_size = sizeof(struct _fpstate_ia32) + size_extended; | ||
231 | #endif | ||
232 | |||
233 | memset(&fx_sw_reserved, 0, sizeof(fx_sw_reserved)); | ||
234 | |||
235 | fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; | ||
236 | fx_sw_reserved.extended_size = sig_xstate_size; | ||
237 | fx_sw_reserved.xstate_bv = pcntxt_mask; | ||
238 | fx_sw_reserved.xstate_size = xstate_size; | ||
239 | #ifdef CONFIG_IA32_EMULATION | ||
240 | memcpy(&fx_sw_reserved_ia32, &fx_sw_reserved, | ||
241 | sizeof(struct _fpx_sw_bytes)); | ||
242 | fx_sw_reserved_ia32.extended_size = sig_xstate_ia32_size; | ||
243 | #endif | ||
244 | } | ||
245 | |||
246 | /* | ||
247 | * Represents init state for the supported extended state. | ||
248 | */ | ||
249 | struct xsave_struct *init_xstate_buf; | ||
250 | |||
251 | #ifdef CONFIG_X86_64 | ||
252 | unsigned int sig_xstate_size = sizeof(struct _fpstate); | ||
253 | #endif | ||
254 | |||
255 | /* | ||
256 | * Enable the extended processor state save/restore feature | ||
257 | */ | ||
258 | void __cpuinit xsave_init(void) | ||
259 | { | ||
260 | if (!cpu_has_xsave) | ||
261 | return; | ||
262 | |||
263 | set_in_cr4(X86_CR4_OSXSAVE); | ||
264 | |||
265 | /* | ||
266 | * Enable all the features that the HW is capable of | ||
267 | * and the Linux kernel is aware of. | ||
268 | */ | ||
269 | xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask); | ||
270 | } | ||
271 | |||
272 | /* | ||
273 | * setup the xstate image representing the init state | ||
274 | */ | ||
275 | void setup_xstate_init(void) | ||
276 | { | ||
277 | init_xstate_buf = alloc_bootmem(xstate_size); | ||
278 | init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT; | ||
279 | } | ||
280 | |||
281 | /* | ||
282 | * Enable and initialize the xsave feature. | ||
283 | */ | ||
284 | void __init xsave_cntxt_init(void) | ||
285 | { | ||
286 | unsigned int eax, ebx, ecx, edx; | ||
287 | |||
288 | cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx); | ||
289 | pcntxt_mask = eax + ((u64)edx << 32); | ||
290 | |||
291 | if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) { | ||
292 | printk(KERN_ERR "FP/SSE not shown under xsave features 0x%llx\n", | ||
293 | pcntxt_mask); | ||
294 | BUG(); | ||
295 | } | ||
296 | |||
297 | /* | ||
298 | * for now OS knows only about FP/SSE | ||
299 | */ | ||
300 | pcntxt_mask = pcntxt_mask & XCNTXT_MASK; | ||
301 | xsave_init(); | ||
302 | |||
303 | /* | ||
304 | * Recompute the context size for enabled features | ||
305 | */ | ||
306 | cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx); | ||
307 | xstate_size = ebx; | ||
308 | |||
309 | prepare_fx_sw_frame(); | ||
310 | |||
311 | setup_xstate_init(); | ||
312 | |||
313 | printk(KERN_INFO "xsave/xrstor: enabled xstate_bv 0x%llx, " | ||
314 | "cntxt size 0x%x\n", | ||
315 | pcntxt_mask, xstate_size); | ||
316 | } | ||
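The new xsave.c establishes the extended signal-frame convention that __setup_rt_frame() above relies on: prepare_fx_sw_frame() stamps the sw_reserved bytes inside the legacy fxsave image, and a second magic word terminates the frame so a stale or partially copied frame is detected and demoted to an FP/SSE-only restore. The resulting user-stack layout, as check_for_xstate() validates it (offsets illustrative):

    /*
     * buf                            i387_fxsave_struct (legacy 512 bytes)
     *   .sw_reserved                 _fpx_sw_bytes:
     *       magic1                   FP_XSTATE_MAGIC1
     *       extended_size            sig_xstate_size (includes MAGIC2 word)
     *       xstate_bv                features saved (pcntxt_mask)
     *       xstate_size              kernel xstate image size
     * buf + 512                      xsave_hdr_struct + extended state
     * buf + extended_size
     *     - FP_XSTATE_MAGIC2_SIZE    FP_XSTATE_MAGIC2 sentinel
     */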
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index d9249a882aa5..65f0b8a47bed 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -55,6 +55,7 @@ | |||
55 | #include <linux/lguest_launcher.h> | 55 | #include <linux/lguest_launcher.h> |
56 | #include <linux/virtio_console.h> | 56 | #include <linux/virtio_console.h> |
57 | #include <linux/pm.h> | 57 | #include <linux/pm.h> |
58 | #include <asm/apic.h> | ||
58 | #include <asm/lguest.h> | 59 | #include <asm/lguest.h> |
59 | #include <asm/paravirt.h> | 60 | #include <asm/paravirt.h> |
60 | #include <asm/param.h> | 61 | #include <asm/param.h> |
@@ -783,14 +784,44 @@ static void lguest_wbinvd(void) | |||
783 | * code qualifies for Advanced. It will also never interrupt anything. It | 784 | * code qualifies for Advanced. It will also never interrupt anything. It |
784 | * does, however, allow us to get through the Linux boot code. */ | 785 | * does, however, allow us to get through the Linux boot code. */ |
785 | #ifdef CONFIG_X86_LOCAL_APIC | 786 | #ifdef CONFIG_X86_LOCAL_APIC |
786 | static void lguest_apic_write(unsigned long reg, u32 v) | 787 | static void lguest_apic_write(u32 reg, u32 v) |
787 | { | 788 | { |
788 | } | 789 | } |
789 | 790 | ||
790 | static u32 lguest_apic_read(unsigned long reg) | 791 | static u32 lguest_apic_read(u32 reg) |
791 | { | 792 | { |
792 | return 0; | 793 | return 0; |
793 | } | 794 | } |
795 | |||
796 | static u64 lguest_apic_icr_read(void) | ||
797 | { | ||
798 | return 0; | ||
799 | } | ||
800 | |||
801 | static void lguest_apic_icr_write(u32 low, u32 id) | ||
802 | { | ||
803 | /* Warn to see if there are any stray references */ | ||
804 | WARN_ON(1); | ||
805 | } | ||
806 | |||
807 | static void lguest_apic_wait_icr_idle(void) | ||
808 | { | ||
809 | return; | ||
810 | } | ||
811 | |||
812 | static u32 lguest_apic_safe_wait_icr_idle(void) | ||
813 | { | ||
814 | return 0; | ||
815 | } | ||
816 | |||
817 | static struct apic_ops lguest_basic_apic_ops = { | ||
818 | .read = lguest_apic_read, | ||
819 | .write = lguest_apic_write, | ||
820 | .icr_read = lguest_apic_icr_read, | ||
821 | .icr_write = lguest_apic_icr_write, | ||
822 | .wait_icr_idle = lguest_apic_wait_icr_idle, | ||
823 | .safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle, | ||
824 | }; | ||
794 | #endif | 825 | #endif |
795 | 826 | ||
796 | /* STOP! Until an interrupt comes in. */ | 827 | /* STOP! Until an interrupt comes in. */ |
@@ -990,8 +1021,7 @@ __init void lguest_init(void) | |||
990 | 1021 | ||
991 | #ifdef CONFIG_X86_LOCAL_APIC | 1022 | #ifdef CONFIG_X86_LOCAL_APIC |
992 | /* apic read/write intercepts */ | 1023 | /* apic read/write intercepts */ |
993 | pv_apic_ops.apic_write = lguest_apic_write; | 1024 | apic_ops = &lguest_basic_apic_ops; |
994 | pv_apic_ops.apic_read = lguest_apic_read; | ||
995 | #endif | 1025 | #endif |
996 | 1026 | ||
997 | /* time operations */ | 1027 | /* time operations */ |
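The lguest hunk above replaces the two pv_apic_ops function pointers with a full apic_ops table of no-op stubs installed at boot. A self-contained sketch of that ops-table pattern; the names loosely mirror the patch, and all surrounding scaffolding is assumed for illustration:

```c
/*
 * Ops-table indirection: callers dispatch through a struct of function
 * pointers, so a paravirt guest can install no-op stubs at boot.
 */
#include <stdint.h>
#include <stdio.h>

struct apic_ops {
	uint32_t (*read)(uint32_t reg);
	void (*write)(uint32_t reg, uint32_t v);
};

static uint32_t noop_apic_read(uint32_t reg)
{
	(void)reg;
	return 0;		/* pretend every register reads as zero */
}

static void noop_apic_write(uint32_t reg, uint32_t v)
{
	(void)reg;
	(void)v;		/* silently discard the write */
}

static struct apic_ops noop_apic_ops = {
	.read	= noop_apic_read,
	.write	= noop_apic_write,
};

/* Boot code repoints this at the implementation for the platform. */
static struct apic_ops *apic_ops = &noop_apic_ops;

int main(void)
{
	apic_ops->write(0x80, 0);	/* harmless under the stubs */
	printf("read -> %u\n", apic_ops->read(0x20));
	return 0;
}
```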
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c index 01b868ba82f8..321cf720dbb6 100644 --- a/arch/x86/lib/msr-on-cpu.c +++ b/arch/x86/lib/msr-on-cpu.c | |||
@@ -16,37 +16,46 @@ static void __rdmsr_on_cpu(void *info) | |||
16 | rdmsr(rv->msr_no, rv->l, rv->h); | 16 | rdmsr(rv->msr_no, rv->l, rv->h); |
17 | } | 17 | } |
18 | 18 | ||
19 | static void __rdmsr_safe_on_cpu(void *info) | 19 | static void __wrmsr_on_cpu(void *info) |
20 | { | 20 | { |
21 | struct msr_info *rv = info; | 21 | struct msr_info *rv = info; |
22 | 22 | ||
23 | rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h); | 23 | wrmsr(rv->msr_no, rv->l, rv->h); |
24 | } | 24 | } |
25 | 25 | ||
26 | static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe) | 26 | int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) |
27 | { | 27 | { |
28 | int err = 0; | 28 | int err; |
29 | struct msr_info rv; | 29 | struct msr_info rv; |
30 | 30 | ||
31 | rv.msr_no = msr_no; | 31 | rv.msr_no = msr_no; |
32 | if (safe) { | 32 | err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); |
33 | err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, | ||
34 | &rv, 1); | ||
35 | err = err ? err : rv.err; | ||
36 | } else { | ||
37 | err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); | ||
38 | } | ||
39 | *l = rv.l; | 33 | *l = rv.l; |
40 | *h = rv.h; | 34 | *h = rv.h; |
41 | 35 | ||
42 | return err; | 36 | return err; |
43 | } | 37 | } |
44 | 38 | ||
45 | static void __wrmsr_on_cpu(void *info) | 39 | int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) |
40 | { | ||
41 | int err; | ||
42 | struct msr_info rv; | ||
43 | |||
44 | rv.msr_no = msr_no; | ||
45 | rv.l = l; | ||
46 | rv.h = h; | ||
47 | err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); | ||
48 | |||
49 | return err; | ||
50 | } | ||
51 | |||
52 | /* These "safe" variants are slower and should be used when the target MSR | ||
53 | may not actually exist. */ | ||
54 | static void __rdmsr_safe_on_cpu(void *info) | ||
46 | { | 55 | { |
47 | struct msr_info *rv = info; | 56 | struct msr_info *rv = info; |
48 | 57 | ||
49 | wrmsr(rv->msr_no, rv->l, rv->h); | 58 | rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h); |
50 | } | 59 | } |
51 | 60 | ||
52 | static void __wrmsr_safe_on_cpu(void *info) | 61 | static void __wrmsr_safe_on_cpu(void *info) |
@@ -56,45 +65,30 @@ static void __wrmsr_safe_on_cpu(void *info) | |||
56 | rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h); | 65 | rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h); |
57 | } | 66 | } |
58 | 67 | ||
59 | static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe) | 68 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) |
60 | { | 69 | { |
61 | int err = 0; | 70 | int err; |
62 | struct msr_info rv; | 71 | struct msr_info rv; |
63 | 72 | ||
64 | rv.msr_no = msr_no; | 73 | rv.msr_no = msr_no; |
65 | rv.l = l; | 74 | err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); |
66 | rv.h = h; | 75 | *l = rv.l; |
67 | if (safe) { | 76 | *h = rv.h; |
68 | err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, | ||
69 | &rv, 1); | ||
70 | err = err ? err : rv.err; | ||
71 | } else { | ||
72 | err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); | ||
73 | } | ||
74 | |||
75 | return err; | ||
76 | } | ||
77 | 77 | ||
78 | int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | 78 | return err ? err : rv.err; |
79 | { | ||
80 | return _wrmsr_on_cpu(cpu, msr_no, l, h, 0); | ||
81 | } | 79 | } |
82 | 80 | ||
83 | int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) | ||
84 | { | ||
85 | return _rdmsr_on_cpu(cpu, msr_no, l, h, 0); | ||
86 | } | ||
87 | |||
88 | /* These "safe" variants are slower and should be used when the target MSR | ||
89 | may not actually exist. */ | ||
90 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | 81 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) |
91 | { | 82 | { |
92 | return _wrmsr_on_cpu(cpu, msr_no, l, h, 1); | 83 | int err; |
93 | } | 84 | struct msr_info rv; |
94 | 85 | ||
95 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) | 86 | rv.msr_no = msr_no; |
96 | { | 87 | rv.l = l; |
97 | return _rdmsr_on_cpu(cpu, msr_no, l, h, 1); | 88 | rv.h = h; |
89 | err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); | ||
90 | |||
91 | return err ? err : rv.err; | ||
98 | } | 92 | } |
99 | 93 | ||
100 | EXPORT_SYMBOL(rdmsr_on_cpu); | 94 | EXPORT_SYMBOL(rdmsr_on_cpu); |
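The msr-on-cpu rework unfolds the boolean `safe` parameter into four straight-line functions. Each packs its arguments into a struct msr_info, runs a small callback on the target CPU via smp_call_function_single(), and, in the safe variants, folds the remote error into the return value (`err ? err : rv.err`, so an IPI failure takes precedence over an MSR fault). A hedged userspace sketch of that marshalling pattern, with the cross-CPU call replaced by a direct invocation and the MSR read stubbed out:

```c
/* Argument marshalling through a carrier struct, as in rdmsr_on_cpu(). */
#include <stdint.h>
#include <stdio.h>

struct msr_info {
	uint32_t msr_no;
	uint32_t l, h;
	int err;
};

static void __rdmsr_stub(void *info)
{
	struct msr_info *rv = info;

	/* Pretend the MSR read succeeded with a fixed value. */
	rv->l = 0xdeadbeef;
	rv->h = rv->msr_no;
	rv->err = 0;
}

static int run_on_cpu(unsigned int cpu, void (*fn)(void *), void *info)
{
	(void)cpu;	/* stand-in for smp_call_function_single() */
	fn(info);
	return 0;
}

static int rdmsr_safe_on_cpu(unsigned int cpu, uint32_t msr_no,
			     uint32_t *l, uint32_t *h)
{
	struct msr_info rv = { .msr_no = msr_no };
	int err = run_on_cpu(cpu, __rdmsr_stub, &rv);

	*l = rv.l;
	*h = rv.h;
	return err ? err : rv.err;	/* IPI error wins over MSR error */
}

int main(void)
{
	uint32_t l, h;
	int err = rdmsr_safe_on_cpu(0, 0x1b, &l, &h);

	printf("err=%d h=0x%x l=0x%x\n", err, h, l);
	return 0;
}
```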
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c index 94972e7c094d..82004d2bf05e 100644 --- a/arch/x86/lib/string_32.c +++ b/arch/x86/lib/string_32.c | |||
@@ -22,7 +22,7 @@ char *strcpy(char *dest, const char *src) | |||
22 | "testb %%al,%%al\n\t" | 22 | "testb %%al,%%al\n\t" |
23 | "jne 1b" | 23 | "jne 1b" |
24 | : "=&S" (d0), "=&D" (d1), "=&a" (d2) | 24 | : "=&S" (d0), "=&D" (d1), "=&a" (d2) |
25 | :"0" (src), "1" (dest) : "memory"); | 25 | : "0" (src), "1" (dest) : "memory"); |
26 | return dest; | 26 | return dest; |
27 | } | 27 | } |
28 | EXPORT_SYMBOL(strcpy); | 28 | EXPORT_SYMBOL(strcpy); |
@@ -42,7 +42,7 @@ char *strncpy(char *dest, const char *src, size_t count) | |||
42 | "stosb\n" | 42 | "stosb\n" |
43 | "2:" | 43 | "2:" |
44 | : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) | 44 | : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) |
45 | :"0" (src), "1" (dest), "2" (count) : "memory"); | 45 | : "0" (src), "1" (dest), "2" (count) : "memory"); |
46 | return dest; | 46 | return dest; |
47 | } | 47 | } |
48 | EXPORT_SYMBOL(strncpy); | 48 | EXPORT_SYMBOL(strncpy); |
@@ -60,7 +60,7 @@ char *strcat(char *dest, const char *src) | |||
60 | "testb %%al,%%al\n\t" | 60 | "testb %%al,%%al\n\t" |
61 | "jne 1b" | 61 | "jne 1b" |
62 | : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) | 62 | : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) |
63 | : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu): "memory"); | 63 | : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu) : "memory"); |
64 | return dest; | 64 | return dest; |
65 | } | 65 | } |
66 | EXPORT_SYMBOL(strcat); | 66 | EXPORT_SYMBOL(strcat); |
@@ -105,9 +105,9 @@ int strcmp(const char *cs, const char *ct) | |||
105 | "2:\tsbbl %%eax,%%eax\n\t" | 105 | "2:\tsbbl %%eax,%%eax\n\t" |
106 | "orb $1,%%al\n" | 106 | "orb $1,%%al\n" |
107 | "3:" | 107 | "3:" |
108 | :"=a" (res), "=&S" (d0), "=&D" (d1) | 108 | : "=a" (res), "=&S" (d0), "=&D" (d1) |
109 | :"1" (cs), "2" (ct) | 109 | : "1" (cs), "2" (ct) |
110 | :"memory"); | 110 | : "memory"); |
111 | return res; | 111 | return res; |
112 | } | 112 | } |
113 | EXPORT_SYMBOL(strcmp); | 113 | EXPORT_SYMBOL(strcmp); |
@@ -130,9 +130,9 @@ int strncmp(const char *cs, const char *ct, size_t count) | |||
130 | "3:\tsbbl %%eax,%%eax\n\t" | 130 | "3:\tsbbl %%eax,%%eax\n\t" |
131 | "orb $1,%%al\n" | 131 | "orb $1,%%al\n" |
132 | "4:" | 132 | "4:" |
133 | :"=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2) | 133 | : "=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2) |
134 | :"1" (cs), "2" (ct), "3" (count) | 134 | : "1" (cs), "2" (ct), "3" (count) |
135 | :"memory"); | 135 | : "memory"); |
136 | return res; | 136 | return res; |
137 | } | 137 | } |
138 | EXPORT_SYMBOL(strncmp); | 138 | EXPORT_SYMBOL(strncmp); |
@@ -152,9 +152,9 @@ char *strchr(const char *s, int c) | |||
152 | "movl $1,%1\n" | 152 | "movl $1,%1\n" |
153 | "2:\tmovl %1,%0\n\t" | 153 | "2:\tmovl %1,%0\n\t" |
154 | "decl %0" | 154 | "decl %0" |
155 | :"=a" (res), "=&S" (d0) | 155 | : "=a" (res), "=&S" (d0) |
156 | :"1" (s), "0" (c) | 156 | : "1" (s), "0" (c) |
157 | :"memory"); | 157 | : "memory"); |
158 | return res; | 158 | return res; |
159 | } | 159 | } |
160 | EXPORT_SYMBOL(strchr); | 160 | EXPORT_SYMBOL(strchr); |
@@ -169,9 +169,9 @@ size_t strlen(const char *s) | |||
169 | "scasb\n\t" | 169 | "scasb\n\t" |
170 | "notl %0\n\t" | 170 | "notl %0\n\t" |
171 | "decl %0" | 171 | "decl %0" |
172 | :"=c" (res), "=&D" (d0) | 172 | : "=c" (res), "=&D" (d0) |
173 | :"1" (s), "a" (0), "0" (0xffffffffu) | 173 | : "1" (s), "a" (0), "0" (0xffffffffu) |
174 | :"memory"); | 174 | : "memory"); |
175 | return res; | 175 | return res; |
176 | } | 176 | } |
177 | EXPORT_SYMBOL(strlen); | 177 | EXPORT_SYMBOL(strlen); |
@@ -189,9 +189,9 @@ void *memchr(const void *cs, int c, size_t count) | |||
189 | "je 1f\n\t" | 189 | "je 1f\n\t" |
190 | "movl $1,%0\n" | 190 | "movl $1,%0\n" |
191 | "1:\tdecl %0" | 191 | "1:\tdecl %0" |
192 | :"=D" (res), "=&c" (d0) | 192 | : "=D" (res), "=&c" (d0) |
193 | :"a" (c), "0" (cs), "1" (count) | 193 | : "a" (c), "0" (cs), "1" (count) |
194 | :"memory"); | 194 | : "memory"); |
195 | return res; | 195 | return res; |
196 | } | 196 | } |
197 | EXPORT_SYMBOL(memchr); | 197 | EXPORT_SYMBOL(memchr); |
@@ -228,9 +228,9 @@ size_t strnlen(const char *s, size_t count) | |||
228 | "cmpl $-1,%1\n\t" | 228 | "cmpl $-1,%1\n\t" |
229 | "jne 1b\n" | 229 | "jne 1b\n" |
230 | "3:\tsubl %2,%0" | 230 | "3:\tsubl %2,%0" |
231 | :"=a" (res), "=&d" (d0) | 231 | : "=a" (res), "=&d" (d0) |
232 | :"c" (s), "1" (count) | 232 | : "c" (s), "1" (count) |
233 | :"memory"); | 233 | : "memory"); |
234 | return res; | 234 | return res; |
235 | } | 235 | } |
236 | EXPORT_SYMBOL(strnlen); | 236 | EXPORT_SYMBOL(strnlen); |
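The string_32.c hunks are purely cosmetic: they insert a space after the `:` separators of GCC extended asm. For reference, a small standalone example laid out in the same style, with the output, input, and clobber sections labelled:

```c
/* GCC extended asm layout in the spacing style these hunks adopt. */
#include <stdio.h>

static int add_asm(int a, int b)
{
	int res;

	asm("addl %2, %0"
	    : "=r" (res)		/* outputs */
	    : "0" (a), "r" (b)		/* inputs; "0" reuses operand 0 */
	    : "cc");			/* clobbers: condition flags */
	return res;
}

int main(void)
{
	printf("%d\n", add_asm(2, 3));	/* 5 */
	return 0;
}
```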
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c index 42e8a50303f3..8e2d55f754bf 100644 --- a/arch/x86/lib/strstr_32.c +++ b/arch/x86/lib/strstr_32.c | |||
@@ -23,9 +23,9 @@ __asm__ __volatile__( | |||
23 | "jne 1b\n\t" | 23 | "jne 1b\n\t" |
24 | "xorl %%eax,%%eax\n\t" | 24 | "xorl %%eax,%%eax\n\t" |
25 | "2:" | 25 | "2:" |
26 | :"=a" (__res), "=&c" (d0), "=&S" (d1) | 26 | : "=a" (__res), "=&c" (d0), "=&S" (d1) |
27 | :"0" (0), "1" (0xffffffff), "2" (cs), "g" (ct) | 27 | : "0" (0), "1" (0xffffffff), "2" (cs), "g" (ct) |
28 | :"dx", "di"); | 28 | : "dx", "di"); |
29 | return __res; | 29 | return __res; |
30 | } | 30 | } |
31 | 31 | ||
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 24e60944971a..9e68075544f6 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c | |||
@@ -14,6 +14,13 @@ | |||
14 | #include <asm/uaccess.h> | 14 | #include <asm/uaccess.h> |
15 | #include <asm/mmx.h> | 15 | #include <asm/mmx.h> |
16 | 16 | ||
17 | #ifdef CONFIG_X86_INTEL_USERCOPY | ||
18 | /* | ||
19 | * Alignment at which movsl is preferred for bulk memory copies. | ||
20 | */ | ||
21 | struct movsl_mask movsl_mask __read_mostly; | ||
22 | #endif | ||
23 | |||
17 | static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n) | 24 | static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n) |
18 | { | 25 | { |
19 | #ifdef CONFIG_X86_INTEL_USERCOPY | 26 | #ifdef CONFIG_X86_INTEL_USERCOPY |
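The usercopy hunk only moves the movsl_mask definition next to its user, __movsl_is_ok(), which decides whether a bulk movsl copy is worthwhile based on the relative alignment of source and destination. A sketch of that gate; the 64-byte threshold matches the kernel's heuristic as I recall it, and the mask value is an illustrative assumption:

```c
/* Alignment gate fed by movsl_mask: decline movsl for large,
 * mutually misaligned copies. */
#include <stdio.h>

static unsigned long movsl_mask = 7;	/* alignment - 1 (assumed) */

static int movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
	/* Small copies are always fine; big ones need matching alignment. */
	if (n >= 64 && ((a1 ^ a2) & movsl_mask))
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", movsl_is_ok(0x1000, 0x2004, 16));   /* 1: short copy */
	printf("%d\n", movsl_is_ok(0x1000, 0x2008, 256));  /* 1: same alignment */
	printf("%d\n", movsl_is_ok(0x1001, 0x2000, 256));  /* 0: misaligned */
	return 0;
}
```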
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c index 3d317836be9e..3f2cf11f201a 100644 --- a/arch/x86/mach-default/setup.c +++ b/arch/x86/mach-default/setup.c | |||
@@ -10,13 +10,15 @@ | |||
10 | #include <asm/e820.h> | 10 | #include <asm/e820.h> |
11 | #include <asm/setup.h> | 11 | #include <asm/setup.h> |
12 | 12 | ||
13 | #include <mach_ipi.h> | ||
14 | |||
13 | #ifdef CONFIG_HOTPLUG_CPU | 15 | #ifdef CONFIG_HOTPLUG_CPU |
14 | #define DEFAULT_SEND_IPI (1) | 16 | #define DEFAULT_SEND_IPI (1) |
15 | #else | 17 | #else |
16 | #define DEFAULT_SEND_IPI (0) | 18 | #define DEFAULT_SEND_IPI (0) |
17 | #endif | 19 | #endif |
18 | 20 | ||
19 | int no_broadcast=DEFAULT_SEND_IPI; | 21 | int no_broadcast = DEFAULT_SEND_IPI; |
20 | 22 | ||
21 | /** | 23 | /** |
22 | * pre_intr_init_hook - initialisation prior to setting up interrupt vectors | 24 | * pre_intr_init_hook - initialisation prior to setting up interrupt vectors |
diff --git a/arch/x86/mach-generic/Makefile b/arch/x86/mach-generic/Makefile index 0dbd7803a1d5..4706de7676b1 100644 --- a/arch/x86/mach-generic/Makefile +++ b/arch/x86/mach-generic/Makefile | |||
@@ -9,4 +9,4 @@ obj-$(CONFIG_X86_NUMAQ) += numaq.o | |||
9 | obj-$(CONFIG_X86_SUMMIT) += summit.o | 9 | obj-$(CONFIG_X86_SUMMIT) += summit.o |
10 | obj-$(CONFIG_X86_BIGSMP) += bigsmp.o | 10 | obj-$(CONFIG_X86_BIGSMP) += bigsmp.o |
11 | obj-$(CONFIG_X86_ES7000) += es7000.o | 11 | obj-$(CONFIG_X86_ES7000) += es7000.o |
12 | obj-$(CONFIG_X86_ES7000) += ../../x86/mach-es7000/ | 12 | obj-$(CONFIG_X86_ES7000) += ../../x86/es7000/ |
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 59d771714559..df37fc9d6a26 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c | |||
@@ -5,18 +5,17 @@ | |||
5 | #define APIC_DEFINITION 1 | 5 | #define APIC_DEFINITION 1 |
6 | #include <linux/threads.h> | 6 | #include <linux/threads.h> |
7 | #include <linux/cpumask.h> | 7 | #include <linux/cpumask.h> |
8 | #include <asm/smp.h> | ||
9 | #include <asm/mpspec.h> | 8 | #include <asm/mpspec.h> |
10 | #include <asm/genapic.h> | 9 | #include <asm/genapic.h> |
11 | #include <asm/fixmap.h> | 10 | #include <asm/fixmap.h> |
12 | #include <asm/apicdef.h> | 11 | #include <asm/apicdef.h> |
13 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
14 | #include <linux/smp.h> | ||
15 | #include <linux/init.h> | 13 | #include <linux/init.h> |
16 | #include <linux/dmi.h> | 14 | #include <linux/dmi.h> |
17 | #include <asm/mach-bigsmp/mach_apic.h> | 15 | #include <asm/bigsmp/apicdef.h> |
18 | #include <asm/mach-bigsmp/mach_apicdef.h> | 16 | #include <linux/smp.h> |
19 | #include <asm/mach-bigsmp/mach_ipi.h> | 17 | #include <asm/bigsmp/apic.h> |
18 | #include <asm/bigsmp/ipi.h> | ||
20 | #include <asm/mach-default/mach_mpparse.h> | 19 | #include <asm/mach-default/mach_mpparse.h> |
21 | 20 | ||
22 | static int dmi_bigsmp; /* can be set by dmi scanners */ | 21 | static int dmi_bigsmp; /* can be set by dmi scanners */ |
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 4742626f08c4..520cca0ee04e 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c | |||
@@ -4,20 +4,19 @@ | |||
4 | #define APIC_DEFINITION 1 | 4 | #define APIC_DEFINITION 1 |
5 | #include <linux/threads.h> | 5 | #include <linux/threads.h> |
6 | #include <linux/cpumask.h> | 6 | #include <linux/cpumask.h> |
7 | #include <asm/smp.h> | ||
8 | #include <asm/mpspec.h> | 7 | #include <asm/mpspec.h> |
9 | #include <asm/genapic.h> | 8 | #include <asm/genapic.h> |
10 | #include <asm/fixmap.h> | 9 | #include <asm/fixmap.h> |
11 | #include <asm/apicdef.h> | 10 | #include <asm/apicdef.h> |
12 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
13 | #include <linux/string.h> | 12 | #include <linux/string.h> |
14 | #include <linux/smp.h> | ||
15 | #include <linux/init.h> | 13 | #include <linux/init.h> |
16 | #include <asm/mach-es7000/mach_apicdef.h> | 14 | #include <asm/es7000/apicdef.h> |
17 | #include <asm/mach-es7000/mach_apic.h> | 15 | #include <linux/smp.h> |
18 | #include <asm/mach-es7000/mach_ipi.h> | 16 | #include <asm/es7000/apic.h> |
19 | #include <asm/mach-es7000/mach_mpparse.h> | 17 | #include <asm/es7000/ipi.h> |
20 | #include <asm/mach-es7000/mach_wakecpu.h> | 18 | #include <asm/es7000/mpparse.h> |
19 | #include <asm/es7000/wakecpu.h> | ||
21 | 20 | ||
22 | static int probe_es7000(void) | 21 | static int probe_es7000(void) |
23 | { | 22 | { |
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 8091e68764c4..8cf58394975e 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c | |||
@@ -4,7 +4,6 @@ | |||
4 | #define APIC_DEFINITION 1 | 4 | #define APIC_DEFINITION 1 |
5 | #include <linux/threads.h> | 5 | #include <linux/threads.h> |
6 | #include <linux/cpumask.h> | 6 | #include <linux/cpumask.h> |
7 | #include <linux/smp.h> | ||
8 | #include <asm/mpspec.h> | 7 | #include <asm/mpspec.h> |
9 | #include <asm/genapic.h> | 8 | #include <asm/genapic.h> |
10 | #include <asm/fixmap.h> | 9 | #include <asm/fixmap.h> |
@@ -12,11 +11,12 @@ | |||
12 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
13 | #include <linux/string.h> | 12 | #include <linux/string.h> |
14 | #include <linux/init.h> | 13 | #include <linux/init.h> |
15 | #include <asm/mach-numaq/mach_apic.h> | 14 | #include <asm/numaq/apicdef.h> |
16 | #include <asm/mach-numaq/mach_apicdef.h> | 15 | #include <linux/smp.h> |
17 | #include <asm/mach-numaq/mach_ipi.h> | 16 | #include <asm/numaq/apic.h> |
18 | #include <asm/mach-numaq/mach_mpparse.h> | 17 | #include <asm/numaq/ipi.h> |
19 | #include <asm/mach-numaq/mach_wakecpu.h> | 18 | #include <asm/numaq/mpparse.h> |
19 | #include <asm/numaq/wakecpu.h> | ||
20 | #include <asm/numaq.h> | 20 | #include <asm/numaq.h> |
21 | 21 | ||
22 | static int mps_oem_check(struct mp_config_table *mpc, char *oem, | 22 | static int mps_oem_check(struct mp_config_table *mpc, char *oem, |
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index a97ea0f35b1e..6ad6b67a723d 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c | |||
@@ -4,19 +4,18 @@ | |||
4 | #define APIC_DEFINITION 1 | 4 | #define APIC_DEFINITION 1 |
5 | #include <linux/threads.h> | 5 | #include <linux/threads.h> |
6 | #include <linux/cpumask.h> | 6 | #include <linux/cpumask.h> |
7 | #include <asm/smp.h> | ||
8 | #include <asm/mpspec.h> | 7 | #include <asm/mpspec.h> |
9 | #include <asm/genapic.h> | 8 | #include <asm/genapic.h> |
10 | #include <asm/fixmap.h> | 9 | #include <asm/fixmap.h> |
11 | #include <asm/apicdef.h> | 10 | #include <asm/apicdef.h> |
12 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
13 | #include <linux/string.h> | 12 | #include <linux/string.h> |
14 | #include <linux/smp.h> | ||
15 | #include <linux/init.h> | 13 | #include <linux/init.h> |
16 | #include <asm/mach-summit/mach_apic.h> | 14 | #include <asm/summit/apicdef.h> |
17 | #include <asm/mach-summit/mach_apicdef.h> | 15 | #include <linux/smp.h> |
18 | #include <asm/mach-summit/mach_ipi.h> | 16 | #include <asm/summit/apic.h> |
19 | #include <asm/mach-summit/mach_mpparse.h> | 17 | #include <asm/summit/ipi.h> |
18 | #include <asm/summit/mpparse.h> | ||
20 | 19 | ||
21 | static int probe_summit(void) | 20 | static int probe_summit(void) |
22 | { | 21 | { |
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c index 62fa440678d8..847c164725f4 100644 --- a/arch/x86/mm/discontig_32.c +++ b/arch/x86/mm/discontig_32.c | |||
@@ -328,7 +328,7 @@ void __init initmem_init(unsigned long start_pfn, | |||
328 | 328 | ||
329 | get_memcfg_numa(); | 329 | get_memcfg_numa(); |
330 | 330 | ||
331 | kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE); | 331 | kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE); |
332 | 332 | ||
333 | kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE); | 333 | kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE); |
334 | do { | 334 | do { |
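This and the following mm/ hunks rename round_up() to roundup(). The kernel.h roundup() divides and multiplies, so it is correct for any positive multiple, whereas the mask-based round_up() form it replaces is only correct for power-of-two alignments. A sketch contrasting the two, with both macro bodies reproduced from memory rather than quoted from the tree:

```c
/* roundup() works for any multiple; the mask form needs y = 2^k. */
#include <stdio.h>

#define roundup(x, y)  ((((x) + ((y) - 1)) / (y)) * (y))   /* any y > 0 */
#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))      /* y = 2^k only */

int main(void)
{
	printf("%lu\n", (unsigned long)roundup(1000UL, 24UL));    /* 1008 */
	printf("%lu\n", (unsigned long)roundup(1000UL, 256UL));   /* 1024 */
	printf("%lu\n", (unsigned long)round_up(1000UL, 256UL));  /* 1024 */
	/* round_up(1000, 24) would be wrong: 24 is not a power of two. */
	return 0;
}
```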
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index a20d1fa64b4e..e7277cbcfb40 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c | |||
@@ -148,8 +148,8 @@ static void note_page(struct seq_file *m, struct pg_state *st, | |||
148 | * we have now. "break" is either changing perms, levels or | 148 | * we have now. "break" is either changing perms, levels or |
149 | * address space marker. | 149 | * address space marker. |
150 | */ | 150 | */ |
151 | prot = pgprot_val(new_prot) & ~(PTE_PFN_MASK); | 151 | prot = pgprot_val(new_prot) & PTE_FLAGS_MASK; |
152 | cur = pgprot_val(st->current_prot) & ~(PTE_PFN_MASK); | 152 | cur = pgprot_val(st->current_prot) & PTE_FLAGS_MASK; |
153 | 153 | ||
154 | if (!st->level) { | 154 | if (!st->level) { |
155 | /* First entry */ | 155 | /* First entry */ |
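The dump_pagetables change is an identity rewrite: PTE_FLAGS_MASK is defined as the complement of PTE_PFN_MASK, so `& PTE_FLAGS_MASK` selects the same bits as `& ~(PTE_PFN_MASK)` while naming the intent. A tiny sketch of the partition; the 52-bit physical width is an assumption for illustration:

```c
/* A PTE splits into a frame-number part and a flags part; the two
 * masks are complements of each other. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PTE_PFN_MASK	((uint64_t)0x000ffffffffff000ULL)  /* bits 12..51 */
#define PTE_FLAGS_MASK	(~PTE_PFN_MASK)

int main(void)
{
	uint64_t pte = 0x00000001234f5067ULL;	/* frame | PRESENT|RW|... */

	printf("pfn   = 0x%llx\n",
	       (unsigned long long)((pte & PTE_PFN_MASK) >> PAGE_SHIFT));
	printf("flags = 0x%llx\n",
	       (unsigned long long)(pte & PTE_FLAGS_MASK));
	return 0;
}
```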
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 455f3fe67b42..8f92cac4e6db 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <asm/tlbflush.h> | 35 | #include <asm/tlbflush.h> |
36 | #include <asm/proto.h> | 36 | #include <asm/proto.h> |
37 | #include <asm-generic/sections.h> | 37 | #include <asm-generic/sections.h> |
38 | #include <asm/traps.h> | ||
38 | 39 | ||
39 | /* | 40 | /* |
40 | * Page fault error code bits | 41 | * Page fault error code bits |
@@ -357,8 +358,6 @@ static int is_errata100(struct pt_regs *regs, unsigned long address) | |||
357 | return 0; | 358 | return 0; |
358 | } | 359 | } |
359 | 360 | ||
360 | void do_invalid_op(struct pt_regs *, unsigned long); | ||
361 | |||
362 | static int is_f00f_bug(struct pt_regs *regs, unsigned long address) | 361 | static int is_f00f_bug(struct pt_regs *regs, unsigned long address) |
363 | { | 362 | { |
364 | #ifdef CONFIG_X86_F00F_BUG | 363 | #ifdef CONFIG_X86_F00F_BUG |
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 60ec1d08ff24..6b9a9358b330 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <asm/paravirt.h> | 47 | #include <asm/paravirt.h> |
48 | #include <asm/setup.h> | 48 | #include <asm/setup.h> |
49 | #include <asm/cacheflush.h> | 49 | #include <asm/cacheflush.h> |
50 | #include <asm/smp.h> | ||
50 | 51 | ||
51 | unsigned int __VMALLOC_RESERVE = 128 << 20; | 52 | unsigned int __VMALLOC_RESERVE = 128 << 20; |
52 | 53 | ||
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index d3746efb060d..770536ebf7e9 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -225,7 +225,7 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size) | |||
225 | void __init cleanup_highmap(void) | 225 | void __init cleanup_highmap(void) |
226 | { | 226 | { |
227 | unsigned long vaddr = __START_KERNEL_map; | 227 | unsigned long vaddr = __START_KERNEL_map; |
228 | unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1; | 228 | unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1; |
229 | pmd_t *pmd = level2_kernel_pgt; | 229 | pmd_t *pmd = level2_kernel_pgt; |
230 | pmd_t *last_pmd = pmd + PTRS_PER_PMD; | 230 | pmd_t *last_pmd = pmd + PTRS_PER_PMD; |
231 | 231 | ||
@@ -451,14 +451,14 @@ static void __init find_early_table_space(unsigned long end) | |||
451 | unsigned long puds, pmds, ptes, tables, start; | 451 | unsigned long puds, pmds, ptes, tables, start; |
452 | 452 | ||
453 | puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; | 453 | puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; |
454 | tables = round_up(puds * sizeof(pud_t), PAGE_SIZE); | 454 | tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); |
455 | if (direct_gbpages) { | 455 | if (direct_gbpages) { |
456 | unsigned long extra; | 456 | unsigned long extra; |
457 | extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT); | 457 | extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT); |
458 | pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT; | 458 | pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT; |
459 | } else | 459 | } else |
460 | pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; | 460 | pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; |
461 | tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE); | 461 | tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); |
462 | 462 | ||
463 | if (cpu_has_pse) { | 463 | if (cpu_has_pse) { |
464 | unsigned long extra; | 464 | unsigned long extra; |
@@ -466,7 +466,7 @@ static void __init find_early_table_space(unsigned long end) | |||
466 | ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; | 466 | ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; |
467 | } else | 467 | } else |
468 | ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; | 468 | ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; |
469 | tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE); | 469 | tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); |
470 | 470 | ||
471 | /* | 471 | /* |
472 | * RED-PEN putting page tables only on node 0 could | 472 | * RED-PEN putting page tables only on node 0 could |
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index d4b6e6a29ae3..cac6da54203b 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -421,7 +421,7 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr) | |||
421 | return; | 421 | return; |
422 | } | 422 | } |
423 | 423 | ||
424 | int __initdata early_ioremap_debug; | 424 | static int __initdata early_ioremap_debug; |
425 | 425 | ||
426 | static int __init early_ioremap_debug_setup(char *str) | 426 | static int __init early_ioremap_debug_setup(char *str) |
427 | { | 427 | { |
@@ -547,7 +547,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx) | |||
547 | } | 547 | } |
548 | 548 | ||
549 | 549 | ||
550 | int __initdata early_ioremap_nested; | 550 | static int __initdata early_ioremap_nested; |
551 | 551 | ||
552 | static int __init check_early_ioremap_leak(void) | 552 | static int __init check_early_ioremap_leak(void) |
553 | { | 553 | { |
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index a4dd793d6003..cebcbf152d46 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -79,7 +79,7 @@ static int __init allocate_cachealigned_memnodemap(void) | |||
79 | return 0; | 79 | return 0; |
80 | 80 | ||
81 | addr = 0x8000; | 81 | addr = 0x8000; |
82 | nodemap_size = round_up(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); | 82 | nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); |
83 | nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT, | 83 | nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT, |
84 | nodemap_size, L1_CACHE_BYTES); | 84 | nodemap_size, L1_CACHE_BYTES); |
85 | if (nodemap_addr == -1UL) { | 85 | if (nodemap_addr == -1UL) { |
@@ -176,10 +176,10 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, | |||
176 | unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size; | 176 | unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size; |
177 | unsigned long bootmap_start, nodedata_phys; | 177 | unsigned long bootmap_start, nodedata_phys; |
178 | void *bootmap; | 178 | void *bootmap; |
179 | const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE); | 179 | const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE); |
180 | int nid; | 180 | int nid; |
181 | 181 | ||
182 | start = round_up(start, ZONE_ALIGN); | 182 | start = roundup(start, ZONE_ALIGN); |
183 | 183 | ||
184 | printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, | 184 | printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, |
185 | start, end); | 185 | start, end); |
@@ -210,9 +210,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, | |||
210 | bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn); | 210 | bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn); |
211 | nid = phys_to_nid(nodedata_phys); | 211 | nid = phys_to_nid(nodedata_phys); |
212 | if (nid == nodeid) | 212 | if (nid == nodeid) |
213 | bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE); | 213 | bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE); |
214 | else | 214 | else |
215 | bootmap_start = round_up(start, PAGE_SIZE); | 215 | bootmap_start = roundup(start, PAGE_SIZE); |
216 | /* | 216 | /* |
217 | * SMP_CACHE_BYTES could be enough, but init_bootmem_node like | 217 | * SMP_CACHE_BYTES could be enough, but init_bootmem_node like |
218 | * to use that to align to PAGE_SIZE | 218 | * to use that to align to PAGE_SIZE |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 43e2f8483e4f..be54f501776a 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -84,7 +84,7 @@ static inline unsigned long highmap_start_pfn(void) | |||
84 | 84 | ||
85 | static inline unsigned long highmap_end_pfn(void) | 85 | static inline unsigned long highmap_end_pfn(void) |
86 | { | 86 | { |
87 | return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT; | 87 | return __pa(roundup((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT; |
88 | } | 88 | } |
89 | 89 | ||
90 | #endif | 90 | #endif |
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index 6a0fca78c362..22e057665e55 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c | |||
@@ -580,7 +580,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self, | |||
580 | unsigned long action, void *hcpu) | 580 | unsigned long action, void *hcpu) |
581 | { | 581 | { |
582 | int cpu = (long)hcpu; | 582 | int cpu = (long)hcpu; |
583 | switch(action) { | 583 | switch (action) { |
584 | case CPU_ONLINE: | 584 | case CPU_ONLINE: |
585 | case CPU_ONLINE_FROZEN: | 585 | case CPU_ONLINE_FROZEN: |
586 | smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0); | 586 | smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0); |
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 8e077185e185..006599db0dc7 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
@@ -1043,35 +1043,44 @@ static void __init pcibios_fixup_irqs(void) | |||
1043 | if (io_apic_assign_pci_irqs) { | 1043 | if (io_apic_assign_pci_irqs) { |
1044 | int irq; | 1044 | int irq; |
1045 | 1045 | ||
1046 | if (pin) { | 1046 | if (!pin) |
1047 | /* | 1047 | continue; |
1048 | * interrupt pins are numbered starting | 1048 | |
1049 | * from 1 | 1049 | /* |
1050 | */ | 1050 | * interrupt pins are numbered starting from 1 |
1051 | pin--; | 1051 | */ |
1052 | irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, | 1052 | pin--; |
1053 | PCI_SLOT(dev->devfn), pin); | 1053 | irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, |
1054 | /* | 1054 | PCI_SLOT(dev->devfn), pin); |
1055 | * Busses behind bridges are typically not listed in the MP-table. | 1055 | /* |
1056 | * In this case we have to look up the IRQ based on the parent bus, | 1056 | * Busses behind bridges are typically not listed in the |
1057 | * parent slot, and pin number. The SMP code detects such bridged | 1057 | * MP-table. In this case we have to look up the IRQ |
1058 | * busses itself so we should get into this branch reliably. | 1058 | * based on the parent bus, parent slot, and pin number. |
1059 | */ | 1059 | * The SMP code detects such bridged busses itself so we |
1060 | if (irq < 0 && dev->bus->parent) { /* go back to the bridge */ | 1060 | * should get into this branch reliably. |
1061 | struct pci_dev *bridge = dev->bus->self; | 1061 | */ |
1062 | 1062 | if (irq < 0 && dev->bus->parent) { | |
1063 | pin = (pin + PCI_SLOT(dev->devfn)) % 4; | 1063 | /* go back to the bridge */ |
1064 | irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, | 1064 | struct pci_dev *bridge = dev->bus->self; |
1065 | PCI_SLOT(bridge->devfn), pin); | 1065 | int bus; |
1066 | if (irq >= 0) | 1066 | |
1067 | dev_warn(&dev->dev, "using bridge %s INT %c to get IRQ %d\n", | 1067 | pin = (pin + PCI_SLOT(dev->devfn)) % 4; |
1068 | pci_name(bridge), | 1068 | bus = bridge->bus->number; |
1069 | 'A' + pin, irq); | 1069 | irq = IO_APIC_get_PCI_irq_vector(bus, |
1070 | } | 1070 | PCI_SLOT(bridge->devfn), pin); |
1071 | if (irq >= 0) { | 1071 | if (irq >= 0) |
1072 | dev_info(&dev->dev, "PCI->APIC IRQ transform: INT %c -> IRQ %d\n", 'A' + pin, irq); | 1072 | dev_warn(&dev->dev, |
1073 | dev->irq = irq; | 1073 | "using bridge %s INT %c to " |
1074 | } | 1074 | "get IRQ %d\n", |
1075 | pci_name(bridge), | ||
1076 | 'A' + pin, irq); | ||
1077 | } | ||
1078 | if (irq >= 0) { | ||
1079 | dev_info(&dev->dev, | ||
1080 | "PCI->APIC IRQ transform: INT %c " | ||
1081 | "-> IRQ %d\n", | ||
1082 | 'A' + pin, irq); | ||
1083 | dev->irq = irq; | ||
1075 | } | 1084 | } |
1076 | } | 1085 | } |
1077 | #endif | 1086 | #endif |
diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c index d3e083dea720..274d06082f48 100644 --- a/arch/x86/power/cpu_32.c +++ b/arch/x86/power/cpu_32.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/suspend.h> | 11 | #include <linux/suspend.h> |
12 | #include <asm/mtrr.h> | 12 | #include <asm/mtrr.h> |
13 | #include <asm/mce.h> | 13 | #include <asm/mce.h> |
14 | #include <asm/xcr.h> | ||
14 | 15 | ||
15 | static struct saved_context saved_context; | 16 | static struct saved_context saved_context; |
16 | 17 | ||
@@ -126,6 +127,12 @@ static void __restore_processor_state(struct saved_context *ctxt) | |||
126 | if (boot_cpu_has(X86_FEATURE_SEP)) | 127 | if (boot_cpu_has(X86_FEATURE_SEP)) |
127 | enable_sep_cpu(); | 128 | enable_sep_cpu(); |
128 | 129 | ||
130 | /* | ||
131 | * restore XCR0 for xsave-capable CPUs. | ||
132 | */ | ||
133 | if (cpu_has_xsave) | ||
134 | xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask); | ||
135 | |||
129 | fix_processor_context(); | 136 | fix_processor_context(); |
130 | do_fpu_end(); | 137 | do_fpu_end(); |
131 | mtrr_ap_init(); | 138 | mtrr_ap_init(); |
diff --git a/arch/x86/power/cpu_64.c b/arch/x86/power/cpu_64.c index 66bdfb591fd8..e3b6cf70d62c 100644 --- a/arch/x86/power/cpu_64.c +++ b/arch/x86/power/cpu_64.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <asm/page.h> | 14 | #include <asm/page.h> |
15 | #include <asm/pgtable.h> | 15 | #include <asm/pgtable.h> |
16 | #include <asm/mtrr.h> | 16 | #include <asm/mtrr.h> |
17 | #include <asm/xcr.h> | ||
17 | 18 | ||
18 | static void fix_processor_context(void); | 19 | static void fix_processor_context(void); |
19 | 20 | ||
@@ -122,6 +123,12 @@ static void __restore_processor_state(struct saved_context *ctxt) | |||
122 | wrmsrl(MSR_GS_BASE, ctxt->gs_base); | 123 | wrmsrl(MSR_GS_BASE, ctxt->gs_base); |
123 | wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); | 124 | wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); |
124 | 125 | ||
126 | /* | ||
127 | * restore XCR0 for xsave-capable CPUs. | ||
128 | */ | ||
129 | if (cpu_has_xsave) | ||
130 | xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask); | ||
131 | |||
125 | fix_processor_context(); | 132 | fix_processor_context(); |
126 | 133 | ||
127 | do_fpu_end(); | 134 | do_fpu_end(); |
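Both resume paths now rewrite XCR0 so that a suspend/resume cycle cannot leave fewer xfeatures enabled than the kernel expects. A hedged sketch of an xsetbv() wrapper of the kind these hunks call; the raw opcode bytes reflect how the instruction was typically emitted before assemblers knew it, and while this compiles, XSETBV can only execute in ring 0:

```c
/* XCR0 (index 0) is written from EDX:EAX via XSETBV.  Kernel-only. */
#include <stdint.h>

#define XCR_XFEATURE_ENABLED_MASK 0	/* XCR0 */

static inline void xsetbv(uint32_t index, uint64_t value)
{
	uint32_t eax = (uint32_t)value;
	uint32_t edx = (uint32_t)(value >> 32);

	asm volatile(".byte 0x0f, 0x01, 0xd1"	/* xsetbv */
		     : : "a" (eax), "d" (edx), "c" (index));
}
```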
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S index 4fc7e872c85e..d1e9b53f9d33 100644 --- a/arch/x86/power/hibernate_asm_32.S +++ b/arch/x86/power/hibernate_asm_32.S | |||
@@ -1,5 +1,3 @@ | |||
1 | .text | ||
2 | |||
3 | /* | 1 | /* |
4 | * This may not use any stack, nor any variable that is not "NoSave": | 2 | * This may not use any stack, nor any variable that is not "NoSave": |
5 | * | 3 | * |
@@ -12,17 +10,18 @@ | |||
12 | #include <asm/segment.h> | 10 | #include <asm/segment.h> |
13 | #include <asm/page.h> | 11 | #include <asm/page.h> |
14 | #include <asm/asm-offsets.h> | 12 | #include <asm/asm-offsets.h> |
13 | #include <asm/processor-flags.h> | ||
15 | 14 | ||
16 | .text | 15 | .text |
17 | 16 | ||
18 | ENTRY(swsusp_arch_suspend) | 17 | ENTRY(swsusp_arch_suspend) |
19 | |||
20 | movl %esp, saved_context_esp | 18 | movl %esp, saved_context_esp |
21 | movl %ebx, saved_context_ebx | 19 | movl %ebx, saved_context_ebx |
22 | movl %ebp, saved_context_ebp | 20 | movl %ebp, saved_context_ebp |
23 | movl %esi, saved_context_esi | 21 | movl %esi, saved_context_esi |
24 | movl %edi, saved_context_edi | 22 | movl %edi, saved_context_edi |
25 | pushfl ; popl saved_context_eflags | 23 | pushfl |
24 | popl saved_context_eflags | ||
26 | 25 | ||
27 | call swsusp_save | 26 | call swsusp_save |
28 | ret | 27 | ret |
@@ -59,7 +58,7 @@ done: | |||
59 | movl mmu_cr4_features, %ecx | 58 | movl mmu_cr4_features, %ecx |
60 | jecxz 1f # cr4 Pentium and higher, skip if zero | 59 | jecxz 1f # cr4 Pentium and higher, skip if zero |
61 | movl %ecx, %edx | 60 | movl %ecx, %edx |
62 | andl $~(1<<7), %edx; # PGE | 61 | andl $~(X86_CR4_PGE), %edx |
63 | movl %edx, %cr4; # turn off PGE | 62 | movl %edx, %cr4; # turn off PGE |
64 | 1: | 63 | 1: |
65 | movl %cr3, %eax; # flush TLB | 64 | movl %cr3, %eax; # flush TLB |
@@ -74,7 +73,8 @@ done: | |||
74 | movl saved_context_esi, %esi | 73 | movl saved_context_esi, %esi |
75 | movl saved_context_edi, %edi | 74 | movl saved_context_edi, %edi |
76 | 75 | ||
77 | pushl saved_context_eflags ; popfl | 76 | pushl saved_context_eflags |
77 | popfl | ||
78 | 78 | ||
79 | xorl %eax, %eax | 79 | xorl %eax, %eax |
80 | 80 | ||
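The hibernation asm now masks off the symbolic X86_CR4_PGE instead of a bare `1<<7`. Clearing PGE (then restoring it) is the standard way to flush global TLB entries, which an ordinary CR3 reload leaves in place. A C-level sketch of that sequence, with read_cr4()/write_cr4() stand-ins assumed for the kernel's accessors; ring 0 only:

```c
/* Toggling CR4.PGE flushes global TLB entries that survive a CR3 write. */
#include <stdint.h>

#define X86_CR4_PGE (1UL << 7)	/* page global enable */

static inline unsigned long read_cr4(void)
{
	unsigned long cr4;

	asm volatile("mov %%cr4, %0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long cr4)
{
	asm volatile("mov %0, %%cr4" : : "r" (cr4) : "memory");
}

static inline void flush_tlb_global(void)
{
	unsigned long cr4 = read_cr4();

	write_cr4(cr4 & ~X86_CR4_PGE);	/* clearing PGE flushes globals */
	write_cr4(cr4);			/* restore the original value */
}
```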
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index a4e201b47f64..7b3508952b9c 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <xen/hvc-console.h> | 36 | #include <xen/hvc-console.h> |
37 | 37 | ||
38 | #include <asm/paravirt.h> | 38 | #include <asm/paravirt.h> |
39 | #include <asm/apic.h> | ||
39 | #include <asm/page.h> | 40 | #include <asm/page.h> |
40 | #include <asm/xen/hypercall.h> | 41 | #include <asm/xen/hypercall.h> |
41 | #include <asm/xen/hypervisor.h> | 42 | #include <asm/xen/hypervisor.h> |
@@ -580,16 +581,47 @@ static void xen_io_delay(void) | |||
580 | } | 581 | } |
581 | 582 | ||
582 | #ifdef CONFIG_X86_LOCAL_APIC | 583 | #ifdef CONFIG_X86_LOCAL_APIC |
583 | static u32 xen_apic_read(unsigned long reg) | 584 | static u32 xen_apic_read(u32 reg) |
584 | { | 585 | { |
585 | return 0; | 586 | return 0; |
586 | } | 587 | } |
587 | 588 | ||
588 | static void xen_apic_write(unsigned long reg, u32 val) | 589 | static void xen_apic_write(u32 reg, u32 val) |
589 | { | 590 | { |
590 | /* Warn to see if there's any stray references */ | 591 | /* Warn to see if there's any stray references */ |
591 | WARN_ON(1); | 592 | WARN_ON(1); |
592 | } | 593 | } |
594 | |||
595 | static u64 xen_apic_icr_read(void) | ||
596 | { | ||
597 | return 0; | ||
598 | } | ||
599 | |||
600 | static void xen_apic_icr_write(u32 low, u32 id) | ||
601 | { | ||
602 | /* Warn to see if there are any stray references */ | ||
603 | WARN_ON(1); | ||
604 | } | ||
605 | |||
606 | static void xen_apic_wait_icr_idle(void) | ||
607 | { | ||
608 | return; | ||
609 | } | ||
610 | |||
611 | static u32 xen_safe_apic_wait_icr_idle(void) | ||
612 | { | ||
613 | return 0; | ||
614 | } | ||
615 | |||
616 | static struct apic_ops xen_basic_apic_ops = { | ||
617 | .read = xen_apic_read, | ||
618 | .write = xen_apic_write, | ||
619 | .icr_read = xen_apic_icr_read, | ||
620 | .icr_write = xen_apic_icr_write, | ||
621 | .wait_icr_idle = xen_apic_wait_icr_idle, | ||
622 | .safe_wait_icr_idle = xen_safe_apic_wait_icr_idle, | ||
623 | }; | ||
624 | |||
593 | #endif | 625 | #endif |
594 | 626 | ||
595 | static void xen_flush_tlb(void) | 627 | static void xen_flush_tlb(void) |
@@ -1273,8 +1305,6 @@ static const struct pv_irq_ops xen_irq_ops __initdata = { | |||
1273 | 1305 | ||
1274 | static const struct pv_apic_ops xen_apic_ops __initdata = { | 1306 | static const struct pv_apic_ops xen_apic_ops __initdata = { |
1275 | #ifdef CONFIG_X86_LOCAL_APIC | 1307 | #ifdef CONFIG_X86_LOCAL_APIC |
1276 | .apic_write = xen_apic_write, | ||
1277 | .apic_read = xen_apic_read, | ||
1278 | .setup_boot_clock = paravirt_nop, | 1308 | .setup_boot_clock = paravirt_nop, |
1279 | .setup_secondary_clock = paravirt_nop, | 1309 | .setup_secondary_clock = paravirt_nop, |
1280 | .startup_ipi_hook = paravirt_nop, | 1310 | .startup_ipi_hook = paravirt_nop, |
@@ -1677,6 +1707,13 @@ asmlinkage void __init xen_start_kernel(void) | |||
1677 | pv_apic_ops = xen_apic_ops; | 1707 | pv_apic_ops = xen_apic_ops; |
1678 | pv_mmu_ops = xen_mmu_ops; | 1708 | pv_mmu_ops = xen_mmu_ops; |
1679 | 1709 | ||
1710 | #ifdef CONFIG_X86_LOCAL_APIC | ||
1711 | /* | ||
1712 | * set up the basic apic ops. | ||
1713 | */ | ||
1714 | apic_ops = &xen_basic_apic_ops; | ||
1715 | #endif | ||
1716 | |||
1680 | if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) { | 1717 | if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) { |
1681 | pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start; | 1718 | pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start; |
1682 | pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit; | 1719 | pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit; |
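As with lguest earlier in this series, Xen now installs a whole apic_ops table instead of patching two pv_apic_ops pointers; generic code presumably keeps calling apic_read()/apic_write() through thin trampolines over the registered table. A sketch of that caller side, with the wrapper shapes assumed from the ops struct in this patch rather than quoted from it:

```c
/* Caller side of the apic_ops indirection: one-line trampolines. */
#include <stdint.h>

struct apic_ops {
	uint32_t (*read)(uint32_t reg);
	void (*write)(uint32_t reg, uint32_t v);
	uint64_t (*icr_read)(void);
	void (*icr_write)(uint32_t low, uint32_t id);
	void (*wait_icr_idle)(void);
	uint32_t (*safe_wait_icr_idle)(void);
};

/* Set at boot, e.g. apic_ops = &xen_basic_apic_ops; */
extern struct apic_ops *apic_ops;

static inline uint32_t apic_read(uint32_t reg)
{
	return apic_ops->read(reg);
}

static inline void apic_write(uint32_t reg, uint32_t v)
{
	apic_ops->write(reg, v);
}
```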